From 50ba0232fd5312410f1b65247e774244f89a628e Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Sat, 18 May 2024 20:50:36 +0200 Subject: Merging upstream version 6.8.9. Signed-off-by: Daniel Baumann --- drivers/net/ethernet/intel/ice/Makefile | 5 +- drivers/net/ethernet/intel/ice/ice.h | 30 +- drivers/net/ethernet/intel/ice/ice_adminq_cmd.h | 192 +++--- drivers/net/ethernet/intel/ice/ice_base.c | 63 +- drivers/net/ethernet/intel/ice/ice_base.h | 4 +- drivers/net/ethernet/intel/ice/ice_common.c | 330 +++------- drivers/net/ethernet/intel/ice/ice_common.h | 4 +- drivers/net/ethernet/intel/ice/ice_dcb.c | 79 +-- drivers/net/ethernet/intel/ice/ice_dcb_lib.c | 2 +- drivers/net/ethernet/intel/ice/ice_dcb_nl.c | 2 +- drivers/net/ethernet/intel/ice/ice_debugfs.c | 667 +++++++++++++++++++++ drivers/net/ethernet/intel/ice/ice_devlink.c | 49 ++ drivers/net/ethernet/intel/ice/ice_devlink.h | 1 + drivers/net/ethernet/intel/ice/ice_dpll.c | 30 +- drivers/net/ethernet/intel/ice/ice_eswitch.c | 568 ++++++++++-------- drivers/net/ethernet/intel/ice/ice_eswitch.h | 22 +- drivers/net/ethernet/intel/ice/ice_eswitch_br.c | 22 +- drivers/net/ethernet/intel/ice/ice_ethtool.c | 116 ++-- drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c | 51 +- drivers/net/ethernet/intel/ice/ice_fdir.c | 69 +-- drivers/net/ethernet/intel/ice/ice_flex_pipe.c | 52 +- drivers/net/ethernet/intel/ice/ice_flex_pipe.h | 4 +- drivers/net/ethernet/intel/ice/ice_flex_type.h | 7 + drivers/net/ethernet/intel/ice/ice_flow.c | 482 +++++++++++---- drivers/net/ethernet/intel/ice/ice_flow.h | 60 +- drivers/net/ethernet/intel/ice/ice_fwlog.c | 470 +++++++++++++++ drivers/net/ethernet/intel/ice/ice_fwlog.h | 79 +++ drivers/net/ethernet/intel/ice/ice_hw_autogen.h | 6 + drivers/net/ethernet/intel/ice/ice_hwmon.c | 126 ++++ drivers/net/ethernet/intel/ice/ice_hwmon.h | 15 + drivers/net/ethernet/intel/ice/ice_lag.c | 36 +- drivers/net/ethernet/intel/ice/ice_lag.h | 3 + drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h | 412 ++++++------- drivers/net/ethernet/intel/ice/ice_lib.c | 385 +++++++----- drivers/net/ethernet/intel/ice/ice_lib.h | 10 + drivers/net/ethernet/intel/ice/ice_main.c | 334 +++++++++-- drivers/net/ethernet/intel/ice/ice_nvm.c | 15 +- drivers/net/ethernet/intel/ice/ice_osdep.h | 2 +- drivers/net/ethernet/intel/ice/ice_ptp.c | 315 +++++++--- drivers/net/ethernet/intel/ice/ice_ptp.h | 27 +- drivers/net/ethernet/intel/ice/ice_ptp_consts.h | 12 +- drivers/net/ethernet/intel/ice/ice_ptp_hw.c | 444 +++++++------- drivers/net/ethernet/intel/ice/ice_ptp_hw.h | 49 +- drivers/net/ethernet/intel/ice/ice_repr.c | 195 +++--- drivers/net/ethernet/intel/ice/ice_repr.h | 9 +- drivers/net/ethernet/intel/ice/ice_sched.c | 3 +- drivers/net/ethernet/intel/ice/ice_sriov.c | 61 +- drivers/net/ethernet/intel/ice/ice_switch.c | 124 ++-- drivers/net/ethernet/intel/ice/ice_switch.h | 4 +- drivers/net/ethernet/intel/ice/ice_tc_lib.c | 60 +- drivers/net/ethernet/intel/ice/ice_txrx.c | 25 +- drivers/net/ethernet/intel/ice/ice_txrx.h | 32 +- drivers/net/ethernet/intel/ice/ice_txrx_lib.c | 207 ++++++- drivers/net/ethernet/intel/ice/ice_txrx_lib.h | 18 +- drivers/net/ethernet/intel/ice/ice_type.h | 44 +- drivers/net/ethernet/intel/ice/ice_vf_lib.c | 25 +- drivers/net/ethernet/intel/ice/ice_vf_lib.h | 2 +- .../net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c | 18 +- drivers/net/ethernet/intel/ice/ice_virtchnl.c | 104 +++- drivers/net/ethernet/intel/ice/ice_virtchnl.h | 10 + .../ethernet/intel/ice/ice_virtchnl_allowlist.c | 1 + 
drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c | 48 +- drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c | 25 +- drivers/net/ethernet/intel/ice/ice_xsk.c | 17 +- 64 files changed, 4580 insertions(+), 2103 deletions(-) create mode 100644 drivers/net/ethernet/intel/ice/ice_debugfs.c create mode 100644 drivers/net/ethernet/intel/ice/ice_fwlog.c create mode 100644 drivers/net/ethernet/intel/ice/ice_fwlog.h create mode 100644 drivers/net/ethernet/intel/ice/ice_hwmon.c create mode 100644 drivers/net/ethernet/intel/ice/ice_hwmon.h (limited to 'drivers/net/ethernet/intel/ice') diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile index 0679907980..cddd82d4ca 100644 --- a/drivers/net/ethernet/intel/ice/Makefile +++ b/drivers/net/ethernet/intel/ice/Makefile @@ -34,7 +34,9 @@ ice-y := ice_main.o \ ice_lag.o \ ice_ethtool.o \ ice_repr.o \ - ice_tc_lib.o + ice_tc_lib.o \ + ice_fwlog.o \ + ice_debugfs.o ice-$(CONFIG_PCI_IOV) += \ ice_sriov.o \ ice_virtchnl.o \ @@ -49,3 +51,4 @@ ice-$(CONFIG_RFS_ACCEL) += ice_arfs.o ice-$(CONFIG_XDP_SOCKETS) += ice_xsk.o ice-$(CONFIG_ICE_SWITCHDEV) += ice_eswitch.o ice_eswitch_br.o ice-$(CONFIG_GNSS) += ice_gnss.o +ice-$(CONFIG_ICE_HWMON) += ice_hwmon.o diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index 351e0d36df..367b613d92 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -360,6 +360,7 @@ struct ice_vsi { /* RSS config */ u16 rss_table_size; /* HW RSS table size */ u16 rss_size; /* Allocated RSS queues */ + u8 rss_hfunc; /* User configured hash type */ u8 *rss_hkey_user; /* User configured hash keys */ u8 *rss_lut_user; /* User configured lookup table entries */ u8 rss_lut_type; /* used to configure Get/Set RSS LUT AQ call */ @@ -517,16 +518,22 @@ enum ice_pf_flags { }; enum ice_misc_thread_tasks { - ICE_MISC_THREAD_EXTTS_EVENT, ICE_MISC_THREAD_TX_TSTAMP, ICE_MISC_THREAD_NBITS /* must be last */ }; -struct ice_switchdev_info { +struct ice_eswitch { struct ice_vsi *control_vsi; struct ice_vsi *uplink_vsi; struct ice_esw_br_offloads *br_offloads; + struct xarray reprs; bool is_running; + /* struct to allow cp queues management optimization */ + struct { + int to_reach; + int value; + bool is_reaching; + } qs; }; struct ice_agg_node { @@ -563,6 +570,10 @@ struct ice_pf { struct ice_vsi_stats **vsi_stats; struct ice_sw *first_sw; /* first switch created by firmware */ u16 eswitch_mode; /* current mode of eswitch */ + struct dentry *ice_debugfs_pf; + struct dentry *ice_debugfs_pf_fwlog; + /* keep track of all the dentrys for FW log modules */ + struct dentry **ice_debugfs_pf_fwlog_modules; struct ice_vfs vfs; DECLARE_BITMAP(features, ICE_F_MAX); DECLARE_BITMAP(state, ICE_STATE_NBITS); @@ -597,6 +608,7 @@ struct ice_pf { u32 hw_csum_rx_error; u32 oicr_err_reg; struct msi_map oicr_irq; /* Other interrupt cause MSIX vector */ + struct msi_map ll_ts_irq; /* LL_TS interrupt MSIX vector */ u16 max_pf_txqs; /* Total Tx queues PF wide */ u16 max_pf_rxqs; /* Total Rx queues PF wide */ u16 num_lan_msix; /* Total MSIX vectors for base driver */ @@ -621,6 +633,7 @@ struct ice_pf { unsigned long tx_timeout_last_recovery; u32 tx_timeout_recovery_level; char int_name[ICE_INT_NAME_STR_LEN]; + char int_name_ll_ts[ICE_INT_NAME_STR_LEN]; struct auxiliary_device *adev; int aux_idx; u32 sw_int_count; @@ -637,7 +650,7 @@ struct ice_pf { struct ice_link_default_override_tlv link_dflt_override; struct ice_lag *lag; /* Link Aggregation information */ - struct 
ice_switchdev_info switchdev; + struct ice_eswitch eswitch; struct ice_esw_br_port *br_port; #define ICE_INVALID_AGG_NODE_ID 0 @@ -648,6 +661,7 @@ struct ice_pf { #define ICE_MAX_VF_AGG_NODES 32 struct ice_agg_node vf_agg_node[ICE_MAX_VF_AGG_NODES]; struct ice_dplls dplls; + struct device *hwmon_dev; }; extern struct workqueue_struct *ice_lag_wq; @@ -846,7 +860,7 @@ static inline struct ice_vsi *ice_find_vsi(struct ice_pf *pf, u16 vsi_num) */ static inline bool ice_is_switchdev_running(struct ice_pf *pf) { - return pf->switchdev.is_running; + return pf->eswitch.is_running; } #define ICE_FD_STAT_CTR_BLOCK_COUNT 256 @@ -881,6 +895,11 @@ static inline bool ice_is_adq_active(struct ice_pf *pf) return false; } +void ice_debugfs_fwlog_init(struct ice_pf *pf); +void ice_debugfs_init(void); +void ice_debugfs_exit(void); +void ice_pf_fwlog_update_module(struct ice_pf *pf, int log_level, int module); + bool netif_is_ice(const struct net_device *dev); int ice_vsi_setup_tx_rings(struct ice_vsi *vsi); int ice_vsi_setup_rx_rings(struct ice_vsi *vsi); @@ -912,6 +931,7 @@ int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size); int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size); int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed); int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed); +int ice_set_rss_hfunc(struct ice_vsi *vsi, u8 hfunc); void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size); int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset); void ice_print_link_msg(struct ice_vsi *vsi, bool isup); @@ -989,4 +1009,6 @@ static inline void ice_clear_rdma_cap(struct ice_pf *pf) set_bit(ICE_FLAG_UNPLUG_AUX_DEV, pf->flags); clear_bit(ICE_FLAG_RDMA_ENA, pf->flags); } + +extern const struct xdp_metadata_ops ice_xdp_md_ops; #endif /* _ICE_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index b8437c36ff..1f3e7a6903 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h @@ -117,6 +117,7 @@ struct ice_aqc_list_caps_elem { #define ICE_AQC_CAPS_NET_VER 0x004C #define ICE_AQC_CAPS_PENDING_NET_VER 0x004D #define ICE_AQC_CAPS_RDMA 0x0051 +#define ICE_AQC_CAPS_SENSOR_READING 0x0067 #define ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE 0x0076 #define ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT 0x0077 #define ICE_AQC_CAPS_NVM_MGMT 0x0080 @@ -592,8 +593,9 @@ struct ice_aqc_recipe_data_elem { struct ice_aqc_recipe_to_profile { __le16 profile_id; u8 rsvd[6]; - DECLARE_BITMAP(recipe_assoc, ICE_MAX_NUM_RECIPES); + __le64 recipe_assoc; }; +static_assert(sizeof(struct ice_aqc_recipe_to_profile) == 16); /* Add/Update/Remove/Get switch rules (indirect 0x02A0, 0x02A1, 0x02A2, 0x02A3) */ @@ -1414,6 +1416,30 @@ struct ice_aqc_get_phy_rec_clk_out { __le16 node_handle; }; +/* Get sensor reading (direct 0x0632) */ +struct ice_aqc_get_sensor_reading { + u8 sensor; + u8 format; + u8 reserved[6]; + __le32 addr_high; + __le32 addr_low; +}; + +/* Get sensor reading response (direct 0x0632) */ +struct ice_aqc_get_sensor_reading_resp { + union { + u8 raw[8]; + /* Output data for sensor 0x00, format 0x00 */ + struct _packed { + s8 temp; + u8 temp_warning_threshold; + u8 temp_critical_threshold; + u8 temp_fatal_threshold; + u8 reserved[4]; + } s0f0; + } data; +}; + struct ice_aqc_link_topo_params { u8 lport_num; u8 lport_num_valid; @@ -2070,78 +2096,6 @@ struct ice_aqc_add_rdma_qset_data { struct ice_aqc_add_tx_rdma_qset_entry rdma_qsets[]; }; -/* Configure Firmware Logging Command (indirect 
0xFF09) - * Logging Information Read Response (indirect 0xFF10) - * Note: The 0xFF10 command has no input parameters. - */ -struct ice_aqc_fw_logging { - u8 log_ctrl; -#define ICE_AQC_FW_LOG_AQ_EN BIT(0) -#define ICE_AQC_FW_LOG_UART_EN BIT(1) - u8 rsvd0; - u8 log_ctrl_valid; /* Not used by 0xFF10 Response */ -#define ICE_AQC_FW_LOG_AQ_VALID BIT(0) -#define ICE_AQC_FW_LOG_UART_VALID BIT(1) - u8 rsvd1[5]; - __le32 addr_high; - __le32 addr_low; -}; - -enum ice_aqc_fw_logging_mod { - ICE_AQC_FW_LOG_ID_GENERAL = 0, - ICE_AQC_FW_LOG_ID_CTRL, - ICE_AQC_FW_LOG_ID_LINK, - ICE_AQC_FW_LOG_ID_LINK_TOPO, - ICE_AQC_FW_LOG_ID_DNL, - ICE_AQC_FW_LOG_ID_I2C, - ICE_AQC_FW_LOG_ID_SDP, - ICE_AQC_FW_LOG_ID_MDIO, - ICE_AQC_FW_LOG_ID_ADMINQ, - ICE_AQC_FW_LOG_ID_HDMA, - ICE_AQC_FW_LOG_ID_LLDP, - ICE_AQC_FW_LOG_ID_DCBX, - ICE_AQC_FW_LOG_ID_DCB, - ICE_AQC_FW_LOG_ID_NETPROXY, - ICE_AQC_FW_LOG_ID_NVM, - ICE_AQC_FW_LOG_ID_AUTH, - ICE_AQC_FW_LOG_ID_VPD, - ICE_AQC_FW_LOG_ID_IOSF, - ICE_AQC_FW_LOG_ID_PARSER, - ICE_AQC_FW_LOG_ID_SW, - ICE_AQC_FW_LOG_ID_SCHEDULER, - ICE_AQC_FW_LOG_ID_TXQ, - ICE_AQC_FW_LOG_ID_RSVD, - ICE_AQC_FW_LOG_ID_POST, - ICE_AQC_FW_LOG_ID_WATCHDOG, - ICE_AQC_FW_LOG_ID_TASK_DISPATCH, - ICE_AQC_FW_LOG_ID_MNG, - ICE_AQC_FW_LOG_ID_MAX, -}; - -/* Defines for both above FW logging command/response buffers */ -#define ICE_AQC_FW_LOG_ID_S 0 -#define ICE_AQC_FW_LOG_ID_M (0xFFF << ICE_AQC_FW_LOG_ID_S) - -#define ICE_AQC_FW_LOG_CONF_SUCCESS 0 /* Used by response */ -#define ICE_AQC_FW_LOG_CONF_BAD_INDX BIT(12) /* Used by response */ - -#define ICE_AQC_FW_LOG_EN_S 12 -#define ICE_AQC_FW_LOG_EN_M (0xF << ICE_AQC_FW_LOG_EN_S) -#define ICE_AQC_FW_LOG_INFO_EN BIT(12) /* Used by command */ -#define ICE_AQC_FW_LOG_INIT_EN BIT(13) /* Used by command */ -#define ICE_AQC_FW_LOG_FLOW_EN BIT(14) /* Used by command */ -#define ICE_AQC_FW_LOG_ERR_EN BIT(15) /* Used by command */ - -/* Get/Clear FW Log (indirect 0xFF11) */ -struct ice_aqc_get_clear_fw_log { - u8 flags; -#define ICE_AQC_FW_LOG_CLEAR BIT(0) -#define ICE_AQC_FW_LOG_MORE_DATA_AVAIL BIT(1) - u8 rsvd1[7]; - __le32 addr_high; - __le32 addr_low; -}; - /* Download Package (indirect 0x0C40) */ /* Also used for Update Package (indirect 0x0C41 and 0x0C42) */ struct ice_aqc_download_pkg { @@ -2404,6 +2358,84 @@ struct ice_aqc_event_lan_overflow { u8 reserved[8]; }; +enum ice_aqc_fw_logging_mod { + ICE_AQC_FW_LOG_ID_GENERAL = 0, + ICE_AQC_FW_LOG_ID_CTRL, + ICE_AQC_FW_LOG_ID_LINK, + ICE_AQC_FW_LOG_ID_LINK_TOPO, + ICE_AQC_FW_LOG_ID_DNL, + ICE_AQC_FW_LOG_ID_I2C, + ICE_AQC_FW_LOG_ID_SDP, + ICE_AQC_FW_LOG_ID_MDIO, + ICE_AQC_FW_LOG_ID_ADMINQ, + ICE_AQC_FW_LOG_ID_HDMA, + ICE_AQC_FW_LOG_ID_LLDP, + ICE_AQC_FW_LOG_ID_DCBX, + ICE_AQC_FW_LOG_ID_DCB, + ICE_AQC_FW_LOG_ID_XLR, + ICE_AQC_FW_LOG_ID_NVM, + ICE_AQC_FW_LOG_ID_AUTH, + ICE_AQC_FW_LOG_ID_VPD, + ICE_AQC_FW_LOG_ID_IOSF, + ICE_AQC_FW_LOG_ID_PARSER, + ICE_AQC_FW_LOG_ID_SW, + ICE_AQC_FW_LOG_ID_SCHEDULER, + ICE_AQC_FW_LOG_ID_TXQ, + ICE_AQC_FW_LOG_ID_RSVD, + ICE_AQC_FW_LOG_ID_POST, + ICE_AQC_FW_LOG_ID_WATCHDOG, + ICE_AQC_FW_LOG_ID_TASK_DISPATCH, + ICE_AQC_FW_LOG_ID_MNG, + ICE_AQC_FW_LOG_ID_SYNCE, + ICE_AQC_FW_LOG_ID_HEALTH, + ICE_AQC_FW_LOG_ID_TSDRV, + ICE_AQC_FW_LOG_ID_PFREG, + ICE_AQC_FW_LOG_ID_MDLVER, + ICE_AQC_FW_LOG_ID_MAX, +}; + +/* Set FW Logging configuration (indirect 0xFF30) + * Register for FW Logging (indirect 0xFF31) + * Query FW Logging (indirect 0xFF32) + * FW Log Event (indirect 0xFF33) + */ +struct ice_aqc_fw_log { + u8 cmd_flags; +#define ICE_AQC_FW_LOG_CONF_UART_EN BIT(0) +#define ICE_AQC_FW_LOG_CONF_AQ_EN BIT(1) 
+#define ICE_AQC_FW_LOG_QUERY_REGISTERED BIT(2) +#define ICE_AQC_FW_LOG_CONF_SET_VALID BIT(3) +#define ICE_AQC_FW_LOG_AQ_REGISTER BIT(0) +#define ICE_AQC_FW_LOG_AQ_QUERY BIT(2) + + u8 rsp_flag; + __le16 fw_rt_msb; + union { + struct { + __le32 fw_rt_lsb; + } sync; + struct { + __le16 log_resolution; +#define ICE_AQC_FW_LOG_MIN_RESOLUTION (1) +#define ICE_AQC_FW_LOG_MAX_RESOLUTION (128) + + __le16 mdl_cnt; + } cfg; + } ops; + __le32 addr_high; + __le32 addr_low; +}; + +/* Response Buffer for: + * Set Firmware Logging Configuration (0xFF30) + * Query FW Logging (0xFF32) + */ +struct ice_aqc_fw_log_cfg_resp { + __le16 module_identifier; + u8 log_level; + u8 rsvd0; +}; + /** * struct ice_aq_desc - Admin Queue (AQ) descriptor * @flags: ICE_AQ_FLAG_* flags @@ -2444,6 +2476,8 @@ struct ice_aq_desc { struct ice_aqc_restart_an restart_an; struct ice_aqc_set_phy_rec_clk_out set_phy_rec_clk_out; struct ice_aqc_get_phy_rec_clk_out get_phy_rec_clk_out; + struct ice_aqc_get_sensor_reading get_sensor_reading; + struct ice_aqc_get_sensor_reading_resp get_sensor_reading_resp; struct ice_aqc_gpio read_write_gpio; struct ice_aqc_sff_eeprom read_write_sff_param; struct ice_aqc_set_port_id_led set_port_id_led; @@ -2481,8 +2515,6 @@ struct ice_aq_desc { struct ice_aqc_add_rdma_qset add_rdma_qset; struct ice_aqc_add_get_update_free_vsi vsi_cmd; struct ice_aqc_add_update_free_vsi_resp add_update_free_vsi_res; - struct ice_aqc_fw_logging fw_logging; - struct ice_aqc_get_clear_fw_log get_clear_fw_log; struct ice_aqc_download_pkg download_pkg; struct ice_aqc_set_cgu_input_config set_cgu_input_config; struct ice_aqc_get_cgu_input_config get_cgu_input_config; @@ -2494,6 +2526,7 @@ struct ice_aq_desc { struct ice_aqc_get_cgu_ref_prio get_cgu_ref_prio; struct ice_aqc_get_cgu_info get_cgu_info; struct ice_aqc_driver_shared_params drv_shared_params; + struct ice_aqc_fw_log fw_log; struct ice_aqc_set_mac_lb set_mac_lb; struct ice_aqc_alloc_free_res_cmd sw_res_ctrl; struct ice_aqc_set_mac_cfg set_mac_cfg; @@ -2619,6 +2652,7 @@ enum ice_adminq_opc { ice_aqc_opc_set_mac_lb = 0x0620, ice_aqc_opc_set_phy_rec_clk_out = 0x0630, ice_aqc_opc_get_phy_rec_clk_out = 0x0631, + ice_aqc_opc_get_sensor_reading = 0x0632, ice_aqc_opc_get_link_topo = 0x06E0, ice_aqc_opc_read_i2c = 0x06E2, ice_aqc_opc_write_i2c = 0x06E3, @@ -2691,9 +2725,11 @@ enum ice_adminq_opc { /* Standalone Commands/Events */ ice_aqc_opc_event_lan_overflow = 0x1001, - /* debug commands */ - ice_aqc_opc_fw_logging = 0xFF09, - ice_aqc_opc_fw_logging_info = 0xFF10, + /* FW Logging Commands */ + ice_aqc_opc_fw_logs_config = 0xFF30, + ice_aqc_opc_fw_logs_register = 0xFF31, + ice_aqc_opc_fw_logs_query = 0xFF32, + ice_aqc_opc_fw_logs_event = 0xFF33, }; #endif /* _ICE_ADMINQ_CMD_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c index 4f3e65b47c..c979192e44 100644 --- a/drivers/net/ethernet/intel/ice/ice_base.c +++ b/drivers/net/ethernet/intel/ice/ice_base.c @@ -189,10 +189,16 @@ static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx) } q_vector = vsi->q_vectors[v_idx]; - ice_for_each_tx_ring(tx_ring, q_vector->tx) + ice_for_each_tx_ring(tx_ring, q_vector->tx) { + ice_queue_set_napi(vsi, tx_ring->q_index, NETDEV_QUEUE_TYPE_TX, + NULL); tx_ring->q_vector = NULL; - ice_for_each_rx_ring(rx_ring, q_vector->rx) + } + ice_for_each_rx_ring(rx_ring, q_vector->rx) { + ice_queue_set_napi(vsi, rx_ring->q_index, NETDEV_QUEUE_TYPE_RX, + NULL); rx_ring->q_vector = NULL; + } /* only VSI with an associated netdev is set up with NAPI */ 
if (vsi->netdev) @@ -224,24 +230,16 @@ static void ice_cfg_itr_gran(struct ice_hw *hw) /* no need to update global register if ITR gran is already set */ if (!(regval & GLINT_CTL_DIS_AUTOMASK_M) && - (((regval & GLINT_CTL_ITR_GRAN_200_M) >> - GLINT_CTL_ITR_GRAN_200_S) == ICE_ITR_GRAN_US) && - (((regval & GLINT_CTL_ITR_GRAN_100_M) >> - GLINT_CTL_ITR_GRAN_100_S) == ICE_ITR_GRAN_US) && - (((regval & GLINT_CTL_ITR_GRAN_50_M) >> - GLINT_CTL_ITR_GRAN_50_S) == ICE_ITR_GRAN_US) && - (((regval & GLINT_CTL_ITR_GRAN_25_M) >> - GLINT_CTL_ITR_GRAN_25_S) == ICE_ITR_GRAN_US)) + (FIELD_GET(GLINT_CTL_ITR_GRAN_200_M, regval) == ICE_ITR_GRAN_US) && + (FIELD_GET(GLINT_CTL_ITR_GRAN_100_M, regval) == ICE_ITR_GRAN_US) && + (FIELD_GET(GLINT_CTL_ITR_GRAN_50_M, regval) == ICE_ITR_GRAN_US) && + (FIELD_GET(GLINT_CTL_ITR_GRAN_25_M, regval) == ICE_ITR_GRAN_US)) return; - regval = ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_200_S) & - GLINT_CTL_ITR_GRAN_200_M) | - ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_100_S) & - GLINT_CTL_ITR_GRAN_100_M) | - ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_50_S) & - GLINT_CTL_ITR_GRAN_50_M) | - ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_25_S) & - GLINT_CTL_ITR_GRAN_25_M); + regval = FIELD_PREP(GLINT_CTL_ITR_GRAN_200_M, ICE_ITR_GRAN_US) | + FIELD_PREP(GLINT_CTL_ITR_GRAN_100_M, ICE_ITR_GRAN_US) | + FIELD_PREP(GLINT_CTL_ITR_GRAN_50_M, ICE_ITR_GRAN_US) | + FIELD_PREP(GLINT_CTL_ITR_GRAN_25_M, ICE_ITR_GRAN_US); wr32(hw, GLINT_CTL, regval); } @@ -278,7 +276,7 @@ static u16 ice_calc_txq_handle(struct ice_vsi *vsi, struct ice_tx_ring *ring, u8 */ static u16 ice_eswitch_calc_txq_handle(struct ice_tx_ring *ring) { - struct ice_vsi *vsi = ring->vsi; + const struct ice_vsi *vsi = ring->vsi; int i; ice_for_each_txq(vsi, i) { @@ -519,6 +517,19 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring) return 0; } +static void ice_xsk_pool_fill_cb(struct ice_rx_ring *ring) +{ + void *ctx_ptr = &ring->pkt_ctx; + struct xsk_cb_desc desc = {}; + + XSK_CHECK_PRIV_TYPE(struct ice_xdp_buff); + desc.src = &ctx_ptr; + desc.off = offsetof(struct ice_xdp_buff, pkt_ctx) - + sizeof(struct xdp_buff); + desc.bytes = sizeof(ctx_ptr); + xsk_pool_fill_cb(ring->xsk_pool, &desc); +} + /** * ice_vsi_cfg_rxq - Configure an Rx queue * @ring: the ring being configured @@ -561,6 +572,7 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring) if (err) return err; xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq); + ice_xsk_pool_fill_cb(ring); dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n", ring->q_index); @@ -584,6 +596,7 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring) xdp_init_buff(&ring->xdp, ice_rx_pg_size(ring) / 2, &ring->xdp_rxq); ring->xdp.data = NULL; + ring->xdp_ext.pkt_ctx = &ring->pkt_ctx; err = ice_setup_rx_ctx(ring); if (err) { dev_err(dev, "ice_setup_rx_ctx failed for RxQ %d, err %d\n", @@ -922,10 +935,10 @@ ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx) struct ice_hw *hw = &pf->hw; u32 val; - itr_idx = (itr_idx << QINT_TQCTL_ITR_INDX_S) & QINT_TQCTL_ITR_INDX_M; + itr_idx = FIELD_PREP(QINT_TQCTL_ITR_INDX_M, itr_idx); val = QINT_TQCTL_CAUSE_ENA_M | itr_idx | - ((msix_idx << QINT_TQCTL_MSIX_INDX_S) & QINT_TQCTL_MSIX_INDX_M); + FIELD_PREP(QINT_TQCTL_MSIX_INDX_M, msix_idx); wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val); if (ice_is_xdp_ena_vsi(vsi)) { @@ -954,10 +967,10 @@ ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx) struct ice_hw *hw = &pf->hw; u32 val; - itr_idx = (itr_idx << QINT_RQCTL_ITR_INDX_S) & QINT_RQCTL_ITR_INDX_M; + itr_idx = 
FIELD_PREP(QINT_RQCTL_ITR_INDX_M, itr_idx); val = QINT_RQCTL_CAUSE_ENA_M | itr_idx | - ((msix_idx << QINT_RQCTL_MSIX_INDX_S) & QINT_RQCTL_MSIX_INDX_M); + FIELD_PREP(QINT_RQCTL_MSIX_INDX_M, msix_idx); wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val); @@ -969,7 +982,7 @@ ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx) * @hw: pointer to the HW structure * @q_vector: interrupt vector to trigger the software interrupt for */ -void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector) +void ice_trigger_sw_intr(struct ice_hw *hw, const struct ice_q_vector *q_vector) { wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx), (ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) | @@ -1044,7 +1057,7 @@ ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, * are needed for stopping Tx queue */ void -ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_tx_ring *ring, +ice_fill_txq_meta(const struct ice_vsi *vsi, struct ice_tx_ring *ring, struct ice_txq_meta *txq_meta) { struct ice_channel *ch = ring->ch; diff --git a/drivers/net/ethernet/intel/ice/ice_base.h b/drivers/net/ethernet/intel/ice/ice_base.h index b67dca417a..17321ba756 100644 --- a/drivers/net/ethernet/intel/ice/ice_base.h +++ b/drivers/net/ethernet/intel/ice/ice_base.h @@ -22,12 +22,12 @@ void ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx); void ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx); -void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector); +void ice_trigger_sw_intr(struct ice_hw *hw, const struct ice_q_vector *q_vector); int ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, u16 rel_vmvf_num, struct ice_tx_ring *ring, struct ice_txq_meta *txq_meta); void -ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_tx_ring *ring, +ice_fill_txq_meta(const struct ice_vsi *vsi, struct ice_tx_ring *ring, struct ice_txq_meta *txq_meta); #endif /* _ICE_BASE_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index edac34c796..10c32cd80f 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -933,216 +933,6 @@ static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw) devm_kfree(ice_hw_to_dev(hw), sw); } -/** - * ice_get_fw_log_cfg - get FW logging configuration - * @hw: pointer to the HW struct - */ -static int ice_get_fw_log_cfg(struct ice_hw *hw) -{ - struct ice_aq_desc desc; - __le16 *config; - int status; - u16 size; - - size = sizeof(*config) * ICE_AQC_FW_LOG_ID_MAX; - config = kzalloc(size, GFP_KERNEL); - if (!config) - return -ENOMEM; - - ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging_info); - - status = ice_aq_send_cmd(hw, &desc, config, size, NULL); - if (!status) { - u16 i; - - /* Save FW logging information into the HW structure */ - for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) { - u16 v, m, flgs; - - v = le16_to_cpu(config[i]); - m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S; - flgs = (v & ICE_AQC_FW_LOG_EN_M) >> ICE_AQC_FW_LOG_EN_S; - - if (m < ICE_AQC_FW_LOG_ID_MAX) - hw->fw_log.evnts[m].cur = flgs; - } - } - - kfree(config); - - return status; -} - -/** - * ice_cfg_fw_log - configure FW logging - * @hw: pointer to the HW struct - * @enable: enable certain FW logging events if true, disable all if false - * - * This function enables/disables the FW logging via Rx CQ events and a UART - * port based on predetermined configurations. 
FW logging via the Rx CQ can be - * enabled/disabled for individual PF's. However, FW logging via the UART can - * only be enabled/disabled for all PFs on the same device. - * - * To enable overall FW logging, the "cq_en" and "uart_en" enable bits in - * hw->fw_log need to be set accordingly, e.g. based on user-provided input, - * before initializing the device. - * - * When re/configuring FW logging, callers need to update the "cfg" elements of - * the hw->fw_log.evnts array with the desired logging event configurations for - * modules of interest. When disabling FW logging completely, the callers can - * just pass false in the "enable" parameter. On completion, the function will - * update the "cur" element of the hw->fw_log.evnts array with the resulting - * logging event configurations of the modules that are being re/configured. FW - * logging modules that are not part of a reconfiguration operation retain their - * previous states. - * - * Before resetting the device, it is recommended that the driver disables FW - * logging before shutting down the control queue. When disabling FW logging - * ("enable" = false), the latest configurations of FW logging events stored in - * hw->fw_log.evnts[] are not overridden to allow them to be reconfigured after - * a device reset. - * - * When enabling FW logging to emit log messages via the Rx CQ during the - * device's initialization phase, a mechanism alternative to interrupt handlers - * needs to be used to extract FW log messages from the Rx CQ periodically and - * to prevent the Rx CQ from being full and stalling other types of control - * messages from FW to SW. Interrupts are typically disabled during the device's - * initialization phase. - */ -static int ice_cfg_fw_log(struct ice_hw *hw, bool enable) -{ - struct ice_aqc_fw_logging *cmd; - u16 i, chgs = 0, len = 0; - struct ice_aq_desc desc; - __le16 *data = NULL; - u8 actv_evnts = 0; - void *buf = NULL; - int status = 0; - - if (!hw->fw_log.cq_en && !hw->fw_log.uart_en) - return 0; - - /* Disable FW logging only when the control queue is still responsive */ - if (!enable && - (!hw->fw_log.actv_evnts || !ice_check_sq_alive(hw, &hw->adminq))) - return 0; - - /* Get current FW log settings */ - status = ice_get_fw_log_cfg(hw); - if (status) - return status; - - ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging); - cmd = &desc.params.fw_logging; - - /* Indicate which controls are valid */ - if (hw->fw_log.cq_en) - cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_AQ_VALID; - - if (hw->fw_log.uart_en) - cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_UART_VALID; - - if (enable) { - /* Fill in an array of entries with FW logging modules and - * logging events being reconfigured. - */ - for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) { - u16 val; - - /* Keep track of enabled event types */ - actv_evnts |= hw->fw_log.evnts[i].cfg; - - if (hw->fw_log.evnts[i].cfg == hw->fw_log.evnts[i].cur) - continue; - - if (!data) { - data = devm_kcalloc(ice_hw_to_dev(hw), - ICE_AQC_FW_LOG_ID_MAX, - sizeof(*data), - GFP_KERNEL); - if (!data) - return -ENOMEM; - } - - val = i << ICE_AQC_FW_LOG_ID_S; - val |= hw->fw_log.evnts[i].cfg << ICE_AQC_FW_LOG_EN_S; - data[chgs++] = cpu_to_le16(val); - } - - /* Only enable FW logging if at least one module is specified. - * If FW logging is currently enabled but all modules are not - * enabled to emit log messages, disable FW logging altogether. 
- */ - if (actv_evnts) { - /* Leave if there is effectively no change */ - if (!chgs) - goto out; - - if (hw->fw_log.cq_en) - cmd->log_ctrl |= ICE_AQC_FW_LOG_AQ_EN; - - if (hw->fw_log.uart_en) - cmd->log_ctrl |= ICE_AQC_FW_LOG_UART_EN; - - buf = data; - len = sizeof(*data) * chgs; - desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); - } - } - - status = ice_aq_send_cmd(hw, &desc, buf, len, NULL); - if (!status) { - /* Update the current configuration to reflect events enabled. - * hw->fw_log.cq_en and hw->fw_log.uart_en indicate if the FW - * logging mode is enabled for the device. They do not reflect - * actual modules being enabled to emit log messages. So, their - * values remain unchanged even when all modules are disabled. - */ - u16 cnt = enable ? chgs : (u16)ICE_AQC_FW_LOG_ID_MAX; - - hw->fw_log.actv_evnts = actv_evnts; - for (i = 0; i < cnt; i++) { - u16 v, m; - - if (!enable) { - /* When disabling all FW logging events as part - * of device's de-initialization, the original - * configurations are retained, and can be used - * to reconfigure FW logging later if the device - * is re-initialized. - */ - hw->fw_log.evnts[i].cur = 0; - continue; - } - - v = le16_to_cpu(data[i]); - m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S; - hw->fw_log.evnts[m].cur = hw->fw_log.evnts[m].cfg; - } - } - -out: - devm_kfree(ice_hw_to_dev(hw), data); - - return status; -} - -/** - * ice_output_fw_log - * @hw: pointer to the HW struct - * @desc: pointer to the AQ message descriptor - * @buf: pointer to the buffer accompanying the AQ message - * - * Formats a FW Log message and outputs it via the standard driver logs. - */ -void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf) -{ - ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg Start ]\n"); - ice_debug_array(hw, ICE_DBG_FW_LOG, 16, 1, (u8 *)buf, - le16_to_cpu(desc->datalen)); - ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg End ]\n"); -} - /** * ice_get_itr_intrl_gran * @hw: pointer to the HW struct @@ -1152,9 +942,8 @@ void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf) */ static void ice_get_itr_intrl_gran(struct ice_hw *hw) { - u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) & - GL_PWR_MODE_CTL_CAR_MAX_BW_M) >> - GL_PWR_MODE_CTL_CAR_MAX_BW_S; + u8 max_agg_bw = FIELD_GET(GL_PWR_MODE_CTL_CAR_MAX_BW_M, + rd32(hw, GL_PWR_MODE_CTL)); switch (max_agg_bw) { case ICE_MAX_AGG_BW_200G: @@ -1186,9 +975,7 @@ int ice_init_hw(struct ice_hw *hw) if (status) return status; - hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) & - PF_FUNC_RID_FUNC_NUM_M) >> - PF_FUNC_RID_FUNC_NUM_S; + hw->pf_id = FIELD_GET(PF_FUNC_RID_FUNC_NUM_M, rd32(hw, PF_FUNC_RID)); status = ice_reset(hw, ICE_RESET_PFR); if (status) @@ -1200,10 +987,10 @@ int ice_init_hw(struct ice_hw *hw) if (status) goto err_unroll_cqinit; - /* Enable FW logging. Not fatal if this fails. */ - status = ice_cfg_fw_log(hw, true); + status = ice_fwlog_init(hw); if (status) - ice_debug(hw, ICE_DBG_INIT, "Failed to enable FW logging.\n"); + ice_debug(hw, ICE_DBG_FW_LOG, "Error initializing FW logging: %d\n", + status); status = ice_clear_pf_cfg(hw); if (status) @@ -1354,8 +1141,7 @@ void ice_deinit_hw(struct ice_hw *hw) ice_free_hw_tbls(hw); mutex_destroy(&hw->tnl_lock); - /* Attempt to disable FW logging before shutting down control queues */ - ice_cfg_fw_log(hw, false); + ice_fwlog_deinit(hw); ice_destroy_all_ctrlq(hw); /* Clear VSI contexts if not already cleared */ @@ -1374,8 +1160,8 @@ int ice_check_reset(struct ice_hw *hw) * or EMPR has occurred. The grst delay value is in 100ms units. 
* Add 1sec for outstanding AQ commands that can take a long time. */ - grst_timeout = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >> - GLGEN_RSTCTL_GRSTDEL_S) + 10; + grst_timeout = FIELD_GET(GLGEN_RSTCTL_GRSTDEL_M, + rd32(hw, GLGEN_RSTCTL)) + 10; for (cnt = 0; cnt < grst_timeout; cnt++) { mdelay(100); @@ -2459,7 +2245,7 @@ ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, info->tmr_index_owned = ((number & ICE_TS_TMR_IDX_OWND_M) != 0); info->tmr_index_assoc = ((number & ICE_TS_TMR_IDX_ASSOC_M) != 0); - info->clk_freq = (number & ICE_TS_CLK_FREQ_M) >> ICE_TS_CLK_FREQ_S; + info->clk_freq = FIELD_GET(ICE_TS_CLK_FREQ_M, number); info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0); if (info->clk_freq < NUM_ICE_TIME_REF_FREQ) { @@ -2660,11 +2446,12 @@ ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, info->tmr0_owned = ((number & ICE_TS_TMR0_OWND_M) != 0); info->tmr0_ena = ((number & ICE_TS_TMR0_ENA_M) != 0); - info->tmr1_owner = (number & ICE_TS_TMR1_OWNR_M) >> ICE_TS_TMR1_OWNR_S; + info->tmr1_owner = FIELD_GET(ICE_TS_TMR1_OWNR_M, number); info->tmr1_owned = ((number & ICE_TS_TMR1_OWND_M) != 0); info->tmr1_ena = ((number & ICE_TS_TMR1_ENA_M) != 0); info->ts_ll_read = ((number & ICE_TS_LL_TX_TS_READ_M) != 0); + info->ts_ll_int_read = ((number & ICE_TS_LL_TX_TS_INT_READ_M) != 0); info->ena_ports = logical_id; info->tmr_own_map = phys_id; @@ -2685,6 +2472,8 @@ ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, info->tmr1_ena); ice_debug(hw, ICE_DBG_INIT, "dev caps: ts_ll_read = %u\n", info->ts_ll_read); + ice_debug(hw, ICE_DBG_INIT, "dev caps: ts_ll_int_read = %u\n", + info->ts_ll_int_read); ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 ena_ports = %u\n", info->ena_ports); ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr_own_map = %u\n", @@ -2710,6 +2499,26 @@ ice_parse_fdir_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, dev_p->num_flow_director_fltr); } +/** + * ice_parse_sensor_reading_cap - Parse ICE_AQC_CAPS_SENSOR_READING cap + * @hw: pointer to the HW struct + * @dev_p: pointer to device capabilities structure + * @cap: capability element to parse + * + * Parse ICE_AQC_CAPS_SENSOR_READING for device capability for reading + * enabled sensors. 
+ */ +static void +ice_parse_sensor_reading_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, + struct ice_aqc_list_caps_elem *cap) +{ + dev_p->supported_sensors = le32_to_cpu(cap->number); + + ice_debug(hw, ICE_DBG_INIT, + "dev caps: supported sensors (bitmap) = 0x%x\n", + dev_p->supported_sensors); +} + /** * ice_parse_dev_caps - Parse device capabilities * @hw: pointer to the HW struct @@ -2755,9 +2564,12 @@ ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, case ICE_AQC_CAPS_1588: ice_parse_1588_dev_caps(hw, dev_p, &cap_resp[i]); break; - case ICE_AQC_CAPS_FD: + case ICE_AQC_CAPS_FD: ice_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]); break; + case ICE_AQC_CAPS_SENSOR_READING: + ice_parse_sensor_reading_cap(hw, dev_p, &cap_resp[i]); + break; default: /* Don't list common capabilities as unknown */ if (!found) @@ -4072,6 +3884,7 @@ ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr, { struct ice_aqc_sff_eeprom *cmd; struct ice_aq_desc desc; + u16 i2c_bus_addr; int status; if (!data || (mem_addr & 0xff00)) @@ -4082,15 +3895,13 @@ ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr, desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD); cmd->lport_num = (u8)(lport & 0xff); cmd->lport_num_valid = (u8)((lport >> 8) & 0x01); - cmd->i2c_bus_addr = cpu_to_le16(((bus_addr >> 1) & - ICE_AQC_SFF_I2CBUS_7BIT_M) | - ((set_page << - ICE_AQC_SFF_SET_EEPROM_PAGE_S) & - ICE_AQC_SFF_SET_EEPROM_PAGE_M)); - cmd->i2c_mem_addr = cpu_to_le16(mem_addr & 0xff); - cmd->eeprom_page = cpu_to_le16((u16)page << ICE_AQC_SFF_EEPROM_PAGE_S); + i2c_bus_addr = FIELD_PREP(ICE_AQC_SFF_I2CBUS_7BIT_M, bus_addr >> 1) | + FIELD_PREP(ICE_AQC_SFF_SET_EEPROM_PAGE_M, set_page); if (write) - cmd->i2c_bus_addr |= cpu_to_le16(ICE_AQC_SFF_IS_WRITE); + i2c_bus_addr |= ICE_AQC_SFF_IS_WRITE; + cmd->i2c_bus_addr = cpu_to_le16(i2c_bus_addr); + cmd->i2c_mem_addr = cpu_to_le16(mem_addr & 0xff); + cmd->eeprom_page = le16_encode_bits(page, ICE_AQC_SFF_EEPROM_PAGE_M); status = ice_aq_send_cmd(hw, &desc, data, length, cd); return status; @@ -4345,6 +4156,7 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps, struct ice_aqc_dis_txq_item *item; struct ice_aqc_dis_txqs *cmd; struct ice_aq_desc desc; + u16 vmvf_and_timeout; u16 i, sz = 0; int status; @@ -4360,27 +4172,26 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps, cmd->num_entries = num_qgrps; - cmd->vmvf_and_timeout = cpu_to_le16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) & - ICE_AQC_Q_DIS_TIMEOUT_M); + vmvf_and_timeout = FIELD_PREP(ICE_AQC_Q_DIS_TIMEOUT_M, 5); switch (rst_src) { case ICE_VM_RESET: cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET; - cmd->vmvf_and_timeout |= - cpu_to_le16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M); + vmvf_and_timeout |= vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M; break; case ICE_VF_RESET: cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET; /* In this case, FW expects vmvf_num to be absolute VF ID */ - cmd->vmvf_and_timeout |= - cpu_to_le16((vmvf_num + hw->func_caps.vf_base_id) & - ICE_AQC_Q_DIS_VMVF_NUM_M); + vmvf_and_timeout |= (vmvf_num + hw->func_caps.vf_base_id) & + ICE_AQC_Q_DIS_VMVF_NUM_M; break; case ICE_NO_RESET: default: break; } + cmd->vmvf_and_timeout = cpu_to_le16(vmvf_and_timeout); + /* flush pipe on time out */ cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE; /* If no queue group info, we are in a reset flow. 
Issue the AQ */ @@ -4455,10 +4266,8 @@ ice_aq_cfg_lan_txq(struct ice_hw *hw, struct ice_aqc_cfg_txqs_buf *buf, cmd->cmd_type = ICE_AQC_Q_CFG_TC_CHNG; cmd->num_qs = num_qs; cmd->port_num_chng = (oldport & ICE_AQC_Q_CFG_SRC_PRT_M); - cmd->port_num_chng |= (newport << ICE_AQC_Q_CFG_DST_PRT_S) & - ICE_AQC_Q_CFG_DST_PRT_M; - cmd->time_out = (5 << ICE_AQC_Q_CFG_TIMEOUT_S) & - ICE_AQC_Q_CFG_TIMEOUT_M; + cmd->port_num_chng |= FIELD_PREP(ICE_AQC_Q_CFG_DST_PRT_M, newport); + cmd->time_out = FIELD_PREP(ICE_AQC_Q_CFG_TIMEOUT_M, 5); cmd->blocked_cgds = 0; status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); @@ -5538,6 +5347,35 @@ ice_aq_get_phy_rec_clk_out(struct ice_hw *hw, u8 *phy_output, u8 *port_num, return status; } +/** + * ice_aq_get_sensor_reading + * @hw: pointer to the HW struct + * @data: pointer to data to be read from the sensor + * + * Get sensor reading (0x0632) + */ +int ice_aq_get_sensor_reading(struct ice_hw *hw, + struct ice_aqc_get_sensor_reading_resp *data) +{ + struct ice_aqc_get_sensor_reading *cmd; + struct ice_aq_desc desc; + int status; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sensor_reading); + cmd = &desc.params.get_sensor_reading; +#define ICE_INTERNAL_TEMP_SENSOR_FORMAT 0 +#define ICE_INTERNAL_TEMP_SENSOR 0 + cmd->sensor = ICE_INTERNAL_TEMP_SENSOR; + cmd->format = ICE_INTERNAL_TEMP_SENSOR_FORMAT; + + status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); + if (!status) + memcpy(data, &desc.params.get_sensor_reading_resp, + sizeof(*data)); + + return status; +} + /** * ice_replay_pre_init - replay pre initialization * @hw: pointer to the HW struct @@ -5933,7 +5771,7 @@ ice_get_link_default_override(struct ice_link_default_override_tlv *ldo, ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n"); return status; } - ldo->options = buf & ICE_LINK_OVERRIDE_OPT_M; + ldo->options = FIELD_GET(ICE_LINK_OVERRIDE_OPT_M, buf); ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >> ICE_LINK_OVERRIDE_PHY_CFG_S; diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h index 31fdcac339..3e933f75e9 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.h +++ b/drivers/net/ethernet/intel/ice/ice_common.h @@ -6,6 +6,7 @@ #include +#include "ice.h" #include "ice_type.h" #include "ice_nvm.h" #include "ice_flex_pipe.h" @@ -199,7 +200,6 @@ ice_aq_cfg_lan_txq(struct ice_hw *hw, struct ice_aqc_cfg_txqs_buf *buf, struct ice_sq_cd *cd); int ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle); void ice_replay_post(struct ice_hw *hw); -void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf); struct ice_q_ctx * ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle); int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in); @@ -241,6 +241,8 @@ ice_aq_set_phy_rec_clk_out(struct ice_hw *hw, u8 phy_output, bool enable, int ice_aq_get_phy_rec_clk_out(struct ice_hw *hw, u8 *phy_output, u8 *port_num, u8 *flags, u16 *node_handle); +int ice_aq_get_sensor_reading(struct ice_hw *hw, + struct ice_aqc_get_sensor_reading_resp *data); void ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat); diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c index 396e555023..74418c445c 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb.c @@ -35,8 +35,7 @@ ice_aq_get_lldp_mib(struct ice_hw *hw, u8 bridge_type, u8 mib_type, void *buf, 
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_get_mib); cmd->type = mib_type & ICE_AQ_LLDP_MIB_TYPE_M; - cmd->type |= (bridge_type << ICE_AQ_LLDP_BRID_TYPE_S) & - ICE_AQ_LLDP_BRID_TYPE_M; + cmd->type |= FIELD_PREP(ICE_AQ_LLDP_BRID_TYPE_M, bridge_type); desc.datalen = cpu_to_le16(buf_size); @@ -147,8 +146,7 @@ static u8 ice_get_dcbx_status(struct ice_hw *hw) u32 reg; reg = rd32(hw, PRTDCB_GENS); - return (u8)((reg & PRTDCB_GENS_DCBX_STATUS_M) >> - PRTDCB_GENS_DCBX_STATUS_S); + return FIELD_GET(PRTDCB_GENS_DCBX_STATUS_M, reg); } /** @@ -174,11 +172,9 @@ ice_parse_ieee_ets_common_tlv(u8 *buf, struct ice_dcb_ets_cfg *ets_cfg) */ for (i = 0; i < 4; i++) { ets_cfg->prio_table[i * 2] = - ((buf[offset] & ICE_IEEE_ETS_PRIO_1_M) >> - ICE_IEEE_ETS_PRIO_1_S); + FIELD_GET(ICE_IEEE_ETS_PRIO_1_M, buf[offset]); ets_cfg->prio_table[i * 2 + 1] = - ((buf[offset] & ICE_IEEE_ETS_PRIO_0_M) >> - ICE_IEEE_ETS_PRIO_0_S); + FIELD_GET(ICE_IEEE_ETS_PRIO_0_M, buf[offset]); offset++; } @@ -222,11 +218,9 @@ ice_parse_ieee_etscfg_tlv(struct ice_lldp_org_tlv *tlv, * |1bit | 1bit|3 bits|3bits| */ etscfg = &dcbcfg->etscfg; - etscfg->willing = ((buf[0] & ICE_IEEE_ETS_WILLING_M) >> - ICE_IEEE_ETS_WILLING_S); - etscfg->cbs = ((buf[0] & ICE_IEEE_ETS_CBS_M) >> ICE_IEEE_ETS_CBS_S); - etscfg->maxtcs = ((buf[0] & ICE_IEEE_ETS_MAXTC_M) >> - ICE_IEEE_ETS_MAXTC_S); + etscfg->willing = FIELD_GET(ICE_IEEE_ETS_WILLING_M, buf[0]); + etscfg->cbs = FIELD_GET(ICE_IEEE_ETS_CBS_M, buf[0]); + etscfg->maxtcs = FIELD_GET(ICE_IEEE_ETS_MAXTC_M, buf[0]); /* Begin parsing at Priority Assignment Table (offset 1 in buf) */ ice_parse_ieee_ets_common_tlv(&buf[1], etscfg); @@ -268,11 +262,9 @@ ice_parse_ieee_pfccfg_tlv(struct ice_lldp_org_tlv *tlv, * ----------------------------------------- * |1bit | 1bit|2 bits|4bits| 1 octet | */ - dcbcfg->pfc.willing = ((buf[0] & ICE_IEEE_PFC_WILLING_M) >> - ICE_IEEE_PFC_WILLING_S); - dcbcfg->pfc.mbc = ((buf[0] & ICE_IEEE_PFC_MBC_M) >> ICE_IEEE_PFC_MBC_S); - dcbcfg->pfc.pfccap = ((buf[0] & ICE_IEEE_PFC_CAP_M) >> - ICE_IEEE_PFC_CAP_S); + dcbcfg->pfc.willing = FIELD_GET(ICE_IEEE_PFC_WILLING_M, buf[0]); + dcbcfg->pfc.mbc = FIELD_GET(ICE_IEEE_PFC_MBC_M, buf[0]); + dcbcfg->pfc.pfccap = FIELD_GET(ICE_IEEE_PFC_CAP_M, buf[0]); dcbcfg->pfc.pfcena = buf[1]; } @@ -294,7 +286,7 @@ ice_parse_ieee_app_tlv(struct ice_lldp_org_tlv *tlv, u8 *buf; typelen = ntohs(tlv->typelen); - len = ((typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S); + len = FIELD_GET(ICE_LLDP_TLV_LEN_M, typelen); buf = tlv->tlvinfo; /* Removing sizeof(ouisubtype) and reserved byte from len. 
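A note on the conversion pattern running through these hunks: FIELD_GET() and FIELD_PREP() from <linux/bitfield.h> derive the shift from the mask at compile time, which is what lets the patch drop the open-coded *_S shift defines. A minimal sketch of the idiom follows; DEMO_FIELD_M is a made-up mask for illustration, not a real ice register field.

	#include <linux/bitfield.h>
	#include <linux/bits.h>
	#include <linux/types.h>

	#define DEMO_FIELD_M	GENMASK(10, 4)	/* hypothetical 7-bit field */

	/* was: reg |= (val << DEMO_FIELD_S) & DEMO_FIELD_M; */
	static u16 demo_pack(u16 reg, u8 val)
	{
		return reg | FIELD_PREP(DEMO_FIELD_M, val);
	}

	/* was: (reg & DEMO_FIELD_M) >> DEMO_FIELD_S; */
	static u8 demo_unpack(u16 reg)
	{
		return FIELD_GET(DEMO_FIELD_M, reg);
	}

For constant inputs, FIELD_PREP() additionally verifies at compile time that the value fits within the mask, something the open-coded shift-and-mask form silently truncated.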
@@ -314,12 +306,10 @@ ice_parse_ieee_app_tlv(struct ice_lldp_org_tlv *tlv, * ----------------------------------------- */ while (offset < len) { - dcbcfg->app[i].priority = ((buf[offset] & - ICE_IEEE_APP_PRIO_M) >> - ICE_IEEE_APP_PRIO_S); - dcbcfg->app[i].selector = ((buf[offset] & - ICE_IEEE_APP_SEL_M) >> - ICE_IEEE_APP_SEL_S); + dcbcfg->app[i].priority = FIELD_GET(ICE_IEEE_APP_PRIO_M, + buf[offset]); + dcbcfg->app[i].selector = FIELD_GET(ICE_IEEE_APP_SEL_M, + buf[offset]); dcbcfg->app[i].prot_id = (buf[offset + 1] << 0x8) | buf[offset + 2]; /* Move to next app */ @@ -347,8 +337,7 @@ ice_parse_ieee_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg) u8 subtype; ouisubtype = ntohl(tlv->ouisubtype); - subtype = (u8)((ouisubtype & ICE_LLDP_TLV_SUBTYPE_M) >> - ICE_LLDP_TLV_SUBTYPE_S); + subtype = FIELD_GET(ICE_LLDP_TLV_SUBTYPE_M, ouisubtype); switch (subtype) { case ICE_IEEE_SUBTYPE_ETS_CFG: ice_parse_ieee_etscfg_tlv(tlv, dcbcfg); @@ -399,11 +388,9 @@ ice_parse_cee_pgcfg_tlv(struct ice_cee_feat_tlv *tlv, */ for (i = 0; i < 4; i++) { etscfg->prio_table[i * 2] = - ((buf[offset] & ICE_CEE_PGID_PRIO_1_M) >> - ICE_CEE_PGID_PRIO_1_S); + FIELD_GET(ICE_CEE_PGID_PRIO_1_M, buf[offset]); etscfg->prio_table[i * 2 + 1] = - ((buf[offset] & ICE_CEE_PGID_PRIO_0_M) >> - ICE_CEE_PGID_PRIO_0_S); + FIELD_GET(ICE_CEE_PGID_PRIO_0_M, buf[offset]); offset++; } @@ -466,7 +453,7 @@ ice_parse_cee_app_tlv(struct ice_cee_feat_tlv *tlv, struct ice_dcbx_cfg *dcbcfg) u8 i; typelen = ntohs(tlv->hdr.typelen); - len = ((typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S); + len = FIELD_GET(ICE_LLDP_TLV_LEN_M, typelen); dcbcfg->numapps = len / sizeof(*app); if (!dcbcfg->numapps) @@ -521,14 +508,13 @@ ice_parse_cee_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg) u32 ouisubtype; ouisubtype = ntohl(tlv->ouisubtype); - subtype = (u8)((ouisubtype & ICE_LLDP_TLV_SUBTYPE_M) >> - ICE_LLDP_TLV_SUBTYPE_S); + subtype = FIELD_GET(ICE_LLDP_TLV_SUBTYPE_M, ouisubtype); /* Return if not CEE DCBX */ if (subtype != ICE_CEE_DCBX_TYPE) return; typelen = ntohs(tlv->typelen); - tlvlen = ((typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S); + tlvlen = FIELD_GET(ICE_LLDP_TLV_LEN_M, typelen); len = sizeof(tlv->typelen) + sizeof(ouisubtype) + sizeof(struct ice_cee_ctrl_tlv); /* Return if no CEE DCBX Feature TLVs */ @@ -540,9 +526,8 @@ ice_parse_cee_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg) u16 sublen; typelen = ntohs(sub_tlv->hdr.typelen); - sublen = ((typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S); - subtype = (u8)((typelen & ICE_LLDP_TLV_TYPE_M) >> - ICE_LLDP_TLV_TYPE_S); + sublen = FIELD_GET(ICE_LLDP_TLV_LEN_M, typelen); + subtype = FIELD_GET(ICE_LLDP_TLV_TYPE_M, typelen); switch (subtype) { case ICE_CEE_SUBTYPE_PG_CFG: ice_parse_cee_pgcfg_tlv(sub_tlv, dcbcfg); @@ -579,7 +564,7 @@ ice_parse_org_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg) u32 oui; ouisubtype = ntohl(tlv->ouisubtype); - oui = ((ouisubtype & ICE_LLDP_TLV_OUI_M) >> ICE_LLDP_TLV_OUI_S); + oui = FIELD_GET(ICE_LLDP_TLV_OUI_M, ouisubtype); switch (oui) { case ICE_IEEE_8021QAZ_OUI: ice_parse_ieee_tlv(tlv, dcbcfg); @@ -616,8 +601,8 @@ static int ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg) tlv = (struct ice_lldp_org_tlv *)lldpmib; while (1) { typelen = ntohs(tlv->typelen); - type = ((typelen & ICE_LLDP_TLV_TYPE_M) >> ICE_LLDP_TLV_TYPE_S); - len = ((typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S); + type = FIELD_GET(ICE_LLDP_TLV_TYPE_M, typelen); + len = FIELD_GET(ICE_LLDP_TLV_LEN_M, typelen); 
offset += sizeof(typelen) + len; /* END TLV or beyond LLDPDU size */ @@ -806,11 +791,11 @@ ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg, */ for (i = 0; i < ICE_MAX_TRAFFIC_CLASS / 2; i++) { dcbcfg->etscfg.prio_table[i * 2] = - ((cee_cfg->oper_prio_tc[i] & ICE_CEE_PGID_PRIO_0_M) >> - ICE_CEE_PGID_PRIO_0_S); + FIELD_GET(ICE_CEE_PGID_PRIO_0_M, + cee_cfg->oper_prio_tc[i]); dcbcfg->etscfg.prio_table[i * 2 + 1] = - ((cee_cfg->oper_prio_tc[i] & ICE_CEE_PGID_PRIO_1_M) >> - ICE_CEE_PGID_PRIO_1_S); + FIELD_GET(ICE_CEE_PGID_PRIO_1_M, + cee_cfg->oper_prio_tc[i]); } ice_for_each_traffic_class(i) { @@ -982,7 +967,7 @@ void ice_get_dcb_cfg_from_mib_change(struct ice_port_info *pi, mib = (struct ice_aqc_lldp_get_mib *)&event->desc.params.raw; - change_type = FIELD_GET(ICE_AQ_LLDP_MIB_TYPE_M, mib->type); + change_type = FIELD_GET(ICE_AQ_LLDP_MIB_TYPE_M, mib->type); if (change_type == ICE_AQ_LLDP_MIB_REMOTE) dcbx_cfg = &pi->qos_cfg.remote_dcbx_cfg; @@ -1483,7 +1468,7 @@ ice_dcb_cfg_to_lldp(u8 *lldpmib, u16 *miblen, struct ice_dcbx_cfg *dcbcfg) while (1) { ice_add_dcb_tlv(tlv, dcbcfg, tlvid++); typelen = ntohs(tlv->typelen); - len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S; + len = FIELD_GET(ICE_LLDP_TLV_LEN_M, typelen); if (len) offset += len + 2; /* END TLV or beyond LLDPDU size */ diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c index 850db8e0e6..6e20ee6100 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c @@ -934,7 +934,7 @@ ice_tx_prepare_vlan_flags_dcb(struct ice_tx_ring *tx_ring, skb->priority != TC_PRIO_CONTROL) { first->vid &= ~VLAN_PRIO_MASK; /* Mask the lower 3 bits to set the 802.1p priority */ - first->vid |= (skb->priority << VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK; + first->vid |= FIELD_PREP(VLAN_PRIO_MASK, skb->priority); /* if this is not already set it means a VLAN 0 + priority needs * to be offloaded */ diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c index e1fbc6de45..6d50b90a73 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c @@ -227,7 +227,7 @@ static void ice_get_pfc_delay(struct ice_hw *hw, u16 *delay) u32 val; val = rd32(hw, PRTDCB_GENC); - *delay = (u16)((val & PRTDCB_GENC_PFCLDA_M) >> PRTDCB_GENC_PFCLDA_S); + *delay = FIELD_GET(PRTDCB_GENC_PFCLDA_M, val); } /** diff --git a/drivers/net/ethernet/intel/ice/ice_debugfs.c b/drivers/net/ethernet/intel/ice/ice_debugfs.c new file mode 100644 index 0000000000..c2bfba6b9e --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_debugfs.c @@ -0,0 +1,667 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2022, Intel Corporation. */ + +#include +#include +#include +#include +#include "ice.h" + +static struct dentry *ice_debugfs_root; + +/* create a define that has an extra module that doesn't really exist. this + * is so we can add a module 'all' to easily enable/disable all the modules + */ +#define ICE_NR_FW_LOG_MODULES (ICE_AQC_FW_LOG_ID_MAX + 1) + +/* the ordering in this array is important. 
it matches the ordering of the + * values in the FW so the index is the same value as in ice_aqc_fw_logging_mod + */ +static const char * const ice_fwlog_module_string[] = { + "general", + "ctrl", + "link", + "link_topo", + "dnl", + "i2c", + "sdp", + "mdio", + "adminq", + "hdma", + "lldp", + "dcbx", + "dcb", + "xlr", + "nvm", + "auth", + "vpd", + "iosf", + "parser", + "sw", + "scheduler", + "txq", + "rsvd", + "post", + "watchdog", + "task_dispatch", + "mng", + "synce", + "health", + "tsdrv", + "pfreg", + "mdlver", + "all", +}; + +/* the ordering in this array is important. it matches the ordering of the + * values in the FW so the index is the same value as in ice_fwlog_level + */ +static const char * const ice_fwlog_level_string[] = { + "none", + "error", + "warning", + "normal", + "verbose", +}; + +/* the order in this array is important. it matches the ordering of the + * values in the FW so the index is the same value as in ice_fwlog_level + */ +static const char * const ice_fwlog_log_size[] = { + "128K", + "256K", + "512K", + "1M", + "2M", +}; + +/** + * ice_fwlog_print_module_cfg - print current FW logging module configuration + * @hw: pointer to the HW structure + * @module: module to print + * @s: the seq file to put data into + */ +static void +ice_fwlog_print_module_cfg(struct ice_hw *hw, int module, struct seq_file *s) +{ + struct ice_fwlog_cfg *cfg = &hw->fwlog_cfg; + struct ice_fwlog_module_entry *entry; + + if (module != ICE_AQC_FW_LOG_ID_MAX) { + entry = &cfg->module_entries[module]; + + seq_printf(s, "\tModule: %s, Log Level: %s\n", + ice_fwlog_module_string[entry->module_id], + ice_fwlog_level_string[entry->log_level]); + } else { + int i; + + for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) { + entry = &cfg->module_entries[i]; + + seq_printf(s, "\tModule: %s, Log Level: %s\n", + ice_fwlog_module_string[entry->module_id], + ice_fwlog_level_string[entry->log_level]); + } + } +} + +static int ice_find_module_by_dentry(struct ice_pf *pf, struct dentry *d) +{ + int i, module; + + module = -1; + /* find the module based on the dentry */ + for (i = 0; i < ICE_NR_FW_LOG_MODULES; i++) { + if (d == pf->ice_debugfs_pf_fwlog_modules[i]) { + module = i; + break; + } + } + + return module; +} + +/** + * ice_debugfs_module_show - read from 'module' file + * @s: the opened file + * @v: pointer to the offset + */ +static int ice_debugfs_module_show(struct seq_file *s, void *v) +{ + const struct file *filp = s->file; + struct dentry *dentry; + struct ice_pf *pf; + int module; + + dentry = file_dentry(filp); + pf = s->private; + + module = ice_find_module_by_dentry(pf, dentry); + if (module < 0) { + dev_info(ice_pf_to_dev(pf), "unknown module\n"); + return -EINVAL; + } + + ice_fwlog_print_module_cfg(&pf->hw, module, s); + + return 0; +} + +static int ice_debugfs_module_open(struct inode *inode, struct file *filp) +{ + return single_open(filp, ice_debugfs_module_show, inode->i_private); +} + +/** + * ice_debugfs_module_write - write into 'module' file + * @filp: the opened file + * @buf: where to find the user's data + * @count: the length of the user's data + * @ppos: file position offset + */ +static ssize_t +ice_debugfs_module_write(struct file *filp, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct ice_pf *pf = file_inode(filp)->i_private; + struct dentry *dentry = file_dentry(filp); + struct device *dev = ice_pf_to_dev(pf); + char user_val[16], *cmd_buf; + int module, log_level, cnt; + + /* don't allow partial writes or invalid input */ + if (*ppos != 0 || count > 8) + 
return -EINVAL; + + cmd_buf = memdup_user(buf, count); + if (IS_ERR(cmd_buf)) + return PTR_ERR(cmd_buf); + + module = ice_find_module_by_dentry(pf, dentry); + if (module < 0) { + dev_info(dev, "unknown module\n"); + return -EINVAL; + } + + cnt = sscanf(cmd_buf, "%s", user_val); + if (cnt != 1) + return -EINVAL; + + log_level = sysfs_match_string(ice_fwlog_level_string, user_val); + if (log_level < 0) { + dev_info(dev, "unknown log level '%s'\n", user_val); + return -EINVAL; + } + + if (module != ICE_AQC_FW_LOG_ID_MAX) { + ice_pf_fwlog_update_module(pf, log_level, module); + } else { + /* the module 'all' is a shortcut so that we can set + * all of the modules to the same level quickly + */ + int i; + + for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) + ice_pf_fwlog_update_module(pf, log_level, i); + } + + return count; +} + +static const struct file_operations ice_debugfs_module_fops = { + .owner = THIS_MODULE, + .open = ice_debugfs_module_open, + .read = seq_read, + .release = single_release, + .write = ice_debugfs_module_write, +}; + +/** + * ice_debugfs_nr_messages_read - read from 'nr_messages' file + * @filp: the opened file + * @buffer: where to write the data for the user to read + * @count: the size of the user's buffer + * @ppos: file position offset + */ +static ssize_t ice_debugfs_nr_messages_read(struct file *filp, + char __user *buffer, size_t count, + loff_t *ppos) +{ + struct ice_pf *pf = filp->private_data; + struct ice_hw *hw = &pf->hw; + char buff[32] = {}; + + snprintf(buff, sizeof(buff), "%d\n", + hw->fwlog_cfg.log_resolution); + + return simple_read_from_buffer(buffer, count, ppos, buff, strlen(buff)); +} + +/** + * ice_debugfs_nr_messages_write - write into 'nr_messages' file + * @filp: the opened file + * @buf: where to find the user's data + * @count: the length of the user's data + * @ppos: file position offset + */ +static ssize_t +ice_debugfs_nr_messages_write(struct file *filp, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct ice_pf *pf = filp->private_data; + struct device *dev = ice_pf_to_dev(pf); + struct ice_hw *hw = &pf->hw; + char user_val[8], *cmd_buf; + s16 nr_messages; + ssize_t ret; + + /* don't allow partial writes or invalid input */ + if (*ppos != 0 || count > 4) + return -EINVAL; + + cmd_buf = memdup_user(buf, count); + if (IS_ERR(cmd_buf)) + return PTR_ERR(cmd_buf); + + ret = sscanf(cmd_buf, "%s", user_val); + if (ret != 1) + return -EINVAL; + + ret = kstrtos16(user_val, 0, &nr_messages); + if (ret) + return ret; + + if (nr_messages < ICE_AQC_FW_LOG_MIN_RESOLUTION || + nr_messages > ICE_AQC_FW_LOG_MAX_RESOLUTION) { + dev_err(dev, "Invalid FW log number of messages %d, value must be between %d - %d\n", + nr_messages, ICE_AQC_FW_LOG_MIN_RESOLUTION, + ICE_AQC_FW_LOG_MAX_RESOLUTION); + return -EINVAL; + } + + hw->fwlog_cfg.log_resolution = nr_messages; + + return count; +} + +static const struct file_operations ice_debugfs_nr_messages_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = ice_debugfs_nr_messages_read, + .write = ice_debugfs_nr_messages_write, +}; + +/** + * ice_debugfs_enable_read - read from 'enable' file + * @filp: the opened file + * @buffer: where to write the data for the user to read + * @count: the size of the user's buffer + * @ppos: file position offset + */ +static ssize_t ice_debugfs_enable_read(struct file *filp, + char __user *buffer, size_t count, + loff_t *ppos) +{ + struct ice_pf *pf = filp->private_data; + struct ice_hw *hw = &pf->hw; + char buff[32] = {}; + + snprintf(buff, sizeof(buff), 
"%u\n", + (u16)(hw->fwlog_cfg.options & + ICE_FWLOG_OPTION_IS_REGISTERED) >> 3); + + return simple_read_from_buffer(buffer, count, ppos, buff, strlen(buff)); +} + +/** + * ice_debugfs_enable_write - write into 'enable' file + * @filp: the opened file + * @buf: where to find the user's data + * @count: the length of the user's data + * @ppos: file position offset + */ +static ssize_t +ice_debugfs_enable_write(struct file *filp, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct ice_pf *pf = filp->private_data; + struct ice_hw *hw = &pf->hw; + char user_val[8], *cmd_buf; + bool enable; + ssize_t ret; + + /* don't allow partial writes or invalid input */ + if (*ppos != 0 || count > 2) + return -EINVAL; + + cmd_buf = memdup_user(buf, count); + if (IS_ERR(cmd_buf)) + return PTR_ERR(cmd_buf); + + ret = sscanf(cmd_buf, "%s", user_val); + if (ret != 1) + return -EINVAL; + + ret = kstrtobool(user_val, &enable); + if (ret) + goto enable_write_error; + + if (enable) + hw->fwlog_cfg.options |= ICE_FWLOG_OPTION_ARQ_ENA; + else + hw->fwlog_cfg.options &= ~ICE_FWLOG_OPTION_ARQ_ENA; + + ret = ice_fwlog_set(hw, &hw->fwlog_cfg); + if (ret) + goto enable_write_error; + + if (enable) + ret = ice_fwlog_register(hw); + else + ret = ice_fwlog_unregister(hw); + + if (ret) + goto enable_write_error; + + /* if we get here, nothing went wrong; return count since we didn't + * really write anything + */ + ret = (ssize_t)count; + +enable_write_error: + /* This function always consumes all of the written input, or produces + * an error. Check and enforce this. Otherwise, the write operation + * won't complete properly. + */ + if (WARN_ON(ret != (ssize_t)count && ret >= 0)) + ret = -EIO; + + return ret; +} + +static const struct file_operations ice_debugfs_enable_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = ice_debugfs_enable_read, + .write = ice_debugfs_enable_write, +}; + +/** + * ice_debugfs_log_size_read - read from 'log_size' file + * @filp: the opened file + * @buffer: where to write the data for the user to read + * @count: the size of the user's buffer + * @ppos: file position offset + */ +static ssize_t ice_debugfs_log_size_read(struct file *filp, + char __user *buffer, size_t count, + loff_t *ppos) +{ + struct ice_pf *pf = filp->private_data; + struct ice_hw *hw = &pf->hw; + char buff[32] = {}; + int index; + + index = hw->fwlog_ring.index; + snprintf(buff, sizeof(buff), "%s\n", ice_fwlog_log_size[index]); + + return simple_read_from_buffer(buffer, count, ppos, buff, strlen(buff)); +} + +/** + * ice_debugfs_log_size_write - write into 'log_size' file + * @filp: the opened file + * @buf: where to find the user's data + * @count: the length of the user's data + * @ppos: file position offset + */ +static ssize_t +ice_debugfs_log_size_write(struct file *filp, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct ice_pf *pf = filp->private_data; + struct device *dev = ice_pf_to_dev(pf); + struct ice_hw *hw = &pf->hw; + char user_val[8], *cmd_buf; + ssize_t ret; + int index; + + /* don't allow partial writes or invalid input */ + if (*ppos != 0 || count > 5) + return -EINVAL; + + cmd_buf = memdup_user(buf, count); + if (IS_ERR(cmd_buf)) + return PTR_ERR(cmd_buf); + + ret = sscanf(cmd_buf, "%s", user_val); + if (ret != 1) + return -EINVAL; + + index = sysfs_match_string(ice_fwlog_log_size, user_val); + if (index < 0) { + dev_info(dev, "Invalid log size '%s'. 
The value must be one of 128K, 256K, 512K, 1M, 2M\n", + user_val); + ret = -EINVAL; + goto log_size_write_error; + } else if (hw->fwlog_cfg.options & ICE_FWLOG_OPTION_IS_REGISTERED) { + dev_info(dev, "FW logging is currently running. Please disable FW logging to change log_size\n"); + ret = -EINVAL; + goto log_size_write_error; + } + + /* free all the buffers and the tracking info and resize */ + ice_fwlog_realloc_rings(hw, index); + + /* if we get here, nothing went wrong; return count since we didn't + * really write anything + */ + ret = (ssize_t)count; + +log_size_write_error: + /* This function always consumes all of the written input, or produces + * an error. Check and enforce this. Otherwise, the write operation + * won't complete properly. + */ + if (WARN_ON(ret != (ssize_t)count && ret >= 0)) + ret = -EIO; + + return ret; +} + +static const struct file_operations ice_debugfs_log_size_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = ice_debugfs_log_size_read, + .write = ice_debugfs_log_size_write, +}; + +/** + * ice_debugfs_data_read - read from 'data' file + * @filp: the opened file + * @buffer: where to write the data for the user to read + * @count: the size of the user's buffer + * @ppos: file position offset + */ +static ssize_t ice_debugfs_data_read(struct file *filp, char __user *buffer, + size_t count, loff_t *ppos) +{ + struct ice_pf *pf = filp->private_data; + struct ice_hw *hw = &pf->hw; + int data_copied = 0; + bool done = false; + + if (ice_fwlog_ring_empty(&hw->fwlog_ring)) + return 0; + + while (!ice_fwlog_ring_empty(&hw->fwlog_ring) && !done) { + struct ice_fwlog_data *log; + u16 cur_buf_len; + + log = &hw->fwlog_ring.rings[hw->fwlog_ring.head]; + cur_buf_len = log->data_size; + if (cur_buf_len >= count) { + done = true; + continue; + } + + if (copy_to_user(buffer, log->data, cur_buf_len)) { + /* if there is an error then bail and return whatever + * the driver has copied so far + */ + done = true; + continue; + } + + data_copied += cur_buf_len; + buffer += cur_buf_len; + count -= cur_buf_len; + *ppos += cur_buf_len; + ice_fwlog_ring_increment(&hw->fwlog_ring.head, + hw->fwlog_ring.size); + } + + return data_copied; +} + +/** + * ice_debugfs_data_write - write into 'data' file + * @filp: the opened file + * @buf: where to find the user's data + * @count: the length of the user's data + * @ppos: file position offset + */ +static ssize_t +ice_debugfs_data_write(struct file *filp, const char __user *buf, size_t count, + loff_t *ppos) +{ + struct ice_pf *pf = filp->private_data; + struct device *dev = ice_pf_to_dev(pf); + struct ice_hw *hw = &pf->hw; + ssize_t ret; + + /* don't allow partial writes */ + if (*ppos != 0) + return 0; + + /* any value is allowed to clear the buffer so no need to even look at + * what the value is + */ + if (!(hw->fwlog_cfg.options & ICE_FWLOG_OPTION_IS_REGISTERED)) { + hw->fwlog_ring.head = 0; + hw->fwlog_ring.tail = 0; + } else { + dev_info(dev, "Can't clear FW log data while FW log running\n"); + ret = -EINVAL; + goto data_write_error; + } + + /* if we get here, nothing went wrong; return count since we didn't + * really write anything + */ + ret = (ssize_t)count; + +data_write_error: + /* This function always consumes all of the written input, or produces + * an error. Check and enforce this. Otherwise, the write operation + * won't complete properly. 
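+ * (e.g. "echo 1 > enable" is a two-byte write that must be consumed in a + * single call; a short positive return would make userspace retry the + * leftover bytes forever)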
+ */ + if (WARN_ON(ret != (ssize_t)count && ret >= 0)) + ret = -EIO; + + return ret; +} + +static const struct file_operations ice_debugfs_data_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = ice_debugfs_data_read, + .write = ice_debugfs_data_write, +}; + +/** + * ice_debugfs_fwlog_init - setup the debugfs directory + * @pf: the ice PF that is starting up + */ +void ice_debugfs_fwlog_init(struct ice_pf *pf) +{ + const char *name = pci_name(pf->pdev); + struct dentry *fw_modules_dir; + struct dentry **fw_modules; + int i; + + /* only support fw log commands on PF 0 */ + if (pf->hw.bus.func) + return; + + /* allocate space for this first because if it fails then we don't + * need to unwind + */ + fw_modules = kcalloc(ICE_NR_FW_LOG_MODULES, sizeof(*fw_modules), + GFP_KERNEL); + if (!fw_modules) + return; + + pf->ice_debugfs_pf = debugfs_create_dir(name, ice_debugfs_root); + if (IS_ERR(pf->ice_debugfs_pf)) + goto err_create_module_files; + + pf->ice_debugfs_pf_fwlog = debugfs_create_dir("fwlog", + pf->ice_debugfs_pf); + if (IS_ERR(pf->ice_debugfs_pf_fwlog)) + goto err_create_module_files; + + fw_modules_dir = debugfs_create_dir("modules", + pf->ice_debugfs_pf_fwlog); + if (IS_ERR(fw_modules_dir)) + goto err_create_module_files; + + for (i = 0; i < ICE_NR_FW_LOG_MODULES; i++) { + fw_modules[i] = debugfs_create_file(ice_fwlog_module_string[i], + 0600, fw_modules_dir, pf, + &ice_debugfs_module_fops); + if (IS_ERR(fw_modules[i])) + goto err_create_module_files; + } + + debugfs_create_file("nr_messages", 0600, + pf->ice_debugfs_pf_fwlog, pf, + &ice_debugfs_nr_messages_fops); + + pf->ice_debugfs_pf_fwlog_modules = fw_modules; + + debugfs_create_file("enable", 0600, pf->ice_debugfs_pf_fwlog, + pf, &ice_debugfs_enable_fops); + + debugfs_create_file("log_size", 0600, pf->ice_debugfs_pf_fwlog, + pf, &ice_debugfs_log_size_fops); + + debugfs_create_file("data", 0600, pf->ice_debugfs_pf_fwlog, + pf, &ice_debugfs_data_fops); + + return; + +err_create_module_files: + debugfs_remove_recursive(pf->ice_debugfs_pf_fwlog); + kfree(fw_modules); +} + +/** + * ice_debugfs_init - create root directory for debugfs entries + */ +void ice_debugfs_init(void) +{ + ice_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL); + if (IS_ERR(ice_debugfs_root)) + pr_info("init of debugfs failed\n"); +} + +/** + * ice_debugfs_exit - remove debugfs entries + */ +void ice_debugfs_exit(void) +{ + debugfs_remove_recursive(ice_debugfs_root); + ice_debugfs_root = NULL; +} diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c index 80dc5445b5..65be56f2af 100644 --- a/drivers/net/ethernet/intel/ice/ice_devlink.c +++ b/drivers/net/ethernet/intel/ice/ice_devlink.c @@ -193,6 +193,24 @@ ice_info_pending_netlist_build(struct ice_pf __always_unused *pf, snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", netlist->hash); } +static void ice_info_cgu_fw_build(struct ice_pf *pf, struct ice_info_ctx *ctx) +{ + u32 id, cfg_ver, fw_ver; + + if (!ice_is_feature_supported(pf, ICE_F_CGU)) + return; + if (ice_aq_get_cgu_info(&pf->hw, &id, &cfg_ver, &fw_ver)) + return; + snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u", id, cfg_ver, fw_ver); +} + +static void ice_info_cgu_id(struct ice_pf *pf, struct ice_info_ctx *ctx) +{ + if (!ice_is_feature_supported(pf, ICE_F_CGU)) + return; + snprintf(ctx->buf, sizeof(ctx->buf), "%u", pf->hw.cgu_part_number); +} + #define fixed(key, getter) { ICE_VERSION_FIXED, key, getter, NULL } #define running(key, getter) { ICE_VERSION_RUNNING, key, getter, NULL } #define 
stored(key, getter, fallback) { ICE_VERSION_STORED, key, getter, fallback } @@ -235,6 +253,8 @@ static const struct ice_devlink_version { running("fw.app.bundle_id", ice_info_ddp_pkg_bundle_id), combined("fw.netlist", ice_info_netlist_ver, ice_info_pending_netlist_ver), combined("fw.netlist.build", ice_info_netlist_build, ice_info_pending_netlist_build), + fixed("cgu.id", ice_info_cgu_id), + running("fw.cgu", ice_info_cgu_fw_build), }; /** @@ -810,6 +830,10 @@ static void ice_traverse_tx_tree(struct devlink *devlink, struct ice_sched_node struct ice_vf *vf; int i; + if (node->rate_node) + /* already added, skip to the next */ + goto traverse_children; + if (node->parent == tc_node) { /* create root node */ rate_node = devl_rate_node_create(devlink, node, node->name, NULL); @@ -831,6 +855,7 @@ static void ice_traverse_tx_tree(struct devlink *devlink, struct ice_sched_node if (rate_node && !IS_ERR(rate_node)) node->rate_node = rate_node; +traverse_children: for (i = 0; i < node->num_children; i++) ice_traverse_tx_tree(devlink, node->children[i], tc_node, pf); } @@ -861,6 +886,30 @@ int ice_devlink_rate_init_tx_topology(struct devlink *devlink, struct ice_vsi *v return 0; } +static void ice_clear_rate_nodes(struct ice_sched_node *node) +{ + node->rate_node = NULL; + + for (int i = 0; i < node->num_children; i++) + ice_clear_rate_nodes(node->children[i]); +} + +/** + * ice_devlink_rate_clear_tx_topology - clear node->rate_node + * @vsi: main vsi struct + * + * Clear rate_node to cleanup creation of Tx topology. + * + */ +void ice_devlink_rate_clear_tx_topology(struct ice_vsi *vsi) +{ + struct ice_port_info *pi = vsi->port_info; + + mutex_lock(&pi->sched_lock); + ice_clear_rate_nodes(pi->root->children[0]); + mutex_unlock(&pi->sched_lock); +} + /** * ice_set_object_tx_share - sets node scheduling parameter * @pi: devlink struct instance diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.h b/drivers/net/ethernet/intel/ice/ice_devlink.h index 6ec96779f5..d291c0e2e1 100644 --- a/drivers/net/ethernet/intel/ice/ice_devlink.h +++ b/drivers/net/ethernet/intel/ice/ice_devlink.h @@ -20,5 +20,6 @@ void ice_devlink_destroy_regions(struct ice_pf *pf); int ice_devlink_rate_init_tx_topology(struct devlink *devlink, struct ice_vsi *vsi); void ice_tear_down_devlink_rate_tree(struct ice_pf *pf); +void ice_devlink_rate_clear_tx_topology(struct ice_vsi *vsi); #endif /* _ICE_DEVLINK_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_dpll.c b/drivers/net/ethernet/intel/ice/ice_dpll.c index 68b894bb68..bd9b1fed74 100644 --- a/drivers/net/ethernet/intel/ice/ice_dpll.c +++ b/drivers/net/ethernet/intel/ice/ice_dpll.c @@ -551,31 +551,6 @@ ice_dpll_lock_status_get(const struct dpll_device *dpll, void *dpll_priv, return 0; } -/** - * ice_dpll_mode_supported - check if dpll's working mode is supported - * @dpll: registered dpll pointer - * @dpll_priv: private data pointer passed on dpll registration - * @mode: mode to be checked for support - * @extack: error reporting - * - * Dpll subsystem callback. Provides information if working mode is supported - * by dpll. 
- * - * Return: - * * true - mode is supported - * * false - mode is not supported - */ -static bool ice_dpll_mode_supported(const struct dpll_device *dpll, - void *dpll_priv, - enum dpll_mode mode, - struct netlink_ext_ack *extack) -{ - if (mode == DPLL_MODE_AUTOMATIC) - return true; - - return false; -} - /** * ice_dpll_mode_get - get dpll's working mode * @dpll: registered dpll pointer @@ -1259,7 +1234,6 @@ static const struct dpll_pin_ops ice_dpll_output_ops = { static const struct dpll_device_ops ice_dpll_ops = { .lock_status_get = ice_dpll_lock_status_get, - .mode_supported = ice_dpll_mode_supported, .mode_get = ice_dpll_mode_get, }; @@ -1623,7 +1597,7 @@ static void ice_dpll_deinit_rclk_pin(struct ice_pf *pf) } if (WARN_ON_ONCE(!vsi || !vsi->netdev)) return; - netdev_dpll_pin_clear(vsi->netdev); + dpll_netdev_pin_clear(vsi->netdev); dpll_pin_put(rclk->pin); } @@ -1667,7 +1641,7 @@ ice_dpll_init_rclk_pins(struct ice_pf *pf, struct ice_dpll_pin *pin, } if (WARN_ON((!vsi || !vsi->netdev))) return -EINVAL; - netdev_dpll_pin_set(vsi->netdev, pf->dplls.rclk.pin); + dpll_netdev_pin_set(vsi->netdev, pf->dplls.rclk.pin); return 0; diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c index a655d499ab..9069725c71 100644 --- a/drivers/net/ethernet/intel/ice/ice_eswitch.c +++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c @@ -11,17 +11,34 @@ #include "ice_tc_lib.h" /** - * ice_eswitch_add_vf_sp_rule - add adv rule with VF's VSI index + * ice_eswitch_del_sp_rules - delete adv rules added on PRs + * @pf: pointer to the PF struct + * + * Delete all advanced rules that were used to forward packets with the + * device's VSI index to the corresponding eswitch ctrl VSI queue. + */ +static void ice_eswitch_del_sp_rules(struct ice_pf *pf) +{ + struct ice_repr *repr; + unsigned long id; + + xa_for_each(&pf->eswitch.reprs, id, repr) { + if (repr->sp_rule.rid) + ice_rem_adv_rule_by_id(&pf->hw, &repr->sp_rule); + } +} + +/** + * ice_eswitch_add_sp_rule - add adv rule with device's VSI index * @pf: pointer to PF struct - * @vf: pointer to VF struct + * @repr: pointer to the repr struct * * This function adds advanced rule that forwards packets with - * VF's VSI index to the corresponding switchdev ctrl VSI queue. + * device's VSI index to the corresponding eswitch ctrl VSI queue. 
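+ * One such rule exists per port representor; together they form the slow + * path that punts the device's traffic to its representor netdev.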
*/ -static int -ice_eswitch_add_vf_sp_rule(struct ice_pf *pf, struct ice_vf *vf) +static int ice_eswitch_add_sp_rule(struct ice_pf *pf, struct ice_repr *repr) { - struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi; + struct ice_vsi *ctrl_vsi = pf->eswitch.control_vsi; struct ice_adv_rule_info rule_info = { 0 }; struct ice_adv_lkup_elem *list; struct ice_hw *hw = &pf->hw; @@ -38,39 +55,42 @@ ice_eswitch_add_vf_sp_rule(struct ice_pf *pf, struct ice_vf *vf) rule_info.sw_act.vsi_handle = ctrl_vsi->idx; rule_info.sw_act.fltr_act = ICE_FWD_TO_Q; rule_info.sw_act.fwd_id.q_id = hw->func_caps.common_cap.rxq_first_id + - ctrl_vsi->rxq_map[vf->vf_id]; + ctrl_vsi->rxq_map[repr->q_id]; rule_info.flags_info.act |= ICE_SINGLE_ACT_LB_ENABLE; rule_info.flags_info.act_valid = true; rule_info.tun_type = ICE_SW_TUN_AND_NON_TUN; - rule_info.src_vsi = vf->lan_vsi_idx; + rule_info.src_vsi = repr->src_vsi->idx; err = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, - &vf->repr->sp_rule); + &repr->sp_rule); if (err) - dev_err(ice_pf_to_dev(pf), "Unable to add VF slow-path rule in switchdev mode for VF %d", - vf->vf_id); + dev_err(ice_pf_to_dev(pf), "Unable to add slow-path rule for eswitch for PR %d", + repr->id); kfree(list); return err; } -/** - * ice_eswitch_del_vf_sp_rule - delete adv rule with VF's VSI index - * @vf: pointer to the VF struct - * - * Delete the advanced rule that was used to forward packets with the VF's VSI - * index to the corresponding switchdev ctrl VSI queue. - */ -static void ice_eswitch_del_vf_sp_rule(struct ice_vf *vf) +static int +ice_eswitch_add_sp_rules(struct ice_pf *pf) { - if (!vf->repr) - return; + struct ice_repr *repr; + unsigned long id; + int err; + + xa_for_each(&pf->eswitch.reprs, id, repr) { + err = ice_eswitch_add_sp_rule(pf, repr); + if (err) { + ice_eswitch_del_sp_rules(pf); + return err; + } + } - ice_rem_adv_rule_by_id(&vf->pf->hw, &vf->repr->sp_rule); + return 0; } /** - * ice_eswitch_setup_env - configure switchdev HW filters + * ice_eswitch_setup_env - configure eswitch HW filters * @pf: pointer to PF struct * * This function adds HW filters configuration specific for switchdev @@ -78,18 +98,18 @@ static void ice_eswitch_del_vf_sp_rule(struct ice_vf *vf) */ static int ice_eswitch_setup_env(struct ice_pf *pf) { - struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi; - struct net_device *uplink_netdev = uplink_vsi->netdev; - struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi; + struct ice_vsi *uplink_vsi = pf->eswitch.uplink_vsi; + struct ice_vsi *ctrl_vsi = pf->eswitch.control_vsi; + struct net_device *netdev = uplink_vsi->netdev; struct ice_vsi_vlan_ops *vlan_ops; bool rule_added = false; ice_remove_vsi_fltr(&pf->hw, uplink_vsi->idx); - netif_addr_lock_bh(uplink_netdev); - __dev_uc_unsync(uplink_netdev, NULL); - __dev_mc_unsync(uplink_netdev, NULL); - netif_addr_unlock_bh(uplink_netdev); + netif_addr_lock_bh(netdev); + __dev_uc_unsync(netdev, NULL); + __dev_mc_unsync(netdev, NULL); + netif_addr_unlock_bh(netdev); if (ice_vsi_add_vlan_zero(uplink_vsi)) goto err_def_rx; @@ -132,19 +152,20 @@ err_def_rx: } /** - * ice_eswitch_remap_rings_to_vectors - reconfigure rings of switchdev ctrl VSI - * @pf: pointer to PF struct + * ice_eswitch_remap_rings_to_vectors - reconfigure rings of eswitch ctrl VSI + * @eswitch: pointer to eswitch struct * - * In switchdev number of allocated Tx/Rx rings is equal. + * In eswitch number of allocated Tx/Rx rings is equal. 
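+ * (the control VSI is resized via the eswitch qs bookkeeping so that it + * always owns one ring pair per registered representor)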
* * This function fills q_vectors structures associated with representors and + * moves each ring pair to a port representor netdev. Each port representor + * gets one dedicated Tx/Rx ring pair, so the number of ring pairs equals the + * number of representors. */ -static void ice_eswitch_remap_rings_to_vectors(struct ice_pf *pf) +static void ice_eswitch_remap_rings_to_vectors(struct ice_eswitch *eswitch) { - struct ice_vsi *vsi = pf->switchdev.control_vsi; + struct ice_vsi *vsi = eswitch->control_vsi; + unsigned long repr_id = 0; int q_id; ice_for_each_txq(vsi, q_id) { @@ -152,13 +173,14 @@ static void ice_eswitch_remap_rings_to_vectors(struct ice_pf *pf) struct ice_tx_ring *tx_ring; struct ice_rx_ring *rx_ring; struct ice_repr *repr; - struct ice_vf *vf; - vf = ice_get_vf_by_id(pf, q_id); - if (WARN_ON(!vf)) - continue; + repr = xa_find(&eswitch->reprs, &repr_id, U32_MAX, + XA_PRESENT); + if (!repr) + break; - repr = vf->repr; + repr_id += 1; + repr->q_id = q_id; q_vector = repr->q_vector; tx_ring = vsi->tx_rings[q_id]; rx_ring = vsi->rx_rings[q_id]; @@ -181,136 +203,96 @@ static void ice_eswitch_remap_rings_to_vectors(struct ice_pf *pf) rx_ring->q_vector = q_vector; rx_ring->next = NULL; rx_ring->netdev = repr->netdev; - - ice_put_vf(vf); } } /** - * ice_eswitch_release_reprs - clear PR VSIs configuration + * ice_eswitch_release_repr - clear PR VSI configuration * @pf: pointer to PF struct - * @ctrl_vsi: pointer to switchdev control VSI + * @repr: pointer to PR */ static void -ice_eswitch_release_reprs(struct ice_pf *pf, struct ice_vsi *ctrl_vsi) +ice_eswitch_release_repr(struct ice_pf *pf, struct ice_repr *repr) { - struct ice_vf *vf; - unsigned int bkt; + struct ice_vsi *vsi = repr->src_vsi; - lockdep_assert_held(&pf->vfs.table_lock); - - ice_for_each_vf(pf, bkt, vf) { - struct ice_vsi *vsi = vf->repr->src_vsi; - - /* Skip VFs that aren't configured */ - if (!vf->repr->dst) - continue; + /* Skip representors that aren't configured */ + if (!repr->dst) + return; - ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof); - metadata_dst_free(vf->repr->dst); - vf->repr->dst = NULL; - ice_eswitch_del_vf_sp_rule(vf); - ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr, - ICE_FWD_TO_VSI); + ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof); + metadata_dst_free(repr->dst); + repr->dst = NULL; + ice_fltr_add_mac_and_broadcast(vsi, repr->parent_mac, + ICE_FWD_TO_VSI); - netif_napi_del(&vf->repr->q_vector->napi); - } + netif_napi_del(&repr->q_vector->napi); } /** - * ice_eswitch_setup_reprs - configure port reprs to run in switchdev mode + * ice_eswitch_setup_repr - configure PR to run in switchdev mode * @pf: pointer to PF struct + * @repr: pointer to PR struct */ -static int ice_eswitch_setup_reprs(struct ice_pf *pf) +static int ice_eswitch_setup_repr(struct ice_pf *pf, struct ice_repr *repr) { - struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi; - int max_vsi_num = 0; - struct ice_vf *vf; - unsigned int bkt; - - lockdep_assert_held(&pf->vfs.table_lock); - - ice_for_each_vf(pf, bkt, vf) { - struct ice_vsi *vsi = vf->repr->src_vsi; - - ice_remove_vsi_fltr(&pf->hw, vsi->idx); - vf->repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX, - GFP_KERNEL); - if (!vf->repr->dst) { - ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr, - ICE_FWD_TO_VSI); - goto err; - } + struct ice_vsi *ctrl_vsi = pf->eswitch.control_vsi; + struct ice_vsi *vsi = repr->src_vsi; + struct metadata_dst *dst; - if (ice_eswitch_add_vf_sp_rule(pf, vf)) { - ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr, - ICE_FWD_TO_VSI); - 
goto err; - } + ice_remove_vsi_fltr(&pf->hw, vsi->idx); + repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX, + GFP_KERNEL); + if (!repr->dst) + goto err_add_mac_fltr; - if (ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof)) { - ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr, - ICE_FWD_TO_VSI); - ice_eswitch_del_vf_sp_rule(vf); - metadata_dst_free(vf->repr->dst); - vf->repr->dst = NULL; - goto err; - } + if (ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof)) + goto err_dst_free; - if (ice_vsi_add_vlan_zero(vsi)) { - ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr, - ICE_FWD_TO_VSI); - ice_eswitch_del_vf_sp_rule(vf); - metadata_dst_free(vf->repr->dst); - vf->repr->dst = NULL; - ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof); - goto err; - } - - if (max_vsi_num < vsi->vsi_num) - max_vsi_num = vsi->vsi_num; + if (ice_vsi_add_vlan_zero(vsi)) + goto err_update_security; - netif_napi_add(vf->repr->netdev, &vf->repr->q_vector->napi, - ice_napi_poll); + netif_napi_add(repr->netdev, &repr->q_vector->napi, + ice_napi_poll); - netif_keep_dst(vf->repr->netdev); - } + netif_keep_dst(repr->netdev); - ice_for_each_vf(pf, bkt, vf) { - struct ice_repr *repr = vf->repr; - struct ice_vsi *vsi = repr->src_vsi; - struct metadata_dst *dst; - - dst = repr->dst; - dst->u.port_info.port_id = vsi->vsi_num; - dst->u.port_info.lower_dev = repr->netdev; - ice_repr_set_traffic_vsi(repr, ctrl_vsi); - } + dst = repr->dst; + dst->u.port_info.port_id = vsi->vsi_num; + dst->u.port_info.lower_dev = repr->netdev; + ice_repr_set_traffic_vsi(repr, ctrl_vsi); return 0; -err: - ice_eswitch_release_reprs(pf, ctrl_vsi); +err_update_security: + ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof); +err_dst_free: + metadata_dst_free(repr->dst); + repr->dst = NULL; +err_add_mac_fltr: + ice_fltr_add_mac_and_broadcast(vsi, repr->parent_mac, ICE_FWD_TO_VSI); return -ENODEV; } /** - * ice_eswitch_update_repr - reconfigure VF port representor - * @vsi: VF VSI for which port representor is configured + * ice_eswitch_update_repr - reconfigure port representor + * @repr_id: representor ID + * @vsi: VSI for which port representor is configured */ -void ice_eswitch_update_repr(struct ice_vsi *vsi) +void ice_eswitch_update_repr(unsigned long repr_id, struct ice_vsi *vsi) { struct ice_pf *pf = vsi->back; struct ice_repr *repr; - struct ice_vf *vf; int ret; if (!ice_is_switchdev_running(pf)) return; - vf = vsi->vf; - repr = vf->repr; + repr = xa_load(&pf->eswitch.reprs, repr_id); + if (!repr) + return; + repr->src_vsi = vsi; repr->dst->u.port_info.port_id = vsi->vsi_num; @@ -319,9 +301,10 @@ void ice_eswitch_update_repr(struct ice_vsi *vsi) ret = ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof); if (ret) { - ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr, ICE_FWD_TO_VSI); - dev_err(ice_pf_to_dev(pf), "Failed to update VF %d port representor", - vsi->vf->vf_id); + ice_fltr_add_mac_and_broadcast(vsi, repr->parent_mac, + ICE_FWD_TO_VSI); + dev_err(ice_pf_to_dev(pf), "Failed to update VSI of port representor %d", + repr->id); } } @@ -353,13 +336,13 @@ ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev) skb_dst_drop(skb); dst_hold((struct dst_entry *)repr->dst); skb_dst_set(skb, (struct dst_entry *)repr->dst); - skb->queue_mapping = repr->vf->vf_id; + skb->queue_mapping = repr->q_id; return ice_start_xmit(skb, netdev); } /** - * ice_eswitch_set_target_vsi - set switchdev context in Tx context descriptor + * ice_eswitch_set_target_vsi - set eswitch context in Tx context 
descriptor * @skb: pointer to send buffer * @off: pointer to offload struct */ @@ -375,14 +358,14 @@ ice_eswitch_set_target_vsi(struct sk_buff *skb, off->cd_qw1 |= (cd_cmd | ICE_TX_DESC_DTYPE_CTX); } else { cd_cmd = ICE_TX_CTX_DESC_SWTCH_VSI << ICE_TXD_CTX_QW1_CMD_S; - dst_vsi = ((u64)dst->u.port_info.port_id << - ICE_TXD_CTX_QW1_VSI_S) & ICE_TXD_CTX_QW1_VSI_M; + dst_vsi = FIELD_PREP(ICE_TXD_CTX_QW1_VSI_M, + dst->u.port_info.port_id); off->cd_qw1 = cd_cmd | dst_vsi | ICE_TX_DESC_DTYPE_CTX; } } /** - * ice_eswitch_release_env - clear switchdev HW filters + * ice_eswitch_release_env - clear eswitch HW filters * @pf: pointer to PF struct * * This function removes HW filters configuration specific for switchdev @@ -390,8 +373,8 @@ ice_eswitch_set_target_vsi(struct sk_buff *skb, */ static void ice_eswitch_release_env(struct ice_pf *pf) { - struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi; - struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi; + struct ice_vsi *uplink_vsi = pf->eswitch.uplink_vsi; + struct ice_vsi *ctrl_vsi = pf->eswitch.control_vsi; struct ice_vsi_vlan_ops *vlan_ops; vlan_ops = ice_get_compat_vsi_vlan_ops(uplink_vsi); @@ -407,7 +390,7 @@ static void ice_eswitch_release_env(struct ice_pf *pf) } /** - * ice_eswitch_vsi_setup - configure switchdev control VSI + * ice_eswitch_vsi_setup - configure eswitch control VSI * @pf: pointer to PF structure * @pi: pointer to port_info structure */ @@ -423,49 +406,30 @@ ice_eswitch_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) return ice_vsi_setup(pf, &params); } -/** - * ice_eswitch_napi_del - remove NAPI handle for all port representors - * @pf: pointer to PF structure - */ -static void ice_eswitch_napi_del(struct ice_pf *pf) -{ - struct ice_vf *vf; - unsigned int bkt; - - lockdep_assert_held(&pf->vfs.table_lock); - - ice_for_each_vf(pf, bkt, vf) - netif_napi_del(&vf->repr->q_vector->napi); -} - /** * ice_eswitch_napi_enable - enable NAPI for all port representors - * @pf: pointer to PF structure + * @reprs: xarray of reprs */ -static void ice_eswitch_napi_enable(struct ice_pf *pf) +static void ice_eswitch_napi_enable(struct xarray *reprs) { - struct ice_vf *vf; - unsigned int bkt; - - lockdep_assert_held(&pf->vfs.table_lock); + struct ice_repr *repr; + unsigned long id; - ice_for_each_vf(pf, bkt, vf) - napi_enable(&vf->repr->q_vector->napi); + xa_for_each(reprs, id, repr) + napi_enable(&repr->q_vector->napi); } /** * ice_eswitch_napi_disable - disable NAPI for all port representors - * @pf: pointer to PF structure + * @reprs: xarray of reprs */ -static void ice_eswitch_napi_disable(struct ice_pf *pf) +static void ice_eswitch_napi_disable(struct xarray *reprs) { - struct ice_vf *vf; - unsigned int bkt; - - lockdep_assert_held(&pf->vfs.table_lock); + struct ice_repr *repr; + unsigned long id; - ice_for_each_vf(pf, bkt, vf) - napi_disable(&vf->repr->q_vector->napi); + xa_for_each(reprs, id, repr) + napi_disable(&repr->q_vector->napi); } /** @@ -486,39 +450,26 @@ static int ice_eswitch_enable_switchdev(struct ice_pf *pf) return -EINVAL; } - pf->switchdev.control_vsi = ice_eswitch_vsi_setup(pf, pf->hw.port_info); - if (!pf->switchdev.control_vsi) + pf->eswitch.control_vsi = ice_eswitch_vsi_setup(pf, pf->hw.port_info); + if (!pf->eswitch.control_vsi) return -ENODEV; - ctrl_vsi = pf->switchdev.control_vsi; - pf->switchdev.uplink_vsi = uplink_vsi; + ctrl_vsi = pf->eswitch.control_vsi; + /* cp VSI is created with 1 queue as default */ + pf->eswitch.qs.value = 1; + pf->eswitch.uplink_vsi = uplink_vsi; if (ice_eswitch_setup_env(pf)) goto 
err_vsi; - if (ice_repr_add_for_all_vfs(pf)) - goto err_repr_add; - - if (ice_eswitch_setup_reprs(pf)) - goto err_setup_reprs; - - ice_eswitch_remap_rings_to_vectors(pf); - - if (ice_vsi_open(ctrl_vsi)) - goto err_setup_reprs; - if (ice_eswitch_br_offloads_init(pf)) goto err_br_offloads; - ice_eswitch_napi_enable(pf); + pf->eswitch.is_running = true; return 0; err_br_offloads: - ice_vsi_close(ctrl_vsi); -err_setup_reprs: - ice_repr_rem_from_all_vfs(pf); -err_repr_add: ice_eswitch_release_env(pf); err_vsi: ice_vsi_release(ctrl_vsi); @@ -526,19 +477,19 @@ err_vsi: } /** - * ice_eswitch_disable_switchdev - disable switchdev resources + * ice_eswitch_disable_switchdev - disable eswitch resources * @pf: pointer to PF structure */ static void ice_eswitch_disable_switchdev(struct ice_pf *pf) { - struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi; + struct ice_vsi *ctrl_vsi = pf->eswitch.control_vsi; - ice_eswitch_napi_disable(pf); ice_eswitch_br_offloads_deinit(pf); ice_eswitch_release_env(pf); - ice_eswitch_release_reprs(pf, ctrl_vsi); ice_vsi_release(ctrl_vsi); - ice_repr_rem_from_all_vfs(pf); + + pf->eswitch.is_running = false; + pf->eswitch.qs.is_reaching = false; } /** @@ -566,6 +517,7 @@ ice_eswitch_mode_set(struct devlink *devlink, u16 mode, case DEVLINK_ESWITCH_MODE_LEGACY: dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to legacy", pf->hw.pf_id); + xa_destroy(&pf->eswitch.reprs); NL_SET_ERR_MSG_MOD(extack, "Changed eswitch mode to legacy"); break; case DEVLINK_ESWITCH_MODE_SWITCHDEV: @@ -578,6 +530,7 @@ ice_eswitch_mode_set(struct devlink *devlink, u16 mode, dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to switchdev", pf->hw.pf_id); + xa_init_flags(&pf->eswitch.reprs, XA_FLAGS_ALLOC); NL_SET_ERR_MSG_MOD(extack, "Changed eswitch mode to switchdev"); break; } @@ -616,74 +569,168 @@ bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf) } /** - * ice_eswitch_release - cleanup eswitch + * ice_eswitch_start_all_tx_queues - start Tx queues of all port representors * @pf: pointer to PF structure */ -void ice_eswitch_release(struct ice_pf *pf) +static void ice_eswitch_start_all_tx_queues(struct ice_pf *pf) { - if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY) + struct ice_repr *repr; + unsigned long id; + + if (test_bit(ICE_DOWN, pf->state)) return; - ice_eswitch_disable_switchdev(pf); - pf->switchdev.is_running = false; + xa_for_each(&pf->eswitch.reprs, id, repr) + ice_repr_start_tx_queues(repr); } /** - * ice_eswitch_configure - configure eswitch + * ice_eswitch_stop_all_tx_queues - stop Tx queues of all port representors * @pf: pointer to PF structure */ -int ice_eswitch_configure(struct ice_pf *pf) +void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf) { - int status; - - if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY || pf->switchdev.is_running) - return 0; + struct ice_repr *repr; + unsigned long id; - status = ice_eswitch_enable_switchdev(pf); - if (status) - return status; + if (test_bit(ICE_DOWN, pf->state)) + return; - pf->switchdev.is_running = true; - return 0; + xa_for_each(&pf->eswitch.reprs, id, repr) + ice_repr_stop_tx_queues(repr); } -/** - * ice_eswitch_start_all_tx_queues - start Tx queues of all port representors - * @pf: pointer to PF structure - */ -static void ice_eswitch_start_all_tx_queues(struct ice_pf *pf) +static void ice_eswitch_stop_reprs(struct ice_pf *pf) { - struct ice_vf *vf; - unsigned int bkt; + ice_eswitch_del_sp_rules(pf); + ice_eswitch_stop_all_tx_queues(pf); + ice_eswitch_napi_disable(&pf->eswitch.reprs); +} - 
lockdep_assert_held(&pf->vfs.table_lock); +static void ice_eswitch_start_reprs(struct ice_pf *pf) +{ + ice_eswitch_napi_enable(&pf->eswitch.reprs); + ice_eswitch_start_all_tx_queues(pf); + ice_eswitch_add_sp_rules(pf); +} - if (test_bit(ICE_DOWN, pf->state)) - return; +static void +ice_eswitch_cp_change_queues(struct ice_eswitch *eswitch, int change) +{ + struct ice_vsi *cp = eswitch->control_vsi; + int queues = 0; + + if (eswitch->qs.is_reaching) { + if (eswitch->qs.to_reach >= eswitch->qs.value + change) { + queues = eswitch->qs.to_reach; + eswitch->qs.is_reaching = false; + } else { + queues = 0; + } + } else if ((change > 0 && cp->alloc_txq <= eswitch->qs.value) || + change < 0) { + queues = cp->alloc_txq + change; + } - ice_for_each_vf(pf, bkt, vf) { - if (vf->repr) - ice_repr_start_tx_queues(vf->repr); + if (queues) { + cp->req_txq = queues; + cp->req_rxq = queues; + ice_vsi_close(cp); + ice_vsi_rebuild(cp, ICE_VSI_FLAG_NO_INIT); + ice_vsi_open(cp); + } else if (!change) { + /* change == 0 means that VSI wasn't open, open it here */ + ice_vsi_open(cp); } + + eswitch->qs.value += change; + ice_eswitch_remap_rings_to_vectors(eswitch); } -/** - * ice_eswitch_stop_all_tx_queues - stop Tx queues of all port representors - * @pf: pointer to PF structure - */ -void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf) +int +ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf) { - struct ice_vf *vf; - unsigned int bkt; + struct ice_repr *repr; + int change = 1; + int err; - lockdep_assert_held(&pf->vfs.table_lock); + if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY) + return 0; - if (test_bit(ICE_DOWN, pf->state)) + if (xa_empty(&pf->eswitch.reprs)) { + err = ice_eswitch_enable_switchdev(pf); + if (err) + return err; + /* Control plane VSI is created with 1 queue as default */ + pf->eswitch.qs.to_reach -= 1; + change = 0; + } + + ice_eswitch_stop_reprs(pf); + + repr = ice_repr_add_vf(vf); + if (IS_ERR(repr)) { + err = PTR_ERR(repr); + goto err_create_repr; + } + + err = ice_eswitch_setup_repr(pf, repr); + if (err) + goto err_setup_repr; + + err = xa_alloc(&pf->eswitch.reprs, &repr->id, repr, + XA_LIMIT(1, INT_MAX), GFP_KERNEL); + if (err) + goto err_xa_alloc; + + vf->repr_id = repr->id; + + ice_eswitch_cp_change_queues(&pf->eswitch, change); + ice_eswitch_start_reprs(pf); + + return 0; + +err_xa_alloc: + ice_eswitch_release_repr(pf, repr); +err_setup_repr: + ice_repr_rem_vf(repr); +err_create_repr: + if (xa_empty(&pf->eswitch.reprs)) + ice_eswitch_disable_switchdev(pf); + ice_eswitch_start_reprs(pf); + + return err; +} + +void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf) +{ + struct ice_repr *repr = xa_load(&pf->eswitch.reprs, vf->repr_id); + struct devlink *devlink = priv_to_devlink(pf); + + if (!repr) return; - ice_for_each_vf(pf, bkt, vf) { - if (vf->repr) - ice_repr_stop_tx_queues(vf->repr); + ice_eswitch_stop_reprs(pf); + xa_erase(&pf->eswitch.reprs, repr->id); + + if (xa_empty(&pf->eswitch.reprs)) + ice_eswitch_disable_switchdev(pf); + else + ice_eswitch_cp_change_queues(&pf->eswitch, -1); + + ice_eswitch_release_repr(pf, repr); + ice_repr_rem_vf(repr); + + if (xa_empty(&pf->eswitch.reprs)) { + /* since all port representors are destroyed, there is + * no point in keeping the nodes + */ + ice_devlink_rate_clear_tx_topology(ice_get_main_vsi(pf)); + devl_lock(devlink); + devl_rate_nodes_destroy(devlink); + devl_unlock(devlink); + } else { + ice_eswitch_start_reprs(pf); } } @@ -693,30 +740,35 @@ void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf) */ int 
ice_eswitch_rebuild(struct ice_pf *pf) { - struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi; - int status; - - ice_eswitch_napi_disable(pf); - ice_eswitch_napi_del(pf); - - status = ice_eswitch_setup_env(pf); - if (status) - return status; + struct ice_repr *repr; + unsigned long id; + int err; - status = ice_eswitch_setup_reprs(pf); - if (status) - return status; + if (!ice_is_switchdev_running(pf)) + return 0; - ice_eswitch_remap_rings_to_vectors(pf); + err = ice_vsi_rebuild(pf->eswitch.control_vsi, ICE_VSI_FLAG_INIT); + if (err) + return err; - ice_replay_tc_fltrs(pf); + xa_for_each(&pf->eswitch.reprs, id, repr) + ice_eswitch_detach(pf, repr->vf); - status = ice_vsi_open(ctrl_vsi); - if (status) - return status; + return 0; +} - ice_eswitch_napi_enable(pf); - ice_eswitch_start_all_tx_queues(pf); +/** + * ice_eswitch_reserve_cp_queues - reserve control plane VSI queues + * @pf: pointer to PF structure + * @change: how many more (or less) queues is needed + * + * Remember to call ice_eswitch_attach/detach() the "change" times. + */ +void ice_eswitch_reserve_cp_queues(struct ice_pf *pf, int change) +{ + if (pf->eswitch.qs.value + change < 0) + return; - return 0; + pf->eswitch.qs.to_reach = pf->eswitch.qs.value + change; + pf->eswitch.qs.is_reaching = true; } diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.h b/drivers/net/ethernet/intel/ice/ice_eswitch.h index b18bf83a2f..1a288a03a7 100644 --- a/drivers/net/ethernet/intel/ice/ice_eswitch.h +++ b/drivers/net/ethernet/intel/ice/ice_eswitch.h @@ -7,8 +7,9 @@ #include #ifdef CONFIG_ICE_SWITCHDEV -void ice_eswitch_release(struct ice_pf *pf); -int ice_eswitch_configure(struct ice_pf *pf); +void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf); +int +ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf); int ice_eswitch_rebuild(struct ice_pf *pf); int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode); @@ -17,7 +18,7 @@ ice_eswitch_mode_set(struct devlink *devlink, u16 mode, struct netlink_ext_ack *extack); bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf); -void ice_eswitch_update_repr(struct ice_vsi *vsi); +void ice_eswitch_update_repr(unsigned long repr_id, struct ice_vsi *vsi); void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf); @@ -25,8 +26,15 @@ void ice_eswitch_set_target_vsi(struct sk_buff *skb, struct ice_tx_offload_params *off); netdev_tx_t ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev); +void ice_eswitch_reserve_cp_queues(struct ice_pf *pf, int change); #else /* CONFIG_ICE_SWITCHDEV */ -static inline void ice_eswitch_release(struct ice_pf *pf) { } +static inline void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf) { } + +static inline int +ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf) +{ + return -EOPNOTSUPP; +} static inline void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf) { } @@ -34,7 +42,8 @@ static inline void ice_eswitch_set_target_vsi(struct sk_buff *skb, struct ice_tx_offload_params *off) { } -static inline void ice_eswitch_update_repr(struct ice_vsi *vsi) { } +static inline void +ice_eswitch_update_repr(unsigned long repr_id, struct ice_vsi *vsi) { } static inline int ice_eswitch_configure(struct ice_pf *pf) { @@ -68,5 +77,8 @@ ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev) { return NETDEV_TX_BUSY; } + +static inline void +ice_eswitch_reserve_cp_queues(struct ice_pf *pf, int change) { } #endif /* CONFIG_ICE_SWITCHDEV */ #endif /* _ICE_ESWITCH_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch_br.c 
b/drivers/net/ethernet/intel/ice/ice_eswitch_br.c index 6ae0269bdf..ac5beecd02 100644 --- a/drivers/net/ethernet/intel/ice/ice_eswitch_br.c +++ b/drivers/net/ethernet/intel/ice/ice_eswitch_br.c @@ -893,10 +893,14 @@ ice_eswitch_br_port_deinit(struct ice_esw_br *bridge, ice_eswitch_br_fdb_entry_delete(bridge, fdb_entry); } - if (br_port->type == ICE_ESWITCH_BR_UPLINK_PORT && vsi->back) + if (br_port->type == ICE_ESWITCH_BR_UPLINK_PORT && vsi->back) { vsi->back->br_port = NULL; - else if (vsi->vf && vsi->vf->repr) - vsi->vf->repr->br_port = NULL; + } else { + struct ice_repr *repr = ice_repr_get_by_vsi(vsi); + + if (repr) + repr->br_port = NULL; + } xa_erase(&bridge->ports, br_port->vsi_idx); ice_eswitch_br_port_vlans_flush(br_port); @@ -947,7 +951,7 @@ ice_eswitch_br_vf_repr_port_init(struct ice_esw_br *bridge, static int ice_eswitch_br_uplink_port_init(struct ice_esw_br *bridge, struct ice_pf *pf) { - struct ice_vsi *vsi = pf->switchdev.uplink_vsi; + struct ice_vsi *vsi = pf->eswitch.uplink_vsi; struct ice_esw_br_port *br_port; int err; @@ -1185,7 +1189,7 @@ ice_eswitch_br_port_event(struct notifier_block *nb, static void ice_eswitch_br_offloads_dealloc(struct ice_pf *pf) { - struct ice_esw_br_offloads *br_offloads = pf->switchdev.br_offloads; + struct ice_esw_br_offloads *br_offloads = pf->eswitch.br_offloads; ASSERT_RTNL(); @@ -1194,7 +1198,7 @@ ice_eswitch_br_offloads_dealloc(struct ice_pf *pf) ice_eswitch_br_deinit(br_offloads, br_offloads->bridge); - pf->switchdev.br_offloads = NULL; + pf->eswitch.br_offloads = NULL; kfree(br_offloads); } @@ -1205,14 +1209,14 @@ ice_eswitch_br_offloads_alloc(struct ice_pf *pf) ASSERT_RTNL(); - if (pf->switchdev.br_offloads) + if (pf->eswitch.br_offloads) return ERR_PTR(-EEXIST); br_offloads = kzalloc(sizeof(*br_offloads), GFP_KERNEL); if (!br_offloads) return ERR_PTR(-ENOMEM); - pf->switchdev.br_offloads = br_offloads; + pf->eswitch.br_offloads = br_offloads; br_offloads->pf = pf; return br_offloads; @@ -1223,7 +1227,7 @@ ice_eswitch_br_offloads_deinit(struct ice_pf *pf) { struct ice_esw_br_offloads *br_offloads; - br_offloads = pf->switchdev.br_offloads; + br_offloads = pf->eswitch.br_offloads; if (!br_offloads) return; diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index bde9bc74f9..a19b06f18e 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -129,7 +129,6 @@ static const struct ice_stats ice_gstrings_pf_stats[] = { ICE_PF_STAT("rx_oversize.nic", stats.rx_oversize), ICE_PF_STAT("rx_jabber.nic", stats.rx_jabber), ICE_PF_STAT("rx_csum_bad.nic", hw_csum_rx_error), - ICE_PF_STAT("rx_length_errors.nic", stats.rx_len_errors), ICE_PF_STAT("rx_dropped.nic", stats.eth.rx_discards), ICE_PF_STAT("rx_crc_errors.nic", stats.crc_errors), ICE_PF_STAT("illegal_bytes.nic", stats.illegal_bytes), @@ -1142,8 +1141,7 @@ __ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data, switch (stringset) { case ETH_SS_STATS: for (i = 0; i < ICE_VSI_STATS_LEN; i++) - ethtool_sprintf(&p, "%s", - ice_gstrings_vsi_stats[i].stat_string); + ethtool_puts(&p, ice_gstrings_vsi_stats[i].stat_string); if (ice_is_port_repr_netdev(netdev)) return; @@ -1162,8 +1160,7 @@ __ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data, return; for (i = 0; i < ICE_PF_STATS_LEN; i++) - ethtool_sprintf(&p, "%s", - ice_gstrings_pf_stats[i].stat_string); + ethtool_puts(&p, ice_gstrings_pf_stats[i].stat_string); for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) { 
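/* per-priority flow control counters: one XON/XOFF pair for each UP */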
ethtool_sprintf(&p, "tx_priority_%u_xon.nic", i); @@ -1179,8 +1176,7 @@ __ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data, break; case ETH_SS_PRIV_FLAGS: for (i = 0; i < ICE_PRIV_FLAG_ARRAY_SIZE; i++) - ethtool_sprintf(&p, "%s", - ice_gstrings_priv_flags[i].name); + ethtool_puts(&p, ice_gstrings_priv_flags[i].name); break; default: break; @@ -2505,27 +2501,15 @@ static u32 ice_parse_hdrs(struct ethtool_rxnfc *nfc) return hdrs; } -#define ICE_FLOW_HASH_FLD_IPV4_SA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) -#define ICE_FLOW_HASH_FLD_IPV6_SA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) -#define ICE_FLOW_HASH_FLD_IPV4_DA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) -#define ICE_FLOW_HASH_FLD_IPV6_DA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA) -#define ICE_FLOW_HASH_FLD_TCP_SRC_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT) -#define ICE_FLOW_HASH_FLD_TCP_DST_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT) -#define ICE_FLOW_HASH_FLD_UDP_SRC_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT) -#define ICE_FLOW_HASH_FLD_UDP_DST_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT) -#define ICE_FLOW_HASH_FLD_SCTP_SRC_PORT \ - BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT) -#define ICE_FLOW_HASH_FLD_SCTP_DST_PORT \ - BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT) - /** * ice_parse_hash_flds - parses hash fields from RSS hash input * @nfc: ethtool rxnfc command + * @symm: true if Symmetric Toeplitz is set * * This function parses the rxnfc command and returns intended * hash fields for RSS configuration */ -static u64 ice_parse_hash_flds(struct ethtool_rxnfc *nfc) +static u64 ice_parse_hash_flds(struct ethtool_rxnfc *nfc, bool symm) { u64 hfld = ICE_HASH_INVALID; @@ -2594,9 +2578,11 @@ static int ice_set_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc) { struct ice_pf *pf = vsi->back; + struct ice_rss_hash_cfg cfg; struct device *dev; u64 hashed_flds; int status; + bool symm; u32 hdrs; dev = ice_pf_to_dev(pf); @@ -2606,7 +2592,8 @@ ice_set_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc) return -EINVAL; } - hashed_flds = ice_parse_hash_flds(nfc); + symm = !!(vsi->rss_hfunc == ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ); + hashed_flds = ice_parse_hash_flds(nfc, symm); if (hashed_flds == ICE_HASH_INVALID) { dev_dbg(dev, "Invalid hash fields, vsi num = %d\n", vsi->vsi_num); @@ -2620,7 +2607,12 @@ ice_set_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc) return -EINVAL; } - status = ice_add_rss_cfg(&pf->hw, vsi->idx, hashed_flds, hdrs); + cfg.hash_flds = hashed_flds; + cfg.addl_hdrs = hdrs; + cfg.hdr_type = ICE_RSS_ANY_HEADERS; + cfg.symm = symm; + + status = ice_add_rss_cfg(&pf->hw, vsi, &cfg); if (status) { dev_dbg(dev, "ice_add_rss_cfg failed, vsi num = %d, error = %d\n", vsi->vsi_num, status); @@ -2641,6 +2633,7 @@ ice_get_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc) struct ice_pf *pf = vsi->back; struct device *dev; u64 hash_flds; + bool symm; u32 hdrs; dev = ice_pf_to_dev(pf); @@ -2659,7 +2652,7 @@ ice_get_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc) return; } - hash_flds = ice_get_rss_cfg(&pf->hw, vsi->idx, hdrs); + hash_flds = ice_get_rss_cfg(&pf->hw, vsi->idx, hdrs, &symm); if (hash_flds == ICE_HASH_INVALID) { dev_dbg(dev, "No hash fields found for the given header type, vsi num = %d\n", vsi->vsi_num); @@ -3198,11 +3191,18 @@ static u32 ice_get_rxfh_indir_size(struct net_device *netdev) return np->vsi->rss_table_size; } +/** + * ice_get_rxfh - get the Rx flow hash indirection table + * @netdev: network interface device structure + * @rxfh: pointer to param struct (indir, key, 
hfunc) + * + * Reads the indirection table directly from the hardware. + */ static int -ice_get_rxfh_context(struct net_device *netdev, u32 *indir, - u8 *key, u8 *hfunc, u32 rss_context) +ice_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh) { struct ice_netdev_priv *np = netdev_priv(netdev); + u32 rss_context = rxfh->rss_context; struct ice_vsi *vsi = np->vsi; struct ice_pf *pf = vsi->back; u16 qcount, offset; @@ -3233,17 +3233,18 @@ ice_get_rxfh_context(struct net_device *netdev, u32 *indir, vsi = vsi->tc_map_vsi[rss_context]; } - if (hfunc) - *hfunc = ETH_RSS_HASH_TOP; + rxfh->hfunc = ETH_RSS_HASH_TOP; + if (vsi->rss_hfunc == ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ) + rxfh->input_xfrm |= RXH_XFRM_SYM_XOR; - if (!indir) + if (!rxfh->indir) return 0; lut = kzalloc(vsi->rss_table_size, GFP_KERNEL); if (!lut) return -ENOMEM; - err = ice_get_rss_key(vsi, key); + err = ice_get_rss_key(vsi, rxfh->key); if (err) goto out; @@ -3253,55 +3254,44 @@ ice_get_rxfh_context(struct net_device *netdev, u32 *indir, if (ice_is_adq_active(pf)) { for (i = 0; i < vsi->rss_table_size; i++) - indir[i] = offset + lut[i] % qcount; + rxfh->indir[i] = offset + lut[i] % qcount; goto out; } for (i = 0; i < vsi->rss_table_size; i++) - indir[i] = lut[i]; + rxfh->indir[i] = lut[i]; out: kfree(lut); return err; } -/** - * ice_get_rxfh - get the Rx flow hash indirection table - * @netdev: network interface device structure - * @indir: indirection table - * @key: hash key - * @hfunc: hash function - * - * Reads the indirection table directly from the hardware. - */ -static int -ice_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc) -{ - return ice_get_rxfh_context(netdev, indir, key, hfunc, 0); -} - /** * ice_set_rxfh - set the Rx flow hash indirection table * @netdev: network interface device structure - * @indir: indirection table - * @key: hash key - * @hfunc: hash function + * @rxfh: pointer to param struct (indir, key, hfunc) + * @extack: extended ACK from the Netlink message * * Returns -EINVAL if the table specifies an invalid queue ID, otherwise * returns 0 after programming the table. 
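* Requesting RXH_XFRM_SYM_XOR switches the VSI to the symmetric Toeplitz * hash function (see the ice_set_rss_hfunc() call below).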
*/ static int -ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key, - const u8 hfunc) +ice_set_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh, + struct netlink_ext_ack *extack) { struct ice_netdev_priv *np = netdev_priv(netdev); + u8 hfunc = ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ; struct ice_vsi *vsi = np->vsi; struct ice_pf *pf = vsi->back; struct device *dev; int err; dev = ice_pf_to_dev(pf); - if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) + if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE && + rxfh->hfunc != ETH_RSS_HASH_TOP) + return -EOPNOTSUPP; + + if (rxfh->rss_context) return -EOPNOTSUPP; if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) { @@ -3315,7 +3305,15 @@ ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key, return -EOPNOTSUPP; } - if (key) { + /* Update the VSI's hash function */ + if (rxfh->input_xfrm & RXH_XFRM_SYM_XOR) + hfunc = ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ; + + err = ice_set_rss_hfunc(vsi, hfunc); + if (err) + return err; + + if (rxfh->key) { if (!vsi->rss_hkey_user) { vsi->rss_hkey_user = devm_kzalloc(dev, ICE_VSIQF_HKEY_ARRAY_SIZE, @@ -3323,7 +3321,8 @@ ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key, if (!vsi->rss_hkey_user) return -ENOMEM; } - memcpy(vsi->rss_hkey_user, key, ICE_VSIQF_HKEY_ARRAY_SIZE); + memcpy(vsi->rss_hkey_user, rxfh->key, + ICE_VSIQF_HKEY_ARRAY_SIZE); err = ice_set_rss_key(vsi, vsi->rss_hkey_user); if (err) @@ -3338,11 +3337,11 @@ ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key, } /* Each 32 bits pointed by 'indir' is stored with a lut entry */ - if (indir) { + if (rxfh->indir) { int i; for (i = 0; i < vsi->rss_table_size; i++) - vsi->rss_lut_user[i] = (u8)(indir[i]); + vsi->rss_lut_user[i] = (u8)(rxfh->indir[i]); } else { ice_fill_rss_lut(vsi->rss_lut_user, vsi->rss_table_size, vsi->rss_size); @@ -4220,9 +4219,11 @@ ice_get_module_eeprom(struct net_device *netdev, } static const struct ethtool_ops ice_ethtool_ops = { + .cap_rss_ctx_supported = true, .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_USE_ADAPTIVE | ETHTOOL_COALESCE_RX_USECS_HIGH, + .cap_rss_sym_xor_supported = true, .get_link_ksettings = ice_get_link_ksettings, .set_link_ksettings = ice_set_link_ksettings, .get_drvinfo = ice_get_drvinfo, @@ -4253,7 +4254,6 @@ static const struct ethtool_ops ice_ethtool_ops = { .set_pauseparam = ice_set_pauseparam, .get_rxfh_key_size = ice_get_rxfh_key_size, .get_rxfh_indir_size = ice_get_rxfh_indir_size, - .get_rxfh_context = ice_get_rxfh_context, .get_rxfh = ice_get_rxfh, .set_rxfh = ice_set_rxfh, .get_channels = ice_get_channels, diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c index d151e5bacf..9a1a04f5f1 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c @@ -302,9 +302,7 @@ void ice_fdir_rem_adq_chnl(struct ice_hw *hw, u16 vsi_idx) continue; for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) { - u64 prof_id; - - prof_id = flow + tun * ICE_FLTR_PTYPE_MAX; + u64 prof_id = prof->prof_id[tun]; for (i = 0; i < prof->cnt; i++) { if (prof->vsi_h[i] != vsi_idx) @@ -362,10 +360,9 @@ ice_fdir_erase_flow_from_hw(struct ice_hw *hw, enum ice_block blk, int flow) return; for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) { - u64 prof_id; + u64 prof_id = prof->prof_id[tun]; int j; - prof_id = flow + tun * ICE_FLTR_PTYPE_MAX; for (j = 0; j < prof->cnt; j++) { u16 vsi_num; @@ -439,14 +436,12 @@ void 
ice_fdir_replay_flows(struct ice_hw *hw) for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) { struct ice_flow_prof *hw_prof; struct ice_fd_hw_prof *prof; - u64 prof_id; int j; prof = hw->fdir_prof[flow]; - prof_id = flow + tun * ICE_FLTR_PTYPE_MAX; - ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, + ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof->fdir_seg[tun], TNL_SEG_CNT(tun), - &hw_prof); + false, &hw_prof); for (j = 0; j < prof->cnt; j++) { enum ice_flow_priority prio; u64 entry_h = 0; @@ -454,7 +449,7 @@ void ice_fdir_replay_flows(struct ice_hw *hw) prio = ICE_FLOW_PRIO_NORMAL; err = ice_flow_add_entry(hw, ICE_BLK_FD, - prof_id, + hw_prof->id, prof->vsi_h[0], prof->vsi_h[j], prio, prof->fdir_seg, @@ -464,6 +459,7 @@ void ice_fdir_replay_flows(struct ice_hw *hw) flow); continue; } + prof->prof_id[tun] = hw_prof->id; prof->entry_h[j][tun] = entry_h; } } @@ -507,8 +503,7 @@ ice_parse_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp, return -EINVAL; data->flex_word = value & ICE_USERDEF_FLEX_WORD_M; - data->flex_offset = (value & ICE_USERDEF_FLEX_OFFS_M) >> - ICE_USERDEF_FLEX_OFFS_S; + data->flex_offset = FIELD_GET(ICE_USERDEF_FLEX_OFFS_M, value); if (data->flex_offset > ICE_USERDEF_FLEX_MAX_OFFS_VAL) return -EINVAL; @@ -638,7 +633,6 @@ ice_fdir_set_hw_fltr_rule(struct ice_pf *pf, struct ice_flow_seg_info *seg, u64 entry1_h = 0; u64 entry2_h = 0; bool del_last; - u64 prof_id; int err; int idx; @@ -668,7 +662,7 @@ ice_fdir_set_hw_fltr_rule(struct ice_pf *pf, struct ice_flow_seg_info *seg, * then return error. */ if (hw->fdir_fltr_cnt[flow]) { - dev_err(dev, "Failed to add filter. Flow director filters on each port must have the same input set.\n"); + dev_err(dev, "Failed to add filter. Flow director filters on each port must have the same input set.\n"); return -EINVAL; } @@ -686,23 +680,23 @@ ice_fdir_set_hw_fltr_rule(struct ice_pf *pf, struct ice_flow_seg_info *seg, * That is the final parameters are 1 header (segment), no * actions (NULL) and zero actions 0. 
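* (the profile carries only the match criteria; each filter supplies its * own action later through the FD programming descriptor)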
*/ - prof_id = flow + tun * ICE_FLTR_PTYPE_MAX; - err = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg, - TNL_SEG_CNT(tun), &prof); + err = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, seg, + TNL_SEG_CNT(tun), false, &prof); if (err) return err; - err = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx, + err = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id, main_vsi->idx, main_vsi->idx, ICE_FLOW_PRIO_NORMAL, seg, &entry1_h); if (err) goto err_prof; - err = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx, + err = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id, main_vsi->idx, ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL, seg, &entry2_h); if (err) goto err_entry; hw_prof->fdir_seg[tun] = seg; + hw_prof->prof_id[tun] = prof->id; hw_prof->entry_h[0][tun] = entry1_h; hw_prof->entry_h[1][tun] = entry2_h; hw_prof->vsi_h[0] = main_vsi->idx; @@ -719,7 +713,7 @@ ice_fdir_set_hw_fltr_rule(struct ice_pf *pf, struct ice_flow_seg_info *seg, entry1_h = 0; vsi_h = main_vsi->tc_map_vsi[idx]->idx; - err = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, + err = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id, main_vsi->idx, vsi_h, ICE_FLOW_PRIO_NORMAL, seg, &entry1_h); @@ -756,7 +750,7 @@ err_unroll: if (!hw_prof->entry_h[idx][tun]) continue; - ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id); + ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof->id); ice_flow_rem_entry(hw, ICE_BLK_FD, hw_prof->entry_h[idx][tun]); hw_prof->entry_h[idx][tun] = 0; if (del_last) @@ -766,11 +760,11 @@ err_unroll: hw_prof->cnt = 0; err_entry: ice_rem_prof_id_flow(hw, ICE_BLK_FD, - ice_get_hw_vsi_num(hw, main_vsi->idx), prof_id); + ice_get_hw_vsi_num(hw, main_vsi->idx), prof->id); ice_flow_rem_entry(hw, ICE_BLK_FD, entry1_h); err_prof: - ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id); - dev_err(dev, "Failed to add filter. Flow director filters on each port must have the same input set.\n"); + ice_flow_rem_prof(hw, ICE_BLK_FD, prof->id); + dev_err(dev, "Failed to add filter. Flow director filters on each port must have the same input set.\n"); return err; } @@ -1853,6 +1847,7 @@ int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd) struct ice_pf *pf; struct ice_hw *hw; int fltrs_needed; + u32 max_location; u16 tunnel_port; int ret; @@ -1884,8 +1879,10 @@ int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd) if (ret) return ret; - if (fsp->location >= ice_get_fdir_cnt_all(hw)) { - dev_err(dev, "Failed to add filter. The maximum number of flow director filters has been reached.\n"); + max_location = ice_get_fdir_cnt_all(hw); + if (fsp->location >= max_location) { + dev_err(dev, "Failed to add filter. The number of ntuple filters or provided location exceed max %d.\n", + max_location); return -ENOSPC; } @@ -1893,7 +1890,7 @@ int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd) fltrs_needed = ice_get_open_tunnel_port(hw, &tunnel_port, TNL_ALL) ? 2 : 1; if (!ice_fdir_find_fltr_by_idx(hw, fsp->location) && ice_fdir_num_avail_fltr(hw, pf->vsi[vsi->idx]) < fltrs_needed) { - dev_err(dev, "Failed to add filter. The maximum number of flow director filters has been reached.\n"); + dev_err(dev, "Failed to add filter. 
The maximum number of flow director filters has been reached.\n"); return -ENOSPC; } diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.c b/drivers/net/ethernet/intel/ice/ice_fdir.c index ae089d32ee..5840c3e04a 100644 --- a/drivers/net/ethernet/intel/ice/ice_fdir.c +++ b/drivers/net/ethernet/intel/ice/ice_fdir.c @@ -604,55 +604,32 @@ ice_set_fd_desc_val(struct ice_fd_fltr_desc_ctx *ctx, u64 qword; /* prep QW0 of FD filter programming desc */ - qword = ((u64)ctx->qindex << ICE_FXD_FLTR_QW0_QINDEX_S) & - ICE_FXD_FLTR_QW0_QINDEX_M; - qword |= ((u64)ctx->comp_q << ICE_FXD_FLTR_QW0_COMP_Q_S) & - ICE_FXD_FLTR_QW0_COMP_Q_M; - qword |= ((u64)ctx->comp_report << ICE_FXD_FLTR_QW0_COMP_REPORT_S) & - ICE_FXD_FLTR_QW0_COMP_REPORT_M; - qword |= ((u64)ctx->fd_space << ICE_FXD_FLTR_QW0_FD_SPACE_S) & - ICE_FXD_FLTR_QW0_FD_SPACE_M; - qword |= ((u64)ctx->cnt_index << ICE_FXD_FLTR_QW0_STAT_CNT_S) & - ICE_FXD_FLTR_QW0_STAT_CNT_M; - qword |= ((u64)ctx->cnt_ena << ICE_FXD_FLTR_QW0_STAT_ENA_S) & - ICE_FXD_FLTR_QW0_STAT_ENA_M; - qword |= ((u64)ctx->evict_ena << ICE_FXD_FLTR_QW0_EVICT_ENA_S) & - ICE_FXD_FLTR_QW0_EVICT_ENA_M; - qword |= ((u64)ctx->toq << ICE_FXD_FLTR_QW0_TO_Q_S) & - ICE_FXD_FLTR_QW0_TO_Q_M; - qword |= ((u64)ctx->toq_prio << ICE_FXD_FLTR_QW0_TO_Q_PRI_S) & - ICE_FXD_FLTR_QW0_TO_Q_PRI_M; - qword |= ((u64)ctx->dpu_recipe << ICE_FXD_FLTR_QW0_DPU_RECIPE_S) & - ICE_FXD_FLTR_QW0_DPU_RECIPE_M; - qword |= ((u64)ctx->drop << ICE_FXD_FLTR_QW0_DROP_S) & - ICE_FXD_FLTR_QW0_DROP_M; - qword |= ((u64)ctx->flex_prio << ICE_FXD_FLTR_QW0_FLEX_PRI_S) & - ICE_FXD_FLTR_QW0_FLEX_PRI_M; - qword |= ((u64)ctx->flex_mdid << ICE_FXD_FLTR_QW0_FLEX_MDID_S) & - ICE_FXD_FLTR_QW0_FLEX_MDID_M; - qword |= ((u64)ctx->flex_val << ICE_FXD_FLTR_QW0_FLEX_VAL_S) & - ICE_FXD_FLTR_QW0_FLEX_VAL_M; + qword = FIELD_PREP(ICE_FXD_FLTR_QW0_QINDEX_M, ctx->qindex); + qword |= FIELD_PREP(ICE_FXD_FLTR_QW0_COMP_Q_M, ctx->comp_q); + qword |= FIELD_PREP(ICE_FXD_FLTR_QW0_COMP_REPORT_M, ctx->comp_report); + qword |= FIELD_PREP(ICE_FXD_FLTR_QW0_FD_SPACE_M, ctx->fd_space); + qword |= FIELD_PREP(ICE_FXD_FLTR_QW0_STAT_CNT_M, ctx->cnt_index); + qword |= FIELD_PREP(ICE_FXD_FLTR_QW0_STAT_ENA_M, ctx->cnt_ena); + qword |= FIELD_PREP(ICE_FXD_FLTR_QW0_EVICT_ENA_M, ctx->evict_ena); + qword |= FIELD_PREP(ICE_FXD_FLTR_QW0_TO_Q_M, ctx->toq); + qword |= FIELD_PREP(ICE_FXD_FLTR_QW0_TO_Q_PRI_M, ctx->toq_prio); + qword |= FIELD_PREP(ICE_FXD_FLTR_QW0_DPU_RECIPE_M, ctx->dpu_recipe); + qword |= FIELD_PREP(ICE_FXD_FLTR_QW0_DROP_M, ctx->drop); + qword |= FIELD_PREP(ICE_FXD_FLTR_QW0_FLEX_PRI_M, ctx->flex_prio); + qword |= FIELD_PREP(ICE_FXD_FLTR_QW0_FLEX_MDID_M, ctx->flex_mdid); + qword |= FIELD_PREP(ICE_FXD_FLTR_QW0_FLEX_VAL_M, ctx->flex_val); fdir_desc->qidx_compq_space_stat = cpu_to_le64(qword); /* prep QW1 of FD filter programming desc */ - qword = ((u64)ctx->dtype << ICE_FXD_FLTR_QW1_DTYPE_S) & - ICE_FXD_FLTR_QW1_DTYPE_M; - qword |= ((u64)ctx->pcmd << ICE_FXD_FLTR_QW1_PCMD_S) & - ICE_FXD_FLTR_QW1_PCMD_M; - qword |= ((u64)ctx->desc_prof_prio << ICE_FXD_FLTR_QW1_PROF_PRI_S) & - ICE_FXD_FLTR_QW1_PROF_PRI_M; - qword |= ((u64)ctx->desc_prof << ICE_FXD_FLTR_QW1_PROF_S) & - ICE_FXD_FLTR_QW1_PROF_M; - qword |= ((u64)ctx->fd_vsi << ICE_FXD_FLTR_QW1_FD_VSI_S) & - ICE_FXD_FLTR_QW1_FD_VSI_M; - qword |= ((u64)ctx->swap << ICE_FXD_FLTR_QW1_SWAP_S) & - ICE_FXD_FLTR_QW1_SWAP_M; - qword |= ((u64)ctx->fdid_prio << ICE_FXD_FLTR_QW1_FDID_PRI_S) & - ICE_FXD_FLTR_QW1_FDID_PRI_M; - qword |= ((u64)ctx->fdid_mdid << ICE_FXD_FLTR_QW1_FDID_MDID_S) & - ICE_FXD_FLTR_QW1_FDID_MDID_M; - qword |= 
((u64)ctx->fdid << ICE_FXD_FLTR_QW1_FDID_S) & - ICE_FXD_FLTR_QW1_FDID_M; + qword = FIELD_PREP(ICE_FXD_FLTR_QW1_DTYPE_M, ctx->dtype); + qword |= FIELD_PREP(ICE_FXD_FLTR_QW1_PCMD_M, ctx->pcmd); + qword |= FIELD_PREP(ICE_FXD_FLTR_QW1_PROF_PRI_M, ctx->desc_prof_prio); + qword |= FIELD_PREP(ICE_FXD_FLTR_QW1_PROF_M, ctx->desc_prof); + qword |= FIELD_PREP(ICE_FXD_FLTR_QW1_FD_VSI_M, ctx->fd_vsi); + qword |= FIELD_PREP(ICE_FXD_FLTR_QW1_SWAP_M, ctx->swap); + qword |= FIELD_PREP(ICE_FXD_FLTR_QW1_FDID_PRI_M, ctx->fdid_prio); + qword |= FIELD_PREP(ICE_FXD_FLTR_QW1_FDID_MDID_M, ctx->fdid_mdid); + qword |= FIELD_PREP(ICE_FXD_FLTR_QW1_FDID_M, ctx->fdid); fdir_desc->dtype_cmd_vsi_fdid = cpu_to_le64(qword); } diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c index 5ce4139659..20d5db88c9 100644 --- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c +++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c @@ -1218,11 +1218,13 @@ ice_prof_has_mask(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 *masks) * @blk: HW block * @fv: field vector to search for * @masks: masks for FV + * @symm: symmetric setting for RSS flows * @prof_id: receives the profile ID */ static int ice_find_prof_id_with_mask(struct ice_hw *hw, enum ice_block blk, - struct ice_fv_word *fv, u16 *masks, u8 *prof_id) + struct ice_fv_word *fv, u16 *masks, bool symm, + u8 *prof_id) { struct ice_es *es = &hw->blk[blk].es; u8 i; @@ -1236,6 +1238,9 @@ ice_find_prof_id_with_mask(struct ice_hw *hw, enum ice_block blk, for (i = 0; i < (u8)es->count; i++) { u16 off = i * es->fvw; + if (blk == ICE_BLK_RSS && es->symm[i] != symm) + continue; + if (memcmp(&es->t[off], fv, es->fvw * sizeof(*fv))) continue; @@ -1409,13 +1414,13 @@ ice_write_prof_mask_reg(struct ice_hw *hw, enum ice_block blk, u16 mask_idx, switch (blk) { case ICE_BLK_RSS: offset = GLQF_HMASK(mask_idx); - val = (idx << GLQF_HMASK_MSK_INDEX_S) & GLQF_HMASK_MSK_INDEX_M; - val |= (mask << GLQF_HMASK_MASK_S) & GLQF_HMASK_MASK_M; + val = FIELD_PREP(GLQF_HMASK_MSK_INDEX_M, idx); + val |= FIELD_PREP(GLQF_HMASK_MASK_M, mask); break; case ICE_BLK_FD: offset = GLQF_FDMASK(mask_idx); - val = (idx << GLQF_FDMASK_MSK_INDEX_S) & GLQF_FDMASK_MSK_INDEX_M; - val |= (mask << GLQF_FDMASK_MASK_S) & GLQF_FDMASK_MASK_M; + val = FIELD_PREP(GLQF_FDMASK_MSK_INDEX_M, idx); + val |= FIELD_PREP(GLQF_FDMASK_MASK_M, mask); break; default: ice_debug(hw, ICE_DBG_PKG, "No profile masks for block %d\n", @@ -1716,15 +1721,16 @@ ice_update_prof_masking(struct ice_hw *hw, enum ice_block blk, u16 prof_id, } /** - * ice_write_es - write an extraction sequence to hardware + * ice_write_es - write an extraction sequence and symmetric setting to hardware * @hw: pointer to the HW struct * @blk: the block in which to write the extraction sequence * @prof_id: the profile ID to write * @fv: pointer to the extraction sequence to write - NULL to clear extraction + * @symm: symmetric setting for RSS profiles */ static void ice_write_es(struct ice_hw *hw, enum ice_block blk, u8 prof_id, - struct ice_fv_word *fv) + struct ice_fv_word *fv, bool symm) { u16 off; @@ -1737,6 +1743,9 @@ ice_write_es(struct ice_hw *hw, enum ice_block blk, u8 prof_id, memcpy(&hw->blk[blk].es.t[off], fv, hw->blk[blk].es.fvw * sizeof(*fv)); } + + if (blk == ICE_BLK_RSS) + hw->blk[blk].es.symm[prof_id] = symm; } /** @@ -1753,7 +1762,7 @@ ice_prof_dec_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id) if (hw->blk[blk].es.ref_count[prof_id] > 0) { if (!--hw->blk[blk].es.ref_count[prof_id]) { - ice_write_es(hw, 
blk, prof_id, NULL); + ice_write_es(hw, blk, prof_id, NULL, false); ice_free_prof_masks(hw, blk, prof_id); return ice_free_prof_id(hw, blk, prof_id); } @@ -2116,8 +2125,10 @@ void ice_free_hw_tbls(struct ice_hw *hw) devm_kfree(ice_hw_to_dev(hw), hw->blk[i].prof_redir.t); devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.t); devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.ref_count); + devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.symm); devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.written); devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.mask_ena); + devm_kfree(ice_hw_to_dev(hw), hw->blk[i].prof_id.id); } list_for_each_entry_safe(r, rt, &hw->rss_list_head, l_entry) { @@ -2150,6 +2161,7 @@ void ice_clear_hw_tbls(struct ice_hw *hw) for (i = 0; i < ICE_BLK_COUNT; i++) { struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir; + struct ice_prof_id *prof_id = &hw->blk[i].prof_id; struct ice_prof_tcam *prof = &hw->blk[i].prof; struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1; struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2; @@ -2178,8 +2190,11 @@ void ice_clear_hw_tbls(struct ice_hw *hw) memset(es->t, 0, es->count * sizeof(*es->t) * es->fvw); memset(es->ref_count, 0, es->count * sizeof(*es->ref_count)); + memset(es->symm, 0, es->count * sizeof(*es->symm)); memset(es->written, 0, es->count * sizeof(*es->written)); memset(es->mask_ena, 0, es->count * sizeof(*es->mask_ena)); + + memset(prof_id->id, 0, prof_id->count * sizeof(*prof_id->id)); } } @@ -2196,6 +2211,7 @@ int ice_init_hw_tbls(struct ice_hw *hw) ice_init_all_prof_masks(hw); for (i = 0; i < ICE_BLK_COUNT; i++) { struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir; + struct ice_prof_id *prof_id = &hw->blk[i].prof_id; struct ice_prof_tcam *prof = &hw->blk[i].prof; struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1; struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2; @@ -2292,6 +2308,11 @@ int ice_init_hw_tbls(struct ice_hw *hw) if (!es->ref_count) goto err; + es->symm = devm_kcalloc(ice_hw_to_dev(hw), es->count, + sizeof(*es->symm), GFP_KERNEL); + if (!es->symm) + goto err; + es->written = devm_kcalloc(ice_hw_to_dev(hw), es->count, sizeof(*es->written), GFP_KERNEL); if (!es->written) @@ -2301,6 +2322,12 @@ int ice_init_hw_tbls(struct ice_hw *hw) sizeof(*es->mask_ena), GFP_KERNEL); if (!es->mask_ena) goto err; + + prof_id->count = blk_sizes[i].prof_id; + prof_id->id = devm_kcalloc(ice_hw_to_dev(hw), prof_id->count, + sizeof(*prof_id->id), GFP_KERNEL); + if (!prof_id->id) + goto err; } return 0; @@ -2963,6 +2990,7 @@ ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype, * @attr_cnt: number of elements in attr array * @es: extraction sequence (length of array is determined by the block) * @masks: mask for extraction sequence + * @symm: symmetric setting for RSS profiles * * This function registers a profile, which matches a set of PTYPES with a * particular extraction sequence. 
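 *
 * Editor's note: the hunks above also convert open-coded "(val << _S) & _M"
 * masking to FIELD_PREP()/FIELD_GET() from <linux/bitfield.h>. A minimal
 * sketch of the pattern on a hypothetical two-field qword (example-only
 * names, not a real descriptor layout):
 *
 *	#define EX_QW_QINDEX_M	GENMASK_ULL(10, 0)
 *	#define EX_QW_COMP_Q_M	BIT_ULL(11)
 *
 *	static u64 ex_pack(u16 qindex, u8 comp_q)
 *	{
 *		u64 qw = FIELD_PREP(EX_QW_QINDEX_M, qindex);
 *
 *		qw |= FIELD_PREP(EX_QW_COMP_Q_M, comp_q);
 *		return qw;
 *	}
 *
 *	static u16 ex_unpack_qindex(u64 qw)
 *	{
 *		return FIELD_GET(EX_QW_QINDEX_M, qw);
 *	}
 *
 * The separate shift (_S) constants become unnecessary because the helpers
 * derive the shift from the mask at compile time, which is why the converted
 * code no longer references them.
 *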
While the hardware profile is allocated @@ -2972,7 +3000,7 @@ ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype, int ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[], const struct ice_ptype_attributes *attr, u16 attr_cnt, - struct ice_fv_word *es, u16 *masks) + struct ice_fv_word *es, u16 *masks, bool symm) { u32 bytes = DIV_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE); DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT); @@ -2986,7 +3014,7 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[], mutex_lock(&hw->blk[blk].es.prof_map_lock); /* search for existing profile */ - status = ice_find_prof_id_with_mask(hw, blk, es, masks, &prof_id); + status = ice_find_prof_id_with_mask(hw, blk, es, masks, symm, &prof_id); if (status) { /* allocate profile ID */ status = ice_alloc_prof_id(hw, blk, &prof_id); @@ -3009,7 +3037,7 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[], goto err_ice_add_prof; /* and write new es */ - ice_write_es(hw, blk, prof_id, es); + ice_write_es(hw, blk, prof_id, es, symm); } ice_prof_inc_ref(hw, blk, prof_id); @@ -3097,7 +3125,7 @@ err_ice_add_prof: * This will search for a profile tracking ID which was previously added. * The profile map lock should be held before calling this function. */ -static struct ice_prof_map * +struct ice_prof_map * ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id) { struct ice_prof_map *entry = NULL; diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h index 7af7c8e9aa..b39d7cdc38 100644 --- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h +++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h @@ -42,7 +42,9 @@ bool ice_hw_ptype_ena(struct ice_hw *hw, u16 ptype); int ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[], const struct ice_ptype_attributes *attr, u16 attr_cnt, - struct ice_fv_word *es, u16 *masks); + struct ice_fv_word *es, u16 *masks, bool symm); +struct ice_prof_map * +ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id); int ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl); int diff --git a/drivers/net/ethernet/intel/ice/ice_flex_type.h b/drivers/net/ethernet/intel/ice/ice_flex_type.h index 4f42e14ed3..d427a79d00 100644 --- a/drivers/net/ethernet/intel/ice/ice_flex_type.h +++ b/drivers/net/ethernet/intel/ice/ice_flex_type.h @@ -146,6 +146,7 @@ struct ice_es { u32 *mask_ena; struct list_head prof_map; struct ice_fv_word *t; + u8 *symm; /* symmetric setting per profile (RSS blk)*/ struct mutex prof_map_lock; /* protect access to profiles list */ u8 *written; u8 reverse; /* set to true to reverse FV order */ @@ -304,10 +305,16 @@ struct ice_masks { struct ice_mask masks[ICE_PROF_MASK_COUNT]; }; +struct ice_prof_id { + unsigned long *id; + int count; +}; + /* Tables per block */ struct ice_blk_info { struct ice_xlt1 xlt1; struct ice_xlt2 xlt2; + struct ice_prof_id prof_id; struct ice_prof_tcam prof; struct ice_prof_redir prof_redir; struct ice_es es; diff --git a/drivers/net/ethernet/intel/ice/ice_flow.c b/drivers/net/ethernet/intel/ice/ice_flow.c index fb8b925aaf..fc2b58f562 100644 --- a/drivers/net/ethernet/intel/ice/ice_flow.c +++ b/drivers/net/ethernet/intel/ice/ice_flow.c @@ -1235,6 +1235,7 @@ ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params) #define ICE_FLOW_FIND_PROF_CHK_FLDS 0x00000001 #define ICE_FLOW_FIND_PROF_CHK_VSI 0x00000002 #define ICE_FLOW_FIND_PROF_NOT_CHK_DIR 0x00000004 +#define 
ICE_FLOW_FIND_PROF_CHK_SYMM 0x00000008 /** * ice_flow_find_prof_conds - Find a profile matching headers and conditions @@ -1243,13 +1244,14 @@ ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params) * @dir: flow direction * @segs: array of one or more packet segments that describe the flow * @segs_cnt: number of packet segments provided + * @symm: symmetric setting for RSS profiles * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI) * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*) */ static struct ice_flow_prof * ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir, struct ice_flow_seg_info *segs, - u8 segs_cnt, u16 vsi_handle, u32 conds) + u8 segs_cnt, bool symm, u16 vsi_handle, u32 conds) { struct ice_flow_prof *p, *prof = NULL; @@ -1265,6 +1267,11 @@ ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk, !test_bit(vsi_handle, p->vsis)) continue; + /* Check for symmetric settings */ + if ((conds & ICE_FLOW_FIND_PROF_CHK_SYMM) && + p->symm != symm) + continue; + /* Protocol headers must be checked. Matched fields are * checked if specified. */ @@ -1328,26 +1335,33 @@ ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block __always_unused blk, * @hw: pointer to the HW struct * @blk: classification stage * @dir: flow direction - * @prof_id: unique ID to identify this flow profile * @segs: array of one or more packet segments that describe the flow * @segs_cnt: number of packet segments provided + * @symm: symmetric setting for RSS profiles * @prof: stores the returned flow profile added * * Assumption: the caller has acquired the lock to the profile list */ static int ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk, - enum ice_flow_dir dir, u64 prof_id, + enum ice_flow_dir dir, struct ice_flow_seg_info *segs, u8 segs_cnt, - struct ice_flow_prof **prof) + bool symm, struct ice_flow_prof **prof) { struct ice_flow_prof_params *params; + struct ice_prof_id *ids; int status; + u64 prof_id; u8 i; if (!prof) return -EINVAL; + ids = &hw->blk[blk].prof_id; + prof_id = find_first_zero_bit(ids->id, ids->count); + if (prof_id >= ids->count) + return -ENOSPC; + params = kzalloc(sizeof(*params), GFP_KERNEL); if (!params) return -ENOMEM; @@ -1369,6 +1383,7 @@ ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk, params->prof->id = prof_id; params->prof->dir = dir; params->prof->segs_cnt = segs_cnt; + params->prof->symm = symm; /* Make a copy of the segments that need to be persistent in the flow * profile instance @@ -1385,7 +1400,7 @@ ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk, /* Add a HW profile for this flow profile */ status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes, params->attr, params->attr_cnt, params->es, - params->mask); + params->mask, symm); if (status) { ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n"); goto out; @@ -1393,6 +1408,7 @@ ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk, INIT_LIST_HEAD(¶ms->prof->entries); mutex_init(¶ms->prof->entries_lock); + set_bit(prof_id, ids->id); *prof = params->prof; out: @@ -1436,6 +1452,7 @@ ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk, /* Remove all hardware profiles associated with this flow profile */ status = ice_rem_prof(hw, blk, prof->id); if (!status) { + clear_bit(prof->id, hw->blk[blk].prof_id.id); list_del(&prof->l_entry); mutex_destroy(&prof->entries_lock); devm_kfree(ice_hw_to_dev(hw), prof); @@ -1511,15 +1528,15 @@ 
ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk, * @hw: pointer to the HW struct * @blk: classification stage * @dir: flow direction - * @prof_id: unique ID to identify this flow profile * @segs: array of one or more packet segments that describe the flow * @segs_cnt: number of packet segments provided + * @symm: symmetric setting for RSS profiles * @prof: stores the returned flow profile added */ int ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir, - u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt, - struct ice_flow_prof **prof) + struct ice_flow_seg_info *segs, u8 segs_cnt, + bool symm, struct ice_flow_prof **prof) { int status; @@ -1538,8 +1555,8 @@ ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir, mutex_lock(&hw->fl_profs_locks[blk]); - status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt, - prof); + status = ice_flow_add_prof_sync(hw, blk, dir, segs, segs_cnt, + symm, prof); if (!status) list_add(&(*prof)->l_entry, &hw->fl_profs[blk]); @@ -1855,37 +1872,49 @@ int ice_flow_rem_vsi_prof(struct ice_hw *hw, u16 vsi_handle, u64 prof_id) /** * ice_flow_set_rss_seg_info - setup packet segments for RSS * @segs: pointer to the flow field segment(s) - * @hash_fields: fields to be hashed on for the segment(s) - * @flow_hdr: protocol header fields within a packet segment + * @seg_cnt: segment count + * @cfg: configure parameters * * Helper function to extract fields from hash bitmap and use flow * header value to set flow field segment for further use in flow * profile entry or removal. */ static int -ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields, - u32 flow_hdr) +ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u8 seg_cnt, + const struct ice_rss_hash_cfg *cfg) { + struct ice_flow_seg_info *seg; u64 val; - u8 i; + u16 i; - for_each_set_bit(i, (unsigned long *)&hash_fields, - ICE_FLOW_FIELD_IDX_MAX) - ice_flow_set_fld(segs, (enum ice_flow_field)i, + /* set inner most segment */ + seg = &segs[seg_cnt - 1]; + + for_each_set_bit(i, (const unsigned long *)&cfg->hash_flds, + (u16)ICE_FLOW_FIELD_IDX_MAX) + ice_flow_set_fld(seg, (enum ice_flow_field)i, ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, false); - ICE_FLOW_SET_HDRS(segs, flow_hdr); + ICE_FLOW_SET_HDRS(seg, cfg->addl_hdrs); + + /* set outer most header */ + if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV4) + segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER; + else if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV6) + segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER; - if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS & + if (seg->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS & ~ICE_FLOW_RSS_HDRS_INNER_MASK & ~ICE_FLOW_SEG_HDR_IPV_OTHER) return -EINVAL; - val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS); + val = (u64)(seg->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS); if (val && !is_power_of_2(val)) return -EIO; - val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS); + val = (u64)(seg->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS); if (val && !is_power_of_2(val)) return -EIO; @@ -1955,6 +1984,39 @@ int ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle) return status; } +/** + * ice_get_rss_hdr_type - get a RSS profile's header type + * @prof: RSS flow profile + */ +static enum ice_rss_cfg_hdr_type +ice_get_rss_hdr_type(struct ice_flow_prof *prof) +{ + if (prof->segs_cnt == ICE_FLOW_SEG_SINGLE) { + return 
ICE_RSS_OUTER_HEADERS; + } else if (prof->segs_cnt == ICE_FLOW_SEG_MAX) { + const struct ice_flow_seg_info *s; + + s = &prof->segs[ICE_RSS_OUTER_HEADERS]; + if (s->hdrs == ICE_FLOW_SEG_HDR_NONE) + return ICE_RSS_INNER_HEADERS; + if (s->hdrs & ICE_FLOW_SEG_HDR_IPV4) + return ICE_RSS_INNER_HEADERS_W_OUTER_IPV4; + if (s->hdrs & ICE_FLOW_SEG_HDR_IPV6) + return ICE_RSS_INNER_HEADERS_W_OUTER_IPV6; + } + + return ICE_RSS_ANY_HEADERS; +} + +static bool +ice_rss_match_prof(struct ice_rss_cfg *r, struct ice_flow_prof *prof, + enum ice_rss_cfg_hdr_type hdr_type) +{ + return (r->hash.hdr_type == hdr_type && + r->hash.hash_flds == prof->segs[prof->segs_cnt - 1].match && + r->hash.addl_hdrs == prof->segs[prof->segs_cnt - 1].hdrs); +} + /** * ice_rem_rss_list - remove RSS configuration from list * @hw: pointer to the hardware structure @@ -1966,15 +2028,16 @@ int ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle) static void ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof) { + enum ice_rss_cfg_hdr_type hdr_type; struct ice_rss_cfg *r, *tmp; /* Search for RSS hash fields associated to the VSI that match the * hash configurations associated to the flow profile. If found * remove from the RSS entry list of the VSI context and delete entry. */ + hdr_type = ice_get_rss_hdr_type(prof); list_for_each_entry_safe(r, tmp, &hw->rss_list_head, l_entry) - if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match && - r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) { + if (ice_rss_match_prof(r, prof, hdr_type)) { clear_bit(vsi_handle, r->vsis); if (bitmap_empty(r->vsis, ICE_MAX_VSI)) { list_del(&r->l_entry); @@ -1995,11 +2058,12 @@ ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof) static int ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof) { + enum ice_rss_cfg_hdr_type hdr_type; struct ice_rss_cfg *r, *rss_cfg; + hdr_type = ice_get_rss_hdr_type(prof); list_for_each_entry(r, &hw->rss_list_head, l_entry) - if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match && - r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) { + if (ice_rss_match_prof(r, prof, hdr_type)) { set_bit(vsi_handle, r->vsis); return 0; } @@ -2009,8 +2073,10 @@ ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof) if (!rss_cfg) return -ENOMEM; - rss_cfg->hashed_flds = prof->segs[prof->segs_cnt - 1].match; - rss_cfg->packet_hdr = prof->segs[prof->segs_cnt - 1].hdrs; + rss_cfg->hash.hash_flds = prof->segs[prof->segs_cnt - 1].match; + rss_cfg->hash.addl_hdrs = prof->segs[prof->segs_cnt - 1].hdrs; + rss_cfg->hash.hdr_type = hdr_type; + rss_cfg->hash.symm = prof->symm; set_bit(vsi_handle, rss_cfg->vsis); list_add_tail(&rss_cfg->l_entry, &hw->rss_list_head); @@ -2018,65 +2084,177 @@ ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof) return 0; } -#define ICE_FLOW_PROF_HASH_S 0 -#define ICE_FLOW_PROF_HASH_M (0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S) -#define ICE_FLOW_PROF_HDR_S 32 -#define ICE_FLOW_PROF_HDR_M (0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S) -#define ICE_FLOW_PROF_ENCAP_S 63 -#define ICE_FLOW_PROF_ENCAP_M (BIT_ULL(ICE_FLOW_PROF_ENCAP_S)) +/** + * ice_rss_config_xor_word - set the HSYMM registers for one input set word + * @hw: pointer to the hardware structure + * @prof_id: RSS hardware profile id + * @src: the FV index used by the protocol's source field + * @dst: the FV index used by the protocol's destination field + * + * Write to the HSYMM register with the index of @src FV the value of the 
@dst + * FV index. This will tell the hardware to XOR HSYMM[src] with INSET[dst] + * while calculating the RSS input set. + */ +static void +ice_rss_config_xor_word(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst) +{ + u32 val, reg, bits_shift; + u8 reg_idx; + + reg_idx = src / GLQF_HSYMM_REG_SIZE; + bits_shift = ((src % GLQF_HSYMM_REG_SIZE) << 3); + val = dst | GLQF_HSYMM_ENABLE_BIT; + + reg = rd32(hw, GLQF_HSYMM(prof_id, reg_idx)); + reg = (reg & ~(0xff << bits_shift)) | (val << bits_shift); + wr32(hw, GLQF_HSYMM(prof_id, reg_idx), reg); +} -#define ICE_RSS_OUTER_HEADERS 1 -#define ICE_RSS_INNER_HEADERS 2 +/** + * ice_rss_config_xor - set the symmetric registers for a profile's protocol + * @hw: pointer to the hardware structure + * @prof_id: RSS hardware profile id + * @src: the FV index used by the protocol's source field + * @dst: the FV index used by the protocol's destination field + * @len: length of the source/destination fields in words + */ +static void +ice_rss_config_xor(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst, u8 len) +{ + int fv_last_word = + ICE_FLOW_SW_FIELD_VECTOR_MAX / ICE_FLOW_FV_EXTRACT_SZ - 1; + int i; + + for (i = 0; i < len; i++) { + ice_rss_config_xor_word(hw, prof_id, + /* Yes, field vector in GLQF_HSYMM and + * GLQF_HINSET is inversed! + */ + fv_last_word - (src + i), + fv_last_word - (dst + i)); + ice_rss_config_xor_word(hw, prof_id, + fv_last_word - (dst + i), + fv_last_word - (src + i)); + } +} -/* Flow profile ID format: - * [0:31] - Packet match fields - * [32:62] - Protocol header - * [63] - Encapsulation flag, 0 if non-tunneled, 1 if tunneled +/** + * ice_rss_set_symm - set the symmetric settings for an RSS profile + * @hw: pointer to the hardware structure + * @prof: pointer to flow profile + * + * The symmetric hash will result from XORing the protocol's fields with + * indexes in GLQF_HSYMM and GLQF_HINSET. This function configures the profile's + * GLQF_HSYMM registers. */ -#define ICE_FLOW_GEN_PROFID(hash, hdr, segs_cnt) \ - ((u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \ - (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \ - ((u8)((segs_cnt) - 1) ? 
ICE_FLOW_PROF_ENCAP_M : 0))) +static void ice_rss_set_symm(struct ice_hw *hw, struct ice_flow_prof *prof) +{ + struct ice_prof_map *map; + u8 prof_id, m; + + mutex_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock); + map = ice_search_prof_id(hw, ICE_BLK_RSS, prof->id); + if (map) + prof_id = map->prof_id; + mutex_unlock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock); + + if (!map) + return; + + /* clear to default */ + for (m = 0; m < GLQF_HSYMM_REG_PER_PROF; m++) + wr32(hw, GLQF_HSYMM(prof_id, m), 0); + + if (prof->symm) { + struct ice_flow_seg_xtrct *ipv4_src, *ipv4_dst; + struct ice_flow_seg_xtrct *ipv6_src, *ipv6_dst; + struct ice_flow_seg_xtrct *sctp_src, *sctp_dst; + struct ice_flow_seg_xtrct *tcp_src, *tcp_dst; + struct ice_flow_seg_xtrct *udp_src, *udp_dst; + struct ice_flow_seg_info *seg; + + seg = &prof->segs[prof->segs_cnt - 1]; + + ipv4_src = &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_SA].xtrct; + ipv4_dst = &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_DA].xtrct; + + ipv6_src = &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_SA].xtrct; + ipv6_dst = &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_DA].xtrct; + + tcp_src = &seg->fields[ICE_FLOW_FIELD_IDX_TCP_SRC_PORT].xtrct; + tcp_dst = &seg->fields[ICE_FLOW_FIELD_IDX_TCP_DST_PORT].xtrct; + + udp_src = &seg->fields[ICE_FLOW_FIELD_IDX_UDP_SRC_PORT].xtrct; + udp_dst = &seg->fields[ICE_FLOW_FIELD_IDX_UDP_DST_PORT].xtrct; + + sctp_src = &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT].xtrct; + sctp_dst = &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_DST_PORT].xtrct; + + /* xor IPv4 */ + if (ipv4_src->prot_id != 0 && ipv4_dst->prot_id != 0) + ice_rss_config_xor(hw, prof_id, + ipv4_src->idx, ipv4_dst->idx, 2); + + /* xor IPv6 */ + if (ipv6_src->prot_id != 0 && ipv6_dst->prot_id != 0) + ice_rss_config_xor(hw, prof_id, + ipv6_src->idx, ipv6_dst->idx, 8); + + /* xor TCP */ + if (tcp_src->prot_id != 0 && tcp_dst->prot_id != 0) + ice_rss_config_xor(hw, prof_id, + tcp_src->idx, tcp_dst->idx, 1); + + /* xor UDP */ + if (udp_src->prot_id != 0 && udp_dst->prot_id != 0) + ice_rss_config_xor(hw, prof_id, + udp_src->idx, udp_dst->idx, 1); + + /* xor SCTP */ + if (sctp_src->prot_id != 0 && sctp_dst->prot_id != 0) + ice_rss_config_xor(hw, prof_id, + sctp_src->idx, sctp_dst->idx, 1); + } +} /** * ice_add_rss_cfg_sync - add an RSS configuration * @hw: pointer to the hardware structure * @vsi_handle: software VSI handle - * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure - * @addl_hdrs: protocol header fields - * @segs_cnt: packet segment count + * @cfg: configure parameters * * Assumption: lock has already been acquired for RSS list */ static int -ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds, - u32 addl_hdrs, u8 segs_cnt) +ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, + const struct ice_rss_hash_cfg *cfg) { const enum ice_block blk = ICE_BLK_RSS; struct ice_flow_prof *prof = NULL; struct ice_flow_seg_info *segs; + u8 segs_cnt; int status; - if (!segs_cnt || segs_cnt > ICE_FLOW_SEG_MAX) - return -EINVAL; + segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ? + ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX; segs = kcalloc(segs_cnt, sizeof(*segs), GFP_KERNEL); if (!segs) return -ENOMEM; /* Construct the packet segment info from the hashed fields */ - status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds, - addl_hdrs); + status = ice_flow_set_rss_seg_info(segs, segs_cnt, cfg); if (status) goto exit; - /* Search for a flow profile that has matching headers, hash fields - * and has the input VSI associated to it. 
If found, no further + /* Search for a flow profile that has matching headers, hash fields, + * symm and has the input VSI associated to it. If found, no further * operations required and exit. */ prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt, - vsi_handle, + cfg->symm, vsi_handle, ICE_FLOW_FIND_PROF_CHK_FLDS | + ICE_FLOW_FIND_PROF_CHK_SYMM | ICE_FLOW_FIND_PROF_CHK_VSI); if (prof) goto exit; @@ -2087,7 +2265,8 @@ ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds, * the protocol header and new hash field configuration. */ prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt, - vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI); + cfg->symm, vsi_handle, + ICE_FLOW_FIND_PROF_CHK_VSI); if (prof) { status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle); if (!status) @@ -2103,11 +2282,12 @@ ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds, } } - /* Search for a profile that has same match fields only. If this - * exists then associate the VSI to this profile. + /* Search for a profile that has the same match fields and symmetric + * setting. If this exists then associate the VSI to this profile. */ prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt, - vsi_handle, + cfg->symm, vsi_handle, + ICE_FLOW_FIND_PROF_CHK_SYMM | ICE_FLOW_FIND_PROF_CHK_FLDS); if (prof) { status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle); @@ -2116,17 +2296,14 @@ ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds, goto exit; } - /* Create a new flow profile with generated profile and packet - * segment information. - */ + /* Create a new flow profile with packet segment information. */ status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX, - ICE_FLOW_GEN_PROFID(hashed_flds, - segs[segs_cnt - 1].hdrs, - segs_cnt), - segs, segs_cnt, &prof); + segs, segs_cnt, cfg->symm, &prof); if (status) goto exit; + prof->symm = cfg->symm; + ice_rss_set_symm(hw, prof); status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle); /* If association to a new flow profile failed then this profile can * be removed. @@ -2146,30 +2323,43 @@ exit: /** * ice_add_rss_cfg - add an RSS configuration with specified hashed fields * @hw: pointer to the hardware structure - * @vsi_handle: software VSI handle - * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure - * @addl_hdrs: protocol header fields + * @vsi: VSI to add the RSS configuration to + * @cfg: configure parameters * * This function will generate a flow profile based on fields associated with * the input fields to hash on, the flow type and use the VSI number to add * a flow entry to the profile. 
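 *
 * Editor's note: a hedged usage sketch of the new interface. Callers now
 * fill an ice_rss_hash_cfg (fields as added to ice_flow.h by this patch)
 * instead of passing raw hash/header arguments; ICE_RSS_ANY_HEADERS asks
 * this function to install both the outer- and inner-header profiles:
 *
 *	struct ice_rss_hash_cfg cfg = {
 *		.addl_hdrs = ICE_FLOW_SEG_HDR_NONE,
 *		.hash_flds = ICE_HASH_TCP_IPV4,
 *		.hdr_type = ICE_RSS_ANY_HEADERS,
 *		.symm = false,
 *	};
 *	int err = ice_add_rss_cfg(hw, vsi, &cfg);
 *
 * where hw and vsi are assumed to be a valid ice_hw/ice_vsi pair owned by
 * the caller.
 *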
*/ int -ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds, - u32 addl_hdrs) +ice_add_rss_cfg(struct ice_hw *hw, struct ice_vsi *vsi, + const struct ice_rss_hash_cfg *cfg) { + struct ice_rss_hash_cfg local_cfg; + u16 vsi_handle; int status; - if (hashed_flds == ICE_HASH_INVALID || - !ice_is_vsi_valid(hw, vsi_handle)) + if (!vsi) + return -EINVAL; + + vsi_handle = vsi->idx; + if (!ice_is_vsi_valid(hw, vsi_handle) || + !cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS || + cfg->hash_flds == ICE_HASH_INVALID) return -EINVAL; mutex_lock(&hw->rss_locks); - status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs, - ICE_RSS_OUTER_HEADERS); - if (!status) - status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, - addl_hdrs, ICE_RSS_INNER_HEADERS); + local_cfg = *cfg; + if (cfg->hdr_type < ICE_RSS_ANY_HEADERS) { + status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg); + } else { + local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS; + status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg); + if (!status) { + local_cfg.hdr_type = ICE_RSS_INNER_HEADERS; + status = ice_add_rss_cfg_sync(hw, vsi_handle, + &local_cfg); + } + } mutex_unlock(&hw->rss_locks); return status; @@ -2179,33 +2369,33 @@ ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds, * ice_rem_rss_cfg_sync - remove an existing RSS configuration * @hw: pointer to the hardware structure * @vsi_handle: software VSI handle - * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove - * @addl_hdrs: Protocol header fields within a packet segment - * @segs_cnt: packet segment count + * @cfg: configure parameters * * Assumption: lock has already been acquired for RSS list */ static int -ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds, - u32 addl_hdrs, u8 segs_cnt) +ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, + const struct ice_rss_hash_cfg *cfg) { const enum ice_block blk = ICE_BLK_RSS; struct ice_flow_seg_info *segs; struct ice_flow_prof *prof; + u8 segs_cnt; int status; + segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ? + ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX; segs = kcalloc(segs_cnt, sizeof(*segs), GFP_KERNEL); if (!segs) return -ENOMEM; /* Construct the packet segment info from the hashed fields */ - status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds, - addl_hdrs); + status = ice_flow_set_rss_seg_info(segs, segs_cnt, cfg); if (status) goto out; prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt, - vsi_handle, + cfg->symm, vsi_handle, ICE_FLOW_FIND_PROF_CHK_FLDS); if (!prof) { status = -ENOENT; @@ -2233,31 +2423,39 @@ out: * ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields * @hw: pointer to the hardware structure * @vsi_handle: software VSI handle - * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove - * @addl_hdrs: Protocol header fields within a packet segment + * @cfg: configure parameters * * This function will lookup the flow profile based on the input * hash field bitmap, iterate through the profile entry list of * that profile and find entry associated with input VSI to be - * removed. Calls are made to underlying flow s which will APIs + * removed. Calls are made to underlying flow apis which will in * turn build or update buffers for RSS XLT1 section. 
*/ -int __maybe_unused -ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds, - u32 addl_hdrs) +int +ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, + const struct ice_rss_hash_cfg *cfg) { + struct ice_rss_hash_cfg local_cfg; int status; - if (hashed_flds == ICE_HASH_INVALID || - !ice_is_vsi_valid(hw, vsi_handle)) + if (!ice_is_vsi_valid(hw, vsi_handle) || + !cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS || + cfg->hash_flds == ICE_HASH_INVALID) return -EINVAL; mutex_lock(&hw->rss_locks); - status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs, - ICE_RSS_OUTER_HEADERS); - if (!status) - status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds, - addl_hdrs, ICE_RSS_INNER_HEADERS); + local_cfg = *cfg; + if (cfg->hdr_type < ICE_RSS_ANY_HEADERS) { + status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg); + } else { + local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS; + status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg); + if (!status) { + local_cfg.hdr_type = ICE_RSS_INNER_HEADERS; + status = ice_rem_rss_cfg_sync(hw, vsi_handle, + &local_cfg); + } + } mutex_unlock(&hw->rss_locks); return status; @@ -2298,18 +2496,24 @@ ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds, /** * ice_add_avf_rss_cfg - add an RSS configuration for AVF driver * @hw: pointer to the hardware structure - * @vsi_handle: software VSI handle + * @vsi: VF's VSI * @avf_hash: hash bit fields (ICE_AVF_FLOW_FIELD_*) to configure * * This function will take the hash bitmap provided by the AVF driver via a * message, convert it to ICE-compatible values, and configure RSS flow * profiles. */ -int ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash) +int ice_add_avf_rss_cfg(struct ice_hw *hw, struct ice_vsi *vsi, u64 avf_hash) { + struct ice_rss_hash_cfg hcfg; + u16 vsi_handle; int status = 0; u64 hash_flds; + if (!vsi) + return -EINVAL; + + vsi_handle = vsi->idx; if (avf_hash == ICE_AVF_FLOW_FIELD_INVALID || !ice_is_vsi_valid(hw, vsi_handle)) return -EINVAL; @@ -2379,8 +2583,11 @@ int ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash) if (rss_hash == ICE_HASH_INVALID) return -EIO; - status = ice_add_rss_cfg(hw, vsi_handle, rss_hash, - ICE_FLOW_SEG_HDR_NONE); + hcfg.addl_hdrs = ICE_FLOW_SEG_HDR_NONE; + hcfg.hash_flds = rss_hash; + hcfg.hdr_type = ICE_RSS_ANY_HEADERS; + hcfg.symm = false; + status = ice_add_rss_cfg(hw, vsi, &hcfg); if (status) break; } @@ -2388,6 +2595,54 @@ int ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash) return status; } +static bool rss_cfg_symm_valid(u64 hfld) +{ + return !((!!(hfld & ICE_FLOW_HASH_FLD_IPV4_SA) ^ + !!(hfld & ICE_FLOW_HASH_FLD_IPV4_DA)) || + (!!(hfld & ICE_FLOW_HASH_FLD_IPV6_SA) ^ + !!(hfld & ICE_FLOW_HASH_FLD_IPV6_DA)) || + (!!(hfld & ICE_FLOW_HASH_FLD_TCP_SRC_PORT) ^ + !!(hfld & ICE_FLOW_HASH_FLD_TCP_DST_PORT)) || + (!!(hfld & ICE_FLOW_HASH_FLD_UDP_SRC_PORT) ^ + !!(hfld & ICE_FLOW_HASH_FLD_UDP_DST_PORT)) || + (!!(hfld & ICE_FLOW_HASH_FLD_SCTP_SRC_PORT) ^ + !!(hfld & ICE_FLOW_HASH_FLD_SCTP_DST_PORT))); +} + +/** + * ice_set_rss_cfg_symm - set symmetry for all the VSI's RSS configurations + * @hw: pointer to the hardware structure + * @vsi: VSI to set/unset Symmetric RSS + * @symm: true to set Symmetric RSS hashing + */ +int ice_set_rss_cfg_symm(struct ice_hw *hw, struct ice_vsi *vsi, bool symm) +{ + struct ice_rss_hash_cfg local; + struct ice_rss_cfg *r, *tmp; + u16 vsi_handle = vsi->idx; + int status = 0; + + if (!ice_is_vsi_valid(hw, vsi_handle)) + return -EINVAL; + + 
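	/* Editor's note: rss_cfg_symm_valid() above accepts a field bitmap
	 * only when each address/port pair is enabled symmetrically -- for
	 * IPv4 SA/DA, IPv6 SA/DA and the TCP/UDP/SCTP port pairs, either
	 * both bits are set or both are clear. Symmetric RSS XORs source
	 * and destination inputs, so hashing on only one side of a pair
	 * cannot yield a direction-independent hash; such configurations
	 * are skipped by the loop below instead of failing the whole call.
	 */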
mutex_lock(&hw->rss_locks); + list_for_each_entry_safe(r, tmp, &hw->rss_list_head, l_entry) { + if (test_bit(vsi_handle, r->vsis) && r->hash.symm != symm) { + local = r->hash; + local.symm = symm; + if (symm && !rss_cfg_symm_valid(r->hash.hash_flds)) + continue; + + status = ice_add_rss_cfg_sync(hw, vsi_handle, &local); + if (status) + break; + } + } + mutex_unlock(&hw->rss_locks); + + return status; +} + /** * ice_replay_rss_cfg - replay RSS configurations associated with VSI * @hw: pointer to the hardware structure @@ -2404,16 +2659,7 @@ int ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle) mutex_lock(&hw->rss_locks); list_for_each_entry(r, &hw->rss_list_head, l_entry) { if (test_bit(vsi_handle, r->vsis)) { - status = ice_add_rss_cfg_sync(hw, vsi_handle, - r->hashed_flds, - r->packet_hdr, - ICE_RSS_OUTER_HEADERS); - if (status) - break; - status = ice_add_rss_cfg_sync(hw, vsi_handle, - r->hashed_flds, - r->packet_hdr, - ICE_RSS_INNER_HEADERS); + status = ice_add_rss_cfg_sync(hw, vsi_handle, &r->hash); if (status) break; } @@ -2428,11 +2674,12 @@ int ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle) * @hw: pointer to the hardware structure * @vsi_handle: software VSI handle * @hdrs: protocol header type + * @symm: whether the RSS is symmetric (bool, output) * * This function will return the match fields of the first instance of flow * profile having the given header types and containing input VSI */ -u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs) +u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs, bool *symm) { u64 rss_hash = ICE_HASH_INVALID; struct ice_rss_cfg *r; @@ -2444,8 +2691,9 @@ u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs) mutex_lock(&hw->rss_locks); list_for_each_entry(r, &hw->rss_list_head, l_entry) if (test_bit(vsi_handle, r->vsis) && - r->packet_hdr == hdrs) { - rss_hash = r->hashed_flds; + r->hash.addl_hdrs == hdrs) { + rss_hash = r->hash.hash_flds; + *symm = r->hash.symm; break; } mutex_unlock(&hw->rss_locks); diff --git a/drivers/net/ethernet/intel/ice/ice_flow.h b/drivers/net/ethernet/intel/ice/ice_flow.h index 96923ef0a5..ff82915ab4 100644 --- a/drivers/net/ethernet/intel/ice/ice_flow.h +++ b/drivers/net/ethernet/intel/ice/ice_flow.h @@ -34,6 +34,8 @@ #define ICE_HASH_TCP_IPV6 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_TCP_PORT) #define ICE_HASH_UDP_IPV4 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_UDP_PORT) #define ICE_HASH_UDP_IPV6 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_UDP_PORT) +#define ICE_HASH_SCTP_IPV4 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_SCTP_PORT) +#define ICE_HASH_SCTP_IPV6 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_SCTP_PORT) #define ICE_FLOW_HASH_GTP_TEID \ (BIT_ULL(ICE_FLOW_FIELD_IDX_GTPC_TEID)) @@ -227,6 +229,19 @@ enum ice_flow_field { ICE_FLOW_FIELD_IDX_MAX }; +#define ICE_FLOW_HASH_FLD_IPV4_SA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) +#define ICE_FLOW_HASH_FLD_IPV6_SA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) +#define ICE_FLOW_HASH_FLD_IPV4_DA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) +#define ICE_FLOW_HASH_FLD_IPV6_DA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA) +#define ICE_FLOW_HASH_FLD_TCP_SRC_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT) +#define ICE_FLOW_HASH_FLD_TCP_DST_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT) +#define ICE_FLOW_HASH_FLD_UDP_SRC_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT) +#define ICE_FLOW_HASH_FLD_UDP_DST_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT) +#define ICE_FLOW_HASH_FLD_SCTP_SRC_PORT \ + BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT) +#define ICE_FLOW_HASH_FLD_SCTP_DST_PORT \ + 
BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT) + /* Flow headers and fields for AVF support */ enum ice_flow_avf_hdr_field { /* Values 0 - 28 are reserved for future use */ @@ -279,6 +294,24 @@ enum ice_flow_avf_hdr_field { BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP) | \ BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP)) +enum ice_rss_cfg_hdr_type { + ICE_RSS_OUTER_HEADERS, /* take outer headers as inputset. */ + ICE_RSS_INNER_HEADERS, /* take inner headers as inputset. */ + /* take inner headers as inputset for packet with outer ipv4. */ + ICE_RSS_INNER_HEADERS_W_OUTER_IPV4, + /* take inner headers as inputset for packet with outer ipv6. */ + ICE_RSS_INNER_HEADERS_W_OUTER_IPV6, + /* take outer headers first then inner headers as inputset */ + ICE_RSS_ANY_HEADERS +}; + +struct ice_rss_hash_cfg { + u32 addl_hdrs; /* protocol header fields */ + u64 hash_flds; /* hash bit field (ICE_FLOW_HASH_*) to configure */ + enum ice_rss_cfg_hdr_type hdr_type; /* to specify inner or outer */ + bool symm; /* symmetric or asymmetric hash */ +}; + enum ice_flow_dir { ICE_FLOW_RX = 0x02, }; @@ -289,8 +322,10 @@ enum ice_flow_priority { ICE_FLOW_PRIO_HIGH }; +#define ICE_FLOW_SEG_SINGLE 1 #define ICE_FLOW_SEG_MAX 2 #define ICE_FLOW_SEG_RAW_FLD_MAX 2 +#define ICE_FLOW_SW_FIELD_VECTOR_MAX 48 #define ICE_FLOW_FV_EXTRACT_SZ 2 #define ICE_FLOW_SET_HDRS(seg, val) ((seg)->hdrs |= (u32)(val)) @@ -372,20 +407,21 @@ struct ice_flow_prof { /* software VSI handles referenced by this flow profile */ DECLARE_BITMAP(vsis, ICE_MAX_VSI); + + bool symm; /* Symmetric Hash for RSS */ }; struct ice_rss_cfg { struct list_head l_entry; /* bitmap of VSIs added to the RSS entry */ DECLARE_BITMAP(vsis, ICE_MAX_VSI); - u64 hashed_flds; - u32 packet_hdr; + struct ice_rss_hash_cfg hash; }; int ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir, - u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt, - struct ice_flow_prof **prof); + struct ice_flow_seg_info *segs, u8 segs_cnt, + bool symm, struct ice_flow_prof **prof); int ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id); int ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id, @@ -401,13 +437,13 @@ ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len, int ice_flow_rem_vsi_prof(struct ice_hw *hw, u16 vsi_handle, u64 prof_id); void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle); int ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle); -int ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds); +int ice_set_rss_cfg_symm(struct ice_hw *hw, struct ice_vsi *vsi, bool symm); +int ice_add_avf_rss_cfg(struct ice_hw *hw, struct ice_vsi *vsi, + u64 hashed_flds); int ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle); -int -ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds, - u32 addl_hdrs); -int -ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds, - u32 addl_hdrs); -u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs); +int ice_add_rss_cfg(struct ice_hw *hw, struct ice_vsi *vsi, + const struct ice_rss_hash_cfg *cfg); +int ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, + const struct ice_rss_hash_cfg *cfg); +u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs, bool *symm); #endif /* _ICE_FLOW_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_fwlog.c b/drivers/net/ethernet/intel/ice/ice_fwlog.c new file mode 100644 index 0000000000..92b5dac481 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_fwlog.c @@ -0,0 +1,470 @@ 
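/* Editor's note: hedged sketch of querying a configuration back through the
 * ice_get_rss_cfg() prototype at the end of ice_flow.h above; the symmetric
 * flag is now reported through an output parameter. The helper name and the
 * header combination passed in are illustrative only:
 */
static void ex_show_rss_cfg(struct ice_hw *hw, struct ice_vsi *vsi)
{
	bool symm = false;
	u64 flds;

	flds = ice_get_rss_cfg(hw, vsi->idx,
			       ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4,
			       &symm);
	if (flds != ICE_HASH_INVALID)
		dev_dbg(ice_hw_to_dev(hw), "hash fields 0x%llx, symmetric %d\n",
			(unsigned long long)flds, symm);
}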
+// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2022, Intel Corporation. */ + +#include <linux/vmalloc.h> +#include "ice.h" +#include "ice_common.h" +#include "ice_fwlog.h" + +bool ice_fwlog_ring_full(struct ice_fwlog_ring *rings) +{ + u16 head, tail; + + head = rings->head; + tail = rings->tail; + + if (head < tail && (tail - head == (rings->size - 1))) + return true; + else if (head > tail && (tail == (head - 1))) + return true; + + return false; +} + +bool ice_fwlog_ring_empty(struct ice_fwlog_ring *rings) +{ + return rings->head == rings->tail; +} + +void ice_fwlog_ring_increment(u16 *item, u16 size) +{ + *item = (*item + 1) & (size - 1); +} + +static int ice_fwlog_alloc_ring_buffs(struct ice_fwlog_ring *rings) +{ + int i, nr_bytes; + u8 *mem; + + nr_bytes = rings->size * ICE_AQ_MAX_BUF_LEN; + mem = vzalloc(nr_bytes); + if (!mem) + return -ENOMEM; + + for (i = 0; i < rings->size; i++) { + struct ice_fwlog_data *ring = &rings->rings[i]; + + ring->data_size = ICE_AQ_MAX_BUF_LEN; + ring->data = mem; + mem += ICE_AQ_MAX_BUF_LEN; + } + + return 0; +} + +static void ice_fwlog_free_ring_buffs(struct ice_fwlog_ring *rings) +{ + int i; + + for (i = 0; i < rings->size; i++) { + struct ice_fwlog_data *ring = &rings->rings[i]; + + /* the first ring is the base memory for the whole range so + * free it + */ + if (!i) + vfree(ring->data); + + ring->data = NULL; + ring->data_size = 0; + } +} + +#define ICE_FWLOG_INDEX_TO_BYTES(n) ((128 * 1024) << (n)) +/** + * ice_fwlog_realloc_rings - reallocate the FW log rings + * @hw: pointer to the HW structure + * @index: the new index to use to allocate memory for the log data + * + */ +void ice_fwlog_realloc_rings(struct ice_hw *hw, int index) +{ + struct ice_fwlog_ring ring; + int status, ring_size; + + /* convert the number of bytes into a number of 4K buffers. externally + * the driver presents the interface to the FW log data as a number of + * bytes because that's easy for users to understand. internally the + * driver uses a ring of buffers because the driver doesn't know where + * the beginning and end of any line of log data is so the driver has + * to overwrite data as complete blocks. when the data is returned to + * the user the driver knows that the data is correct and the FW log + * can be correctly parsed by the tools + */ + ring_size = ICE_FWLOG_INDEX_TO_BYTES(index) / ICE_AQ_MAX_BUF_LEN; + if (ring_size == hw->fwlog_ring.size) + return; + + /* allocate space for the new rings and buffers then release the + * old rings and buffers. that way if we don't have enough + * memory then we at least have what we had before + */ + ring.rings = kcalloc(ring_size, sizeof(*ring.rings), GFP_KERNEL); + if (!ring.rings) + return; + + ring.size = ring_size; + + status = ice_fwlog_alloc_ring_buffs(&ring); + if (status) { + dev_warn(ice_hw_to_dev(hw), "Unable to allocate memory for FW log ring data buffers\n"); + ice_fwlog_free_ring_buffs(&ring); + kfree(ring.rings); + return; + } + + ice_fwlog_free_ring_buffs(&hw->fwlog_ring); + kfree(hw->fwlog_ring.rings); + + hw->fwlog_ring.rings = ring.rings; + hw->fwlog_ring.size = ring.size; + hw->fwlog_ring.index = index; + hw->fwlog_ring.head = 0; + hw->fwlog_ring.tail = 0; +} + +/** + * ice_fwlog_init - Initialize FW logging configuration + * @hw: pointer to the HW structure + * + * This function should be called on driver initialization during + * ice_init_hw().
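 *
 * Editor's note: a worked example of the ring sizing in
 * ice_fwlog_realloc_rings() above, assuming ICE_AQ_MAX_BUF_LEN is the 4096
 * byte admin queue buffer size used elsewhere in the driver:
 *
 *	index 0: ICE_FWLOG_INDEX_TO_BYTES(0) / 4096 = 128 KiB / 4 KiB =  32
 *	index 3: ICE_FWLOG_INDEX_TO_BYTES(3) / 4096 =   1 MiB / 4 KiB = 256
 *
 * so the default index 3 lines up with ICE_FWLOG_RING_SIZE_DFLT (256) and
 * ICE_FWLOG_RING_SIZE_INDEX_DFLT (3) in ice_fwlog.h, and every reachable
 * ring size stays a power of two, which ice_fwlog_ring_increment() relies
 * on for its "(*item + 1) & (size - 1)" wrap-around.
 *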
+ */ +int ice_fwlog_init(struct ice_hw *hw) +{ + /* only support fw log commands on PF 0 */ + if (hw->bus.func) + return -EINVAL; + + ice_fwlog_set_supported(hw); + + if (ice_fwlog_supported(hw)) { + int status; + + /* read the current config from the FW and store it */ + status = ice_fwlog_get(hw, &hw->fwlog_cfg); + if (status) + return status; + + hw->fwlog_ring.rings = kcalloc(ICE_FWLOG_RING_SIZE_DFLT, + sizeof(*hw->fwlog_ring.rings), + GFP_KERNEL); + if (!hw->fwlog_ring.rings) { + dev_warn(ice_hw_to_dev(hw), "Unable to allocate memory for FW log rings\n"); + return -ENOMEM; + } + + hw->fwlog_ring.size = ICE_FWLOG_RING_SIZE_DFLT; + hw->fwlog_ring.index = ICE_FWLOG_RING_SIZE_INDEX_DFLT; + + status = ice_fwlog_alloc_ring_buffs(&hw->fwlog_ring); + if (status) { + dev_warn(ice_hw_to_dev(hw), "Unable to allocate memory for FW log ring data buffers\n"); + ice_fwlog_free_ring_buffs(&hw->fwlog_ring); + kfree(hw->fwlog_ring.rings); + return status; + } + + ice_debugfs_fwlog_init(hw->back); + } else { + dev_warn(ice_hw_to_dev(hw), "FW logging is not supported in this NVM image. Please update the NVM to get FW log support\n"); + } + + return 0; +} + +/** + * ice_fwlog_deinit - unroll FW logging configuration + * @hw: pointer to the HW structure + * + * This function should be called in ice_deinit_hw(). + */ +void ice_fwlog_deinit(struct ice_hw *hw) +{ + struct ice_pf *pf = hw->back; + int status; + + /* only support fw log commands on PF 0 */ + if (hw->bus.func) + return; + + /* make sure FW logging is disabled to not put the FW in a weird state + * for the next driver load + */ + hw->fwlog_cfg.options &= ~ICE_FWLOG_OPTION_ARQ_ENA; + status = ice_fwlog_set(hw, &hw->fwlog_cfg); + if (status) + dev_warn(ice_hw_to_dev(hw), "Unable to turn off FW logging, status: %d\n", + status); + + kfree(pf->ice_debugfs_pf_fwlog_modules); + + pf->ice_debugfs_pf_fwlog_modules = NULL; + + status = ice_fwlog_unregister(hw); + if (status) + dev_warn(ice_hw_to_dev(hw), "Unable to unregister FW logging, status: %d\n", + status); + + if (hw->fwlog_ring.rings) { + ice_fwlog_free_ring_buffs(&hw->fwlog_ring); + kfree(hw->fwlog_ring.rings); + } +} + +/** + * ice_fwlog_supported - Cached for whether FW supports FW logging or not + * @hw: pointer to the HW structure + * + * This will always return false if called before ice_init_hw(), so it must be + * called after ice_init_hw(). 
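 *
 * Editor's note: a hedged sketch of the lifecycle implied by the functions
 * above (PF0 only; the exact call sites are assumed to be
 * ice_init_hw()/ice_deinit_hw() based on the comments in this file):
 *
 *	ice_fwlog_init(hw);                  // query FW cfg, alloc rings
 *	ice_fwlog_set(hw, &hw->fwlog_cfg);   // push module log levels
 *	ice_fwlog_register(hw);              // start delivery over the ARQ
 *	...
 *	ice_fwlog_deinit(hw);                // disable, unregister, free rings
 *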
+ */ +bool ice_fwlog_supported(struct ice_hw *hw) +{ + return hw->fwlog_supported; +} + +/** + * ice_aq_fwlog_set - Set FW logging configuration AQ command (0xFF30) + * @hw: pointer to the HW structure + * @entries: entries to configure + * @num_entries: number of @entries + * @options: options from ice_fwlog_cfg->options structure + * @log_resolution: logging resolution + */ +static int +ice_aq_fwlog_set(struct ice_hw *hw, struct ice_fwlog_module_entry *entries, + u16 num_entries, u16 options, u16 log_resolution) +{ + struct ice_aqc_fw_log_cfg_resp *fw_modules; + struct ice_aqc_fw_log *cmd; + struct ice_aq_desc desc; + int status; + int i; + + fw_modules = kcalloc(num_entries, sizeof(*fw_modules), GFP_KERNEL); + if (!fw_modules) + return -ENOMEM; + + for (i = 0; i < num_entries; i++) { + fw_modules[i].module_identifier = + cpu_to_le16(entries[i].module_id); + fw_modules[i].log_level = entries[i].log_level; + } + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logs_config); + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + + cmd = &desc.params.fw_log; + + cmd->cmd_flags = ICE_AQC_FW_LOG_CONF_SET_VALID; + cmd->ops.cfg.log_resolution = cpu_to_le16(log_resolution); + cmd->ops.cfg.mdl_cnt = cpu_to_le16(num_entries); + + if (options & ICE_FWLOG_OPTION_ARQ_ENA) + cmd->cmd_flags |= ICE_AQC_FW_LOG_CONF_AQ_EN; + if (options & ICE_FWLOG_OPTION_UART_ENA) + cmd->cmd_flags |= ICE_AQC_FW_LOG_CONF_UART_EN; + + status = ice_aq_send_cmd(hw, &desc, fw_modules, + sizeof(*fw_modules) * num_entries, + NULL); + + kfree(fw_modules); + + return status; +} + +/** + * ice_fwlog_set - Set the firmware logging settings + * @hw: pointer to the HW structure + * @cfg: config used to set firmware logging + * + * This function should be called whenever the driver needs to set the firmware + * logging configuration. It can be called on initialization, reset, or during + * runtime. + * + * If the PF wishes to receive FW logging then it must register via + * ice_fwlog_register. Note that ice_fwlog_register does not need to be called + * for init.
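 *
 * Editor's note: hedged usage sketch -- adjust one module's verbosity in the
 * cached config and push the whole table back (the lookup loop and module_id
 * variable are illustrative; real IDs are the ICE_AQC_FW_LOG_ID_* values):
 *
 *	struct ice_fwlog_cfg *cfg = &hw->fwlog_cfg;
 *	int i;
 *
 *	for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++)
 *		if (cfg->module_entries[i].module_id == module_id) {
 *			cfg->module_entries[i].log_level =
 *				ICE_FWLOG_LEVEL_VERBOSE;
 *			break;
 *		}
 *	err = ice_fwlog_set(hw, cfg);
 *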
+ */ +int ice_fwlog_set(struct ice_hw *hw, struct ice_fwlog_cfg *cfg) +{ + if (!ice_fwlog_supported(hw)) + return -EOPNOTSUPP; + + return ice_aq_fwlog_set(hw, cfg->module_entries, + ICE_AQC_FW_LOG_ID_MAX, cfg->options, + cfg->log_resolution); +} + +/** + * ice_aq_fwlog_get - Get the current firmware logging configuration (0xFF32) + * @hw: pointer to the HW structure + * @cfg: firmware logging configuration to populate + */ +static int ice_aq_fwlog_get(struct ice_hw *hw, struct ice_fwlog_cfg *cfg) +{ + struct ice_aqc_fw_log_cfg_resp *fw_modules; + struct ice_aqc_fw_log *cmd; + struct ice_aq_desc desc; + u16 module_id_cnt; + int status; + void *buf; + int i; + + memset(cfg, 0, sizeof(*cfg)); + + buf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logs_query); + cmd = &desc.params.fw_log; + + cmd->cmd_flags = ICE_AQC_FW_LOG_AQ_QUERY; + + status = ice_aq_send_cmd(hw, &desc, buf, ICE_AQ_MAX_BUF_LEN, NULL); + if (status) { + ice_debug(hw, ICE_DBG_FW_LOG, "Failed to get FW log configuration\n"); + goto status_out; + } + + module_id_cnt = le16_to_cpu(cmd->ops.cfg.mdl_cnt); + if (module_id_cnt < ICE_AQC_FW_LOG_ID_MAX) { + ice_debug(hw, ICE_DBG_FW_LOG, "FW returned less than the expected number of FW log module IDs\n"); + } else if (module_id_cnt > ICE_AQC_FW_LOG_ID_MAX) { + ice_debug(hw, ICE_DBG_FW_LOG, "FW returned more than expected number of FW log module IDs, setting module_id_cnt to software expected max %u\n", + ICE_AQC_FW_LOG_ID_MAX); + module_id_cnt = ICE_AQC_FW_LOG_ID_MAX; + } + + cfg->log_resolution = le16_to_cpu(cmd->ops.cfg.log_resolution); + if (cmd->cmd_flags & ICE_AQC_FW_LOG_CONF_AQ_EN) + cfg->options |= ICE_FWLOG_OPTION_ARQ_ENA; + if (cmd->cmd_flags & ICE_AQC_FW_LOG_CONF_UART_EN) + cfg->options |= ICE_FWLOG_OPTION_UART_ENA; + if (cmd->cmd_flags & ICE_AQC_FW_LOG_QUERY_REGISTERED) + cfg->options |= ICE_FWLOG_OPTION_IS_REGISTERED; + + fw_modules = (struct ice_aqc_fw_log_cfg_resp *)buf; + + for (i = 0; i < module_id_cnt; i++) { + struct ice_aqc_fw_log_cfg_resp *fw_module = &fw_modules[i]; + + cfg->module_entries[i].module_id = + le16_to_cpu(fw_module->module_identifier); + cfg->module_entries[i].log_level = fw_module->log_level; + } + +status_out: + kfree(buf); + return status; +} + +/** + * ice_fwlog_get - Get the firmware logging settings + * @hw: pointer to the HW structure + * @cfg: config to populate based on current firmware logging settings + */ +int ice_fwlog_get(struct ice_hw *hw, struct ice_fwlog_cfg *cfg) +{ + if (!ice_fwlog_supported(hw)) + return -EOPNOTSUPP; + + return ice_aq_fwlog_get(hw, cfg); +} + +/** + * ice_aq_fwlog_register - Register PF for firmware logging events (0xFF31) + * @hw: pointer to the HW structure + * @reg: true to register and false to unregister + */ +static int ice_aq_fwlog_register(struct ice_hw *hw, bool reg) +{ + struct ice_aq_desc desc; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logs_register); + + if (reg) + desc.params.fw_log.cmd_flags = ICE_AQC_FW_LOG_AQ_REGISTER; + + return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); +} + +/** + * ice_fwlog_register - Register the PF for firmware logging + * @hw: pointer to the HW structure + * + * After this call the PF will start to receive firmware logging based on the + * configuration set in ice_fwlog_set. 
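 *
 * Editor's note: registration only toggles delivery and mirrors the state in
 * ICE_FWLOG_OPTION_IS_REGISTERED; the module/level table itself is owned by
 * ice_fwlog_set()/ice_fwlog_get(), so a PF may stay registered while
 * reconfiguring levels, or configure levels without ever registering.
 *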
+ */ +int ice_fwlog_register(struct ice_hw *hw) +{ + int status; + + if (!ice_fwlog_supported(hw)) + return -EOPNOTSUPP; + + status = ice_aq_fwlog_register(hw, true); + if (status) + ice_debug(hw, ICE_DBG_FW_LOG, "Failed to register for firmware logging events over ARQ\n"); + else + hw->fwlog_cfg.options |= ICE_FWLOG_OPTION_IS_REGISTERED; + + return status; +} + +/** + * ice_fwlog_unregister - Unregister the PF from firmware logging + * @hw: pointer to the HW structure + */ +int ice_fwlog_unregister(struct ice_hw *hw) +{ + int status; + + if (!ice_fwlog_supported(hw)) + return -EOPNOTSUPP; + + status = ice_aq_fwlog_register(hw, false); + if (status) + ice_debug(hw, ICE_DBG_FW_LOG, "Failed to unregister from firmware logging events over ARQ\n"); + else + hw->fwlog_cfg.options &= ~ICE_FWLOG_OPTION_IS_REGISTERED; + + return status; +} + +/** + * ice_fwlog_set_supported - Set if FW logging is supported by FW + * @hw: pointer to the HW struct + * + * If FW returns success to the ice_aq_fwlog_get call then it supports FW + * logging, else it doesn't. Set the fwlog_supported flag accordingly. + * + * This function is only meant to be called during driver init to determine if + * the FW supports FW logging. + */ +void ice_fwlog_set_supported(struct ice_hw *hw) +{ + struct ice_fwlog_cfg *cfg; + int status; + + hw->fwlog_supported = false; + + cfg = kzalloc(sizeof(*cfg), GFP_KERNEL); + if (!cfg) + return; + + /* don't call ice_fwlog_get() because that would check to see if FW + * logging is supported which is what the driver is determining now + */ + status = ice_aq_fwlog_get(hw, cfg); + if (status) + ice_debug(hw, ICE_DBG_FW_LOG, "ice_aq_fwlog_get failed, FW logging is not supported on this version of FW, status %d\n", + status); + else + hw->fwlog_supported = true; + + kfree(cfg); +} diff --git a/drivers/net/ethernet/intel/ice/ice_fwlog.h b/drivers/net/ethernet/intel/ice/ice_fwlog.h new file mode 100644 index 0000000000..287e71fa4b --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_fwlog.h @@ -0,0 +1,79 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2022, Intel Corporation. */ + +#ifndef _ICE_FWLOG_H_ +#define _ICE_FWLOG_H_ +#include "ice_adminq_cmd.h" + +struct ice_hw; + +/* Only a single log level should be set and all log levels under the set value + * are enabled, e.g.
if log level is set to ICE_FWLOG_LEVEL_VERBOSE, then all + * other log levels are included (except ICE_FWLOG_LEVEL_NONE) + */ +enum ice_fwlog_level { + ICE_FWLOG_LEVEL_NONE = 0, + ICE_FWLOG_LEVEL_ERROR = 1, + ICE_FWLOG_LEVEL_WARNING = 2, + ICE_FWLOG_LEVEL_NORMAL = 3, + ICE_FWLOG_LEVEL_VERBOSE = 4, + ICE_FWLOG_LEVEL_INVALID, /* all values >= this entry are invalid */ +}; + +struct ice_fwlog_module_entry { + /* module ID for the corresponding firmware logging event */ + u16 module_id; + /* verbosity level for the module_id */ + u8 log_level; +}; + +struct ice_fwlog_cfg { + /* list of modules for configuring log level */ + struct ice_fwlog_module_entry module_entries[ICE_AQC_FW_LOG_ID_MAX]; + /* options used to configure firmware logging */ + u16 options; +#define ICE_FWLOG_OPTION_ARQ_ENA BIT(0) +#define ICE_FWLOG_OPTION_UART_ENA BIT(1) + /* set before calling ice_fwlog_init() so the PF registers for firmware + * logging on initialization + */ +#define ICE_FWLOG_OPTION_REGISTER_ON_INIT BIT(2) + /* set in the ice_fwlog_get() response if the PF is registered for FW + * logging events over ARQ + */ +#define ICE_FWLOG_OPTION_IS_REGISTERED BIT(3) + + /* minimum number of log events sent per Admin Receive Queue event */ + u16 log_resolution; +}; + +struct ice_fwlog_data { + u16 data_size; + u8 *data; +}; + +struct ice_fwlog_ring { + struct ice_fwlog_data *rings; + u16 index; + u16 size; + u16 head; + u16 tail; +}; + +#define ICE_FWLOG_RING_SIZE_INDEX_DFLT 3 +#define ICE_FWLOG_RING_SIZE_DFLT 256 +#define ICE_FWLOG_RING_SIZE_MAX 512 + +bool ice_fwlog_ring_full(struct ice_fwlog_ring *rings); +bool ice_fwlog_ring_empty(struct ice_fwlog_ring *rings); +void ice_fwlog_ring_increment(u16 *item, u16 size); +void ice_fwlog_set_supported(struct ice_hw *hw); +bool ice_fwlog_supported(struct ice_hw *hw); +int ice_fwlog_init(struct ice_hw *hw); +void ice_fwlog_deinit(struct ice_hw *hw); +int ice_fwlog_set(struct ice_hw *hw, struct ice_fwlog_cfg *cfg); +int ice_fwlog_get(struct ice_hw *hw, struct ice_fwlog_cfg *cfg); +int ice_fwlog_register(struct ice_hw *hw); +int ice_fwlog_unregister(struct ice_hw *hw); +void ice_fwlog_realloc_rings(struct ice_hw *hw, int index); +#endif /* _ICE_FWLOG_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h index 86936b758a..cfac1d432c 100644 --- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h +++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h @@ -200,6 +200,8 @@ #define GLINT_VECT2FUNC_PF_NUM_M ICE_M(0x7, 12) #define GLINT_VECT2FUNC_IS_PF_S 16 #define GLINT_VECT2FUNC_IS_PF_M BIT(16) +#define PFINT_ALLOC 0x001D2600 +#define PFINT_ALLOC_FIRST ICE_M(0x7FF, 0) #define PFINT_FW_CTL 0x0016C800 #define PFINT_FW_CTL_MSIX_INDX_M ICE_M(0x7FF, 0) #define PFINT_FW_CTL_ITR_INDX_S 11 @@ -404,6 +406,10 @@ #define GLQF_HMASK_SEL(_i) (0x00410000 + ((_i) * 4)) #define GLQF_HMASK_SEL_MAX_INDEX 127 #define GLQF_HMASK_SEL_MASK_SEL_S 0 +#define GLQF_HSYMM(_i, _j) (0x0040F000 + ((_i) * 4 + (_j) * 512)) +#define GLQF_HSYMM_REG_SIZE 4 +#define GLQF_HSYMM_REG_PER_PROF 6 +#define GLQF_HSYMM_ENABLE_BIT BIT(7) #define E800_PFQF_FD_CNT_FD_GCNT_M GENMASK(14, 0) #define E830_PFQF_FD_CNT_FD_GCNT_M GENMASK(15, 0) #define E800_PFQF_FD_CNT_FD_BCNT_M GENMASK(30, 16) diff --git a/drivers/net/ethernet/intel/ice/ice_hwmon.c b/drivers/net/ethernet/intel/ice/ice_hwmon.c new file mode 100644 index 0000000000..e4c2c1bff6 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_hwmon.c @@ -0,0 +1,126 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C)
2023, Intel Corporation. */ + +#include "ice.h" +#include "ice_hwmon.h" +#include "ice_adminq_cmd.h" + +#include <linux/hwmon.h> + +#define TEMP_FROM_REG(reg) ((reg) * 1000) + +static const struct hwmon_channel_info *ice_hwmon_info[] = { + HWMON_CHANNEL_INFO(temp, + HWMON_T_INPUT | HWMON_T_MAX | + HWMON_T_CRIT | HWMON_T_EMERGENCY), + NULL +}; + +static int ice_hwmon_read(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long *val) +{ + struct ice_aqc_get_sensor_reading_resp resp; + struct ice_pf *pf = dev_get_drvdata(dev); + int ret; + + if (type != hwmon_temp) + return -EOPNOTSUPP; + + ret = ice_aq_get_sensor_reading(&pf->hw, &resp); + if (ret) { + dev_warn_ratelimited(dev, + "%s HW read failure (%d)\n", + __func__, + ret); + return ret; + } + + switch (attr) { + case hwmon_temp_input: + *val = TEMP_FROM_REG(resp.data.s0f0.temp); + break; + case hwmon_temp_max: + *val = TEMP_FROM_REG(resp.data.s0f0.temp_warning_threshold); + break; + case hwmon_temp_crit: + *val = TEMP_FROM_REG(resp.data.s0f0.temp_critical_threshold); + break; + case hwmon_temp_emergency: + *val = TEMP_FROM_REG(resp.data.s0f0.temp_fatal_threshold); + break; + default: + dev_dbg(dev, "%s unsupported attribute (%d)\n", + __func__, attr); + return -EOPNOTSUPP; + } + + return 0; +} + +static umode_t ice_hwmon_is_visible(const void *data, + enum hwmon_sensor_types type, u32 attr, + int channel) +{ + if (type != hwmon_temp) + return 0; + + switch (attr) { + case hwmon_temp_input: + case hwmon_temp_crit: + case hwmon_temp_max: + case hwmon_temp_emergency: + return 0444; + } + + return 0; +} + +static const struct hwmon_ops ice_hwmon_ops = { + .is_visible = ice_hwmon_is_visible, + .read = ice_hwmon_read +}; + +static const struct hwmon_chip_info ice_chip_info = { + .ops = &ice_hwmon_ops, + .info = ice_hwmon_info +}; + +static bool ice_is_internal_reading_supported(struct ice_pf *pf) +{ + /* Only the first PF will report temperature for a chip. + * Note that internal temp reading is not supported + * for older FW (< v4.30). + */ + if (pf->hw.pf_id) + return false; + + unsigned long sensors = pf->hw.dev_caps.supported_sensors; + + return _test_bit(ICE_SENSOR_SUPPORT_E810_INT_TEMP_BIT, &sensors); +}; + +void ice_hwmon_init(struct ice_pf *pf) +{ + struct device *dev = ice_pf_to_dev(pf); + struct device *hdev; + + if (!ice_is_internal_reading_supported(pf)) + return; + + hdev = hwmon_device_register_with_info(dev, "ice", pf, &ice_chip_info, + NULL); + if (IS_ERR(hdev)) { + dev_warn(dev, + "hwmon_device_register_with_info returns error (%ld)", + PTR_ERR(hdev)); + return; + } + pf->hwmon_dev = hdev; +} + +void ice_hwmon_exit(struct ice_pf *pf) +{ + if (!pf->hwmon_dev) + return; + hwmon_device_unregister(pf->hwmon_dev); +} diff --git a/drivers/net/ethernet/intel/ice/ice_hwmon.h b/drivers/net/ethernet/intel/ice/ice_hwmon.h new file mode 100644 index 0000000000..d66d40354f --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_hwmon.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2023, Intel Corporation.
*/ + +#ifndef _ICE_HWMON_H_ +#define _ICE_HWMON_H_ + +#ifdef CONFIG_ICE_HWMON +void ice_hwmon_init(struct ice_pf *pf); +void ice_hwmon_exit(struct ice_pf *pf); +#else /* CONFIG_ICE_HWMON */ +static inline void ice_hwmon_init(struct ice_pf *pf) { } +static inline void ice_hwmon_exit(struct ice_pf *pf) { } +#endif /* CONFIG_ICE_HWMON */ + +#endif /* _ICE_HWMON_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c index b47cd43ae8..a7a3428099 100644 --- a/drivers/net/ethernet/intel/ice/ice_lag.c +++ b/drivers/net/ethernet/intel/ice/ice_lag.c @@ -151,6 +151,27 @@ ice_lag_find_hw_by_lport(struct ice_lag *lag, u8 lport) return NULL; } +/** + * ice_pkg_has_lport_extract - check if lport extraction supported + * @hw: HW struct + */ +static bool ice_pkg_has_lport_extract(struct ice_hw *hw) +{ + int i; + + for (i = 0; i < hw->blk[ICE_BLK_SW].es.count; i++) { + u16 offset; + u8 fv_prot; + + ice_find_prot_off(hw, ICE_BLK_SW, ICE_SW_DEFAULT_PROFILE, i, + &fv_prot, &offset); + if (fv_prot == ICE_FV_PROT_MDID && + offset == ICE_LP_EXT_BUF_OFFSET) + return true; + } + return false; +} + /** * ice_lag_find_primary - returns pointer to primary interfaces lag struct * @lag: local interfaces lag struct @@ -208,8 +229,7 @@ ice_lag_cfg_fltr(struct ice_lag *lag, u32 act, u16 recipe_id, u16 *rule_idx, eth_hdr = s_rule->hdr_data; ice_fill_eth_hdr(eth_hdr); - act |= (vsi_num << ICE_SINGLE_ACT_VSI_ID_S) & - ICE_SINGLE_ACT_VSI_ID_M; + act |= FIELD_PREP(ICE_SINGLE_ACT_VSI_ID_M, vsi_num); s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX); s_rule->recipe_id = cpu_to_le16(recipe_id); @@ -754,9 +774,7 @@ ice_lag_cfg_cp_fltr(struct ice_lag *lag, bool add) s_rule->act = cpu_to_le32(ICE_FWD_TO_VSI | ICE_SINGLE_ACT_LAN_ENABLE | ICE_SINGLE_ACT_VALID_BIT | - ((vsi->vsi_num << - ICE_SINGLE_ACT_VSI_ID_S) & - ICE_SINGLE_ACT_VSI_ID_M)); + FIELD_PREP(ICE_SINGLE_ACT_VSI_ID_M, vsi->vsi_num)); s_rule->hdr_len = cpu_to_le16(ICE_LAG_SRIOV_TRAIN_PKT_LEN); memcpy(s_rule->hdr_data, lacp_train_pkt, LACP_TRAIN_PKT_LEN); opc = ice_aqc_opc_add_sw_rules; @@ -1209,7 +1227,7 @@ static void ice_lag_del_prune_list(struct ice_lag *lag, struct ice_pf *event_pf) } /** - * ice_lag_init_feature_support_flag - Check for NVM support for LAG + * ice_lag_init_feature_support_flag - Check for package and NVM support for LAG * @pf: PF struct */ static void ice_lag_init_feature_support_flag(struct ice_pf *pf) @@ -1222,7 +1240,7 @@ static void ice_lag_init_feature_support_flag(struct ice_pf *pf) else ice_clear_feature_support(pf, ICE_F_ROCE_LAG); - if (caps->sriov_lag) + if (caps->sriov_lag && ice_pkg_has_lport_extract(&pf->hw)) ice_set_feature_support(pf, ICE_F_SRIOV_LAG); else ice_clear_feature_support(pf, ICE_F_SRIOV_LAG); @@ -2023,7 +2041,7 @@ int ice_init_lag(struct ice_pf *pf) /* associate recipes to profiles */ for (n = 0; n < ICE_PROFID_IPV6_GTPU_IPV6_TCP_INNER; n++) { err = ice_aq_get_recipe_to_profile(&pf->hw, n, - (u8 *)&recipe_bits, NULL); + &recipe_bits, NULL); if (err) continue; @@ -2031,7 +2049,7 @@ int ice_init_lag(struct ice_pf *pf) recipe_bits |= BIT(lag->pf_recipe) | BIT(lag->lport_recipe); ice_aq_map_recipe_to_profile(&pf->hw, n, - (u8 *)&recipe_bits, NULL); + recipe_bits, NULL); } } diff --git a/drivers/net/ethernet/intel/ice/ice_lag.h b/drivers/net/ethernet/intel/ice/ice_lag.h index ede833dfa6..183b38792e 100644 --- a/drivers/net/ethernet/intel/ice/ice_lag.h +++ b/drivers/net/ethernet/intel/ice/ice_lag.h @@ -17,6 +17,9 @@ enum ice_lag_role { #define ICE_LAG_INVALID_PORT 0xFF #define 
ICE_LAG_RESET_RETRIES 5 +#define ICE_SW_DEFAULT_PROFILE 0 +#define ICE_FV_PROT_MDID 255 +#define ICE_LP_EXT_BUF_OFFSET 32 struct ice_pf; struct ice_vf; diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h index 89f986a75c..d384ddfcb8 100644 --- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h +++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h @@ -673,6 +673,212 @@ struct ice_tlan_ctx { * Use the enum ice_rx_l2_ptype to decode the packet type * ENDIF */ +#define ICE_PTYPES \ + /* L2 Packet types */ \ + ICE_PTT_UNUSED_ENTRY(0), \ + ICE_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), \ + ICE_PTT_UNUSED_ENTRY(2), \ + ICE_PTT_UNUSED_ENTRY(3), \ + ICE_PTT_UNUSED_ENTRY(4), \ + ICE_PTT_UNUSED_ENTRY(5), \ + ICE_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), \ + ICE_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), \ + ICE_PTT_UNUSED_ENTRY(8), \ + ICE_PTT_UNUSED_ENTRY(9), \ + ICE_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), \ + ICE_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), \ + ICE_PTT_UNUSED_ENTRY(12), \ + ICE_PTT_UNUSED_ENTRY(13), \ + ICE_PTT_UNUSED_ENTRY(14), \ + ICE_PTT_UNUSED_ENTRY(15), \ + ICE_PTT_UNUSED_ENTRY(16), \ + ICE_PTT_UNUSED_ENTRY(17), \ + ICE_PTT_UNUSED_ENTRY(18), \ + ICE_PTT_UNUSED_ENTRY(19), \ + ICE_PTT_UNUSED_ENTRY(20), \ + ICE_PTT_UNUSED_ENTRY(21), \ + \ + /* Non Tunneled IPv4 */ \ + ICE_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3), \ + ICE_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3), \ + ICE_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4), \ + ICE_PTT_UNUSED_ENTRY(25), \ + ICE_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4), \ + ICE_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4), \ + ICE_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4), \ + \ + /* IPv4 --> IPv4 */ \ + ICE_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3), \ + ICE_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3), \ + ICE_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4), \ + ICE_PTT_UNUSED_ENTRY(32), \ + ICE_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4), \ + ICE_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), \ + ICE_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), \ + \ + /* IPv4 --> IPv6 */ \ + ICE_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3), \ + ICE_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3), \ + ICE_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4), \ + ICE_PTT_UNUSED_ENTRY(39), \ + ICE_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4), \ + ICE_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), \ + ICE_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), \ + \ + /* IPv4 --> GRE/NAT */ \ + ICE_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), \ + \ + /* IPv4 --> GRE/NAT --> IPv4 */ \ + ICE_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), \ + ICE_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), \ + ICE_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), \ + ICE_PTT_UNUSED_ENTRY(47), \ + ICE_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), \ + ICE_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), \ + ICE_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), \ + \ + /* IPv4 --> GRE/NAT --> IPv6 */ \ + ICE_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), \ + ICE_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), \ + ICE_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), \ + ICE_PTT_UNUSED_ENTRY(54), \ + ICE_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), \ + 
ICE_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), \ + ICE_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), \ + \ + /* IPv4 --> GRE/NAT --> MAC */ \ + ICE_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), \ + \ + /* IPv4 --> GRE/NAT --> MAC --> IPv4 */ \ + ICE_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), \ + ICE_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), \ + ICE_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), \ + ICE_PTT_UNUSED_ENTRY(62), \ + ICE_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), \ + ICE_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), \ + ICE_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), \ + \ + /* IPv4 --> GRE/NAT -> MAC --> IPv6 */ \ + ICE_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), \ + ICE_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), \ + ICE_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), \ + ICE_PTT_UNUSED_ENTRY(69), \ + ICE_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), \ + ICE_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), \ + ICE_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), \ + \ + /* IPv4 --> GRE/NAT --> MAC/VLAN */ \ + ICE_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), \ + \ + /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */ \ + ICE_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), \ + ICE_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), \ + ICE_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), \ + ICE_PTT_UNUSED_ENTRY(77), \ + ICE_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), \ + ICE_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), \ + ICE_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), \ + \ + /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */ \ + ICE_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), \ + ICE_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), \ + ICE_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), \ + ICE_PTT_UNUSED_ENTRY(84), \ + ICE_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), \ + ICE_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), \ + ICE_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), \ + \ + /* Non Tunneled IPv6 */ \ + ICE_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3), \ + ICE_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3), \ + ICE_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4), \ + ICE_PTT_UNUSED_ENTRY(91), \ + ICE_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4), \ + ICE_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4), \ + ICE_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4), \ + \ + /* IPv6 --> IPv4 */ \ + ICE_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3), \ + ICE_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3), \ + ICE_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4), \ + ICE_PTT_UNUSED_ENTRY(98), \ + ICE_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4), \ + ICE_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), \ + ICE_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), \ + \ + /* IPv6 --> IPv6 */ \ + ICE_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3), \ + ICE_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3), \ + ICE_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4), \ + ICE_PTT_UNUSED_ENTRY(105), \ + ICE_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, 
NOF, TCP, PAY4), \ + ICE_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), \ + ICE_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), \ + \ + /* IPv6 --> GRE/NAT */ \ + ICE_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), \ + \ + /* IPv6 --> GRE/NAT -> IPv4 */ \ + ICE_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), \ + ICE_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), \ + ICE_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), \ + ICE_PTT_UNUSED_ENTRY(113), \ + ICE_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), \ + ICE_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), \ + ICE_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), \ + \ + /* IPv6 --> GRE/NAT -> IPv6 */ \ + ICE_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), \ + ICE_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), \ + ICE_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), \ + ICE_PTT_UNUSED_ENTRY(120), \ + ICE_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), \ + ICE_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), \ + ICE_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), \ + \ + /* IPv6 --> GRE/NAT -> MAC */ \ + ICE_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), \ + \ + /* IPv6 --> GRE/NAT -> MAC -> IPv4 */ \ + ICE_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), \ + ICE_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), \ + ICE_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), \ + ICE_PTT_UNUSED_ENTRY(128), \ + ICE_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), \ + ICE_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), \ + ICE_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), \ + \ + /* IPv6 --> GRE/NAT -> MAC -> IPv6 */ \ + ICE_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), \ + ICE_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), \ + ICE_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), \ + ICE_PTT_UNUSED_ENTRY(135), \ + ICE_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), \ + ICE_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), \ + ICE_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), \ + \ + /* IPv6 --> GRE/NAT -> MAC/VLAN */ \ + ICE_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), \ + \ + /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */ \ + ICE_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), \ + ICE_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), \ + ICE_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), \ + ICE_PTT_UNUSED_ENTRY(143), \ + ICE_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), \ + ICE_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), \ + ICE_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), \ + \ + /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */ \ + ICE_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), \ + ICE_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), \ + ICE_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), \ + ICE_PTT_UNUSED_ENTRY(150), \ + ICE_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), \ + ICE_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), \ + ICE_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), + +#define ICE_NUM_DEFINED_PTYPES 154 /* macro to make the table 
lines short, use explicit indexing with [PTYPE] */ #define ICE_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\ @@ -695,212 +901,10 @@ struct ice_tlan_ctx { /* Lookup table mapping in the 10-bit HW PTYPE to the bit field for decoding */ static const struct ice_rx_ptype_decoded ice_ptype_lkup[BIT(10)] = { - /* L2 Packet types */ - ICE_PTT_UNUSED_ENTRY(0), - ICE_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), - ICE_PTT_UNUSED_ENTRY(2), - ICE_PTT_UNUSED_ENTRY(3), - ICE_PTT_UNUSED_ENTRY(4), - ICE_PTT_UNUSED_ENTRY(5), - ICE_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), - ICE_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), - ICE_PTT_UNUSED_ENTRY(8), - ICE_PTT_UNUSED_ENTRY(9), - ICE_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), - ICE_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), - ICE_PTT_UNUSED_ENTRY(12), - ICE_PTT_UNUSED_ENTRY(13), - ICE_PTT_UNUSED_ENTRY(14), - ICE_PTT_UNUSED_ENTRY(15), - ICE_PTT_UNUSED_ENTRY(16), - ICE_PTT_UNUSED_ENTRY(17), - ICE_PTT_UNUSED_ENTRY(18), - ICE_PTT_UNUSED_ENTRY(19), - ICE_PTT_UNUSED_ENTRY(20), - ICE_PTT_UNUSED_ENTRY(21), - - /* Non Tunneled IPv4 */ - ICE_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3), - ICE_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3), - ICE_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4), - ICE_PTT_UNUSED_ENTRY(25), - ICE_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4), - ICE_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4), - ICE_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4), - - /* IPv4 --> IPv4 */ - ICE_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3), - ICE_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3), - ICE_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4), - ICE_PTT_UNUSED_ENTRY(32), - ICE_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4), - ICE_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), - ICE_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), - - /* IPv4 --> IPv6 */ - ICE_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3), - ICE_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3), - ICE_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4), - ICE_PTT_UNUSED_ENTRY(39), - ICE_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4), - ICE_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), - ICE_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), - - /* IPv4 --> GRE/NAT */ - ICE_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), - - /* IPv4 --> GRE/NAT --> IPv4 */ - ICE_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), - ICE_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), - ICE_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), - ICE_PTT_UNUSED_ENTRY(47), - ICE_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), - ICE_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), - ICE_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), - - /* IPv4 --> GRE/NAT --> IPv6 */ - ICE_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), - ICE_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), - ICE_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), - ICE_PTT_UNUSED_ENTRY(54), - ICE_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), - ICE_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), - ICE_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), - - /* IPv4 --> GRE/NAT --> MAC */ - ICE_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), - - /* IPv4 --> GRE/NAT --> MAC --> IPv4 */ - ICE_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), - ICE_PTT(60, IP, 
IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), - ICE_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), - ICE_PTT_UNUSED_ENTRY(62), - ICE_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), - ICE_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), - ICE_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), - - /* IPv4 --> GRE/NAT -> MAC --> IPv6 */ - ICE_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), - ICE_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), - ICE_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), - ICE_PTT_UNUSED_ENTRY(69), - ICE_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), - ICE_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), - ICE_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), - - /* IPv4 --> GRE/NAT --> MAC/VLAN */ - ICE_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), - - /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */ - ICE_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), - ICE_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), - ICE_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), - ICE_PTT_UNUSED_ENTRY(77), - ICE_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), - ICE_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), - ICE_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), - - /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */ - ICE_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), - ICE_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), - ICE_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), - ICE_PTT_UNUSED_ENTRY(84), - ICE_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), - ICE_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), - ICE_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), - - /* Non Tunneled IPv6 */ - ICE_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3), - ICE_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3), - ICE_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4), - ICE_PTT_UNUSED_ENTRY(91), - ICE_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4), - ICE_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4), - ICE_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4), - - /* IPv6 --> IPv4 */ - ICE_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3), - ICE_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3), - ICE_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4), - ICE_PTT_UNUSED_ENTRY(98), - ICE_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4), - ICE_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), - ICE_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), - - /* IPv6 --> IPv6 */ - ICE_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3), - ICE_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3), - ICE_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4), - ICE_PTT_UNUSED_ENTRY(105), - ICE_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4), - ICE_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), - ICE_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), - - /* IPv6 --> GRE/NAT */ - ICE_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), - - /* IPv6 --> GRE/NAT -> IPv4 */ - ICE_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), - ICE_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), - ICE_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), - ICE_PTT_UNUSED_ENTRY(113), - 
ICE_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), - ICE_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), - ICE_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), - - /* IPv6 --> GRE/NAT -> IPv6 */ - ICE_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), - ICE_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), - ICE_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), - ICE_PTT_UNUSED_ENTRY(120), - ICE_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), - ICE_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), - ICE_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), - - /* IPv6 --> GRE/NAT -> MAC */ - ICE_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), - - /* IPv6 --> GRE/NAT -> MAC -> IPv4 */ - ICE_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), - ICE_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), - ICE_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), - ICE_PTT_UNUSED_ENTRY(128), - ICE_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), - ICE_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), - ICE_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), - - /* IPv6 --> GRE/NAT -> MAC -> IPv6 */ - ICE_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), - ICE_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), - ICE_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), - ICE_PTT_UNUSED_ENTRY(135), - ICE_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), - ICE_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), - ICE_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), - - /* IPv6 --> GRE/NAT -> MAC/VLAN */ - ICE_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), - - /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */ - ICE_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), - ICE_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), - ICE_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), - ICE_PTT_UNUSED_ENTRY(143), - ICE_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), - ICE_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), - ICE_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), - - /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */ - ICE_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), - ICE_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), - ICE_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), - ICE_PTT_UNUSED_ENTRY(150), - ICE_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), - ICE_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), - ICE_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), + ICE_PTYPES /* unused entries */ - [154 ... 1023] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 } + [ICE_NUM_DEFINED_PTYPES ... 1023] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 } }; static inline struct ice_rx_ptype_decoded ice_decode_rx_desc_ptype(u16 ptype) diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index c01950de44..cfc20684f2 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -212,11 +212,18 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi) vsi->alloc_txq)); break; case ICE_VSI_SWITCHDEV_CTRL: - /* The number of queues for ctrl VSI is equal to number of VFs. 
+ /* The number of queues for ctrl VSI is equal to the number of PRs * Each ring is associated to the corresponding VF_PR netdev. + * Tx and Rx rings are always equal */ - vsi->alloc_txq = ice_get_num_vfs(pf); - vsi->alloc_rxq = vsi->alloc_txq; + if (vsi->req_txq && vsi->req_rxq) { + vsi->alloc_txq = vsi->req_txq; + vsi->alloc_rxq = vsi->req_rxq; + } else { + vsi->alloc_txq = 1; + vsi->alloc_rxq = 1; + } + vsi->num_q_vectors = 1; break; case ICE_VSI_VF: @@ -519,16 +526,14 @@ static irqreturn_t ice_eswitch_msix_clean_rings(int __always_unused irq, void *d { struct ice_q_vector *q_vector = (struct ice_q_vector *)data; struct ice_pf *pf = q_vector->vsi->back; - struct ice_vf *vf; - unsigned int bkt; + struct ice_repr *repr; + unsigned long id; if (!q_vector->tx.tx_ring && !q_vector->rx.rx_ring) return IRQ_HANDLED; - rcu_read_lock(); - ice_for_each_vf_rcu(pf, bkt, vf) - napi_schedule(&vf->repr->q_vector->napi); - rcu_read_unlock(); + xa_for_each(&pf->eswitch.reprs, id, repr) + napi_schedule(&repr->q_vector->napi); return IRQ_HANDLED; } @@ -969,9 +974,8 @@ static void ice_set_dflt_vsi_ctx(struct ice_hw *hw, struct ice_vsi_ctx *ctxt) /* Traffic from VSI can be sent to LAN */ ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA; /* allow all untagged/tagged packets by default on Tx */ - ctxt->info.inner_vlan_flags = ((ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL & - ICE_AQ_VSI_INNER_VLAN_TX_MODE_M) >> - ICE_AQ_VSI_INNER_VLAN_TX_MODE_S); + ctxt->info.inner_vlan_flags = FIELD_PREP(ICE_AQ_VSI_INNER_VLAN_TX_MODE_M, + ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL); /* SVM - by default bits 3 and 4 in inner_vlan_flags are 0's which * results in legacy behavior (show VLAN, DEI, and UP) in descriptor. * @@ -982,13 +986,11 @@ FIELD_PREP(ICE_AQ_VSI_INNER_VLAN_EMODE_M, ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING); ctxt->info.outer_vlan_flags = - (ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL << - ICE_AQ_VSI_OUTER_VLAN_TX_MODE_S) & - ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M; + FIELD_PREP(ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M, + ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL); ctxt->info.outer_vlan_flags |= - (ICE_AQ_VSI_OUTER_TAG_VLAN_8100 << - ICE_AQ_VSI_OUTER_TAG_TYPE_S) & - ICE_AQ_VSI_OUTER_TAG_TYPE_M; + FIELD_PREP(ICE_AQ_VSI_OUTER_TAG_TYPE_M, + ICE_AQ_VSI_OUTER_TAG_VLAN_8100); ctxt->info.outer_vlan_flags |= FIELD_PREP(ICE_AQ_VSI_OUTER_VLAN_EMODE_M, ICE_AQ_VSI_OUTER_VLAN_EMODE_NOTHING); @@ -1067,10 +1069,8 @@ static int ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt) vsi->tc_cfg.tc_info[i].qcount_tx = num_txq_per_tc; vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++; - qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) & - ICE_AQ_VSI_TC_Q_OFFSET_M) | - ((pow << ICE_AQ_VSI_TC_Q_NUM_S) & - ICE_AQ_VSI_TC_Q_NUM_M); + qmap = FIELD_PREP(ICE_AQ_VSI_TC_Q_OFFSET_M, offset); + qmap |= FIELD_PREP(ICE_AQ_VSI_TC_Q_NUM_M, pow); offset += num_rxq_per_tc; tx_count += num_txq_per_tc; ctxt->info.tc_mapping[i] = cpu_to_le16(qmap); @@ -1153,18 +1153,14 @@ static void ice_set_fd_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi) ctxt->info.max_fd_fltr_shared = cpu_to_le16(vsi->num_bfltr); /* default queue index within the VSI of the default FD */ - val = ((dflt_q << ICE_AQ_VSI_FD_DEF_Q_S) & - ICE_AQ_VSI_FD_DEF_Q_M); + val = FIELD_PREP(ICE_AQ_VSI_FD_DEF_Q_M, dflt_q); /* target queue or queue group to the FD filter */ - val |= ((dflt_q_group << ICE_AQ_VSI_FD_DEF_GRP_S) & - ICE_AQ_VSI_FD_DEF_GRP_M); + val |= FIELD_PREP(ICE_AQ_VSI_FD_DEF_GRP_M, dflt_q_group); ctxt->info.fd_def_q = cpu_to_le16(val); /* queue index on
which FD filter completion is reported */ - val = ((report_q << ICE_AQ_VSI_FD_REPORT_Q_S) & - ICE_AQ_VSI_FD_REPORT_Q_M); + val = FIELD_PREP(ICE_AQ_VSI_FD_REPORT_Q_M, report_q); /* priority of the default qindex action */ - val |= ((dflt_q_prio << ICE_AQ_VSI_FD_DEF_PRIORITY_S) & - ICE_AQ_VSI_FD_DEF_PRIORITY_M); + val |= FIELD_PREP(ICE_AQ_VSI_FD_DEF_PRIORITY_M, dflt_q_prio); ctxt->info.fd_report_opt = cpu_to_le16(val); } @@ -1187,12 +1183,10 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi) case ICE_VSI_PF: /* PF VSI will inherit RSS instance of PF */ lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF; - hash_type = ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ; break; case ICE_VSI_VF: /* VF VSI will gets a small RSS table which is a VSI LUT type */ lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI; - hash_type = ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ; break; default: dev_dbg(dev, "Unsupported VSI type %s\n", @@ -1200,9 +1194,12 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi) return; } - ctxt->info.q_opt_rss = ((lut_type << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) & - ICE_AQ_VSI_Q_OPT_RSS_LUT_M) | - (hash_type & ICE_AQ_VSI_Q_OPT_RSS_HASH_M); + hash_type = ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ; + vsi->rss_hfunc = hash_type; + + ctxt->info.q_opt_rss = + FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_LUT_M, lut_type) | + FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_HASH_M, hash_type); } static void @@ -1216,10 +1213,8 @@ ice_chnl_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt) qcount = min_t(int, vsi->num_rxq, pf->num_lan_msix); pow = order_base_2(qcount); - qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) & - ICE_AQ_VSI_TC_Q_OFFSET_M) | - ((pow << ICE_AQ_VSI_TC_Q_NUM_S) & - ICE_AQ_VSI_TC_Q_NUM_M); + qmap = FIELD_PREP(ICE_AQ_VSI_TC_Q_OFFSET_M, offset); + qmap |= FIELD_PREP(ICE_AQ_VSI_TC_Q_NUM_M, pow); ctxt->info.tc_mapping[0] = cpu_to_le16(qmap); ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG); @@ -1601,12 +1596,44 @@ static void ice_vsi_set_vf_rss_flow_fld(struct ice_vsi *vsi) return; } - status = ice_add_avf_rss_cfg(&pf->hw, vsi->idx, ICE_DEFAULT_RSS_HENA); + status = ice_add_avf_rss_cfg(&pf->hw, vsi, ICE_DEFAULT_RSS_HENA); if (status) dev_dbg(dev, "ice_add_avf_rss_cfg failed for vsi = %d, error = %d\n", vsi->vsi_num, status); } +static const struct ice_rss_hash_cfg default_rss_cfgs[] = { + /* configure RSS for IPv4 with input set IP src/dst */ + {ICE_FLOW_SEG_HDR_IPV4, ICE_FLOW_HASH_IPV4, ICE_RSS_ANY_HEADERS, false}, + /* configure RSS for IPv6 with input set IPv6 src/dst */ + {ICE_FLOW_SEG_HDR_IPV6, ICE_FLOW_HASH_IPV6, ICE_RSS_ANY_HEADERS, false}, + /* configure RSS for tcp4 with input set IP src/dst, TCP src/dst */ + {ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4, + ICE_HASH_TCP_IPV4, ICE_RSS_ANY_HEADERS, false}, + /* configure RSS for udp4 with input set IP src/dst, UDP src/dst */ + {ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4, + ICE_HASH_UDP_IPV4, ICE_RSS_ANY_HEADERS, false}, + /* configure RSS for sctp4 with input set IP src/dst - only support + * RSS on SCTPv4 on outer headers (non-tunneled) + */ + {ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4, + ICE_HASH_SCTP_IPV4, ICE_RSS_OUTER_HEADERS, false}, + /* configure RSS for tcp6 with input set IPv6 src/dst, TCP src/dst */ + {ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6, + ICE_HASH_TCP_IPV6, ICE_RSS_ANY_HEADERS, false}, + /* configure RSS for udp6 with input set IPv6 src/dst, UDP src/dst */ + {ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6, + ICE_HASH_UDP_IPV6, ICE_RSS_ANY_HEADERS, false}, + /* configure RSS for sctp6 with input set IPv6 
src/dst - only support + * RSS on SCTPv6 on outer headers (non-tunneled) + */ + {ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6, + ICE_HASH_SCTP_IPV6, ICE_RSS_OUTER_HEADERS, false}, + /* configure RSS for IPSEC ESP SPI with input set MAC_IPV4_SPI */ + {ICE_FLOW_SEG_HDR_ESP, + ICE_FLOW_HASH_ESP_SPI, ICE_RSS_OUTER_HEADERS, false}, +}; + /** * ice_vsi_set_rss_flow_fld - Sets RSS input set for different flows * @vsi: VSI to be configured @@ -1620,11 +1647,12 @@ static void ice_vsi_set_vf_rss_flow_fld(struct ice_vsi *vsi) */ static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi) { - u16 vsi_handle = vsi->idx, vsi_num = vsi->vsi_num; + u16 vsi_num = vsi->vsi_num; struct ice_pf *pf = vsi->back; struct ice_hw *hw = &pf->hw; struct device *dev; int status; + u32 i; dev = ice_pf_to_dev(pf); if (ice_is_safe_mode(pf)) { @@ -1632,67 +1660,15 @@ static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi) vsi_num); return; } - /* configure RSS for IPv4 with input set IP src/dst */ - status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV4, - ICE_FLOW_SEG_HDR_IPV4); - if (status) - dev_dbg(dev, "ice_add_rss_cfg failed for ipv4 flow, vsi = %d, error = %d\n", - vsi_num, status); - - /* configure RSS for IPv6 with input set IPv6 src/dst */ - status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV6, - ICE_FLOW_SEG_HDR_IPV6); - if (status) - dev_dbg(dev, "ice_add_rss_cfg failed for ipv6 flow, vsi = %d, error = %d\n", - vsi_num, status); - - /* configure RSS for tcp4 with input set IP src/dst, TCP src/dst */ - status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV4, - ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4); - if (status) - dev_dbg(dev, "ice_add_rss_cfg failed for tcp4 flow, vsi = %d, error = %d\n", - vsi_num, status); - - /* configure RSS for udp4 with input set IP src/dst, UDP src/dst */ - status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_UDP_IPV4, - ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4); - if (status) - dev_dbg(dev, "ice_add_rss_cfg failed for udp4 flow, vsi = %d, error = %d\n", - vsi_num, status); - - /* configure RSS for sctp4 with input set IP src/dst */ - status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV4, - ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4); - if (status) - dev_dbg(dev, "ice_add_rss_cfg failed for sctp4 flow, vsi = %d, error = %d\n", - vsi_num, status); - - /* configure RSS for tcp6 with input set IPv6 src/dst, TCP src/dst */ - status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV6, - ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6); - if (status) - dev_dbg(dev, "ice_add_rss_cfg failed for tcp6 flow, vsi = %d, error = %d\n", - vsi_num, status); + for (i = 0; i < ARRAY_SIZE(default_rss_cfgs); i++) { + const struct ice_rss_hash_cfg *cfg = &default_rss_cfgs[i]; - /* configure RSS for udp6 with input set IPv6 src/dst, UDP src/dst */ - status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_UDP_IPV6, - ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6); - if (status) - dev_dbg(dev, "ice_add_rss_cfg failed for udp6 flow, vsi = %d, error = %d\n", - vsi_num, status); - - /* configure RSS for sctp6 with input set IPv6 src/dst */ - status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV6, - ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6); - if (status) - dev_dbg(dev, "ice_add_rss_cfg failed for sctp6 flow, vsi = %d, error = %d\n", - vsi_num, status); - - status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_ESP_SPI, - ICE_FLOW_SEG_HDR_ESP); - if (status) - dev_dbg(dev, "ice_add_rss_cfg failed for esp/spi flow, vsi = %d, error = %d\n", - vsi_num, status); + status = 
ice_add_rss_cfg(hw, vsi, cfg); + if (status) + dev_dbg(dev, "ice_add_rss_cfg failed, addl_hdrs = %x, hash_flds = %llx, hdr_type = %d, symm = %d\n", + cfg->addl_hdrs, cfg->hash_flds, + cfg->hdr_type, cfg->symm); + } } /** @@ -1809,11 +1785,8 @@ ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio, QRXFLXP_CNTXT_RXDID_PRIO_M | QRXFLXP_CNTXT_TS_M); - regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) & - QRXFLXP_CNTXT_RXDID_IDX_M; - - regval |= (prio << QRXFLXP_CNTXT_RXDID_PRIO_S) & - QRXFLXP_CNTXT_RXDID_PRIO_M; + regval |= FIELD_PREP(QRXFLXP_CNTXT_RXDID_IDX_M, rxdid); + regval |= FIELD_PREP(QRXFLXP_CNTXT_RXDID_PRIO_M, prio); if (ena_ts) /* Enable TimeSync on this queue */ @@ -2451,6 +2424,10 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params) goto unroll_vector_base; ice_vsi_map_rings_to_vectors(vsi); + + /* Associate q_vector rings to napi */ + ice_vsi_set_napi_queues(vsi); + vsi->stat_offsets_loaded = false; if (ice_is_xdp_ena_vsi(vsi)) { @@ -2926,6 +2903,123 @@ void ice_vsi_dis_irq(struct ice_vsi *vsi) synchronize_irq(vsi->q_vectors[i]->irq.virq); } +/** + * __ice_queue_set_napi - Set the napi instance for the queue + * @dev: device to which NAPI and queue belong + * @queue_index: Index of queue + * @type: queue type as RX or TX + * @napi: NAPI context + * @locked: is the rtnl_lock already held + * + * Set the napi instance for the queue. Caller indicates the lock status. + */ +static void +__ice_queue_set_napi(struct net_device *dev, unsigned int queue_index, + enum netdev_queue_type type, struct napi_struct *napi, + bool locked) +{ + if (!locked) + rtnl_lock(); + netif_queue_set_napi(dev, queue_index, type, napi); + if (!locked) + rtnl_unlock(); +} + +/** + * ice_queue_set_napi - Set the napi instance for the queue + * @vsi: VSI being configured + * @queue_index: Index of queue + * @type: queue type as RX or TX + * @napi: NAPI context + * + * Set the napi instance for the queue. The rtnl lock state is derived from the + * execution path. + */ +void +ice_queue_set_napi(struct ice_vsi *vsi, unsigned int queue_index, + enum netdev_queue_type type, struct napi_struct *napi) +{ + struct ice_pf *pf = vsi->back; + + if (!vsi->netdev) + return; + + if (current_work() == &pf->serv_task || + test_bit(ICE_PREPARED_FOR_RESET, pf->state) || + test_bit(ICE_DOWN, pf->state) || + test_bit(ICE_SUSPENDED, pf->state)) + __ice_queue_set_napi(vsi->netdev, queue_index, type, napi, + false); + else + __ice_queue_set_napi(vsi->netdev, queue_index, type, napi, + true); +} + +/** + * __ice_q_vector_set_napi_queues - Map queue[s] associated with the napi + * @q_vector: q_vector pointer + * @locked: is the rtnl_lock already held + * + * Associate the q_vector napi with all the queue[s] on the vector. + * Caller indicates the lock status. 
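+ *
+ * Hypothetical caller sketch (for illustration; not taken from this patch):
+ * a path that already holds rtnl passes locked = true so the helper does not
+ * try to take the lock again:
+ *
+ *	rtnl_lock();
+ *	__ice_q_vector_set_napi_queues(q_vector, true);
+ *	rtnl_unlock();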
+ */ +void __ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked) +{ + struct ice_rx_ring *rx_ring; + struct ice_tx_ring *tx_ring; + + ice_for_each_rx_ring(rx_ring, q_vector->rx) + __ice_queue_set_napi(q_vector->vsi->netdev, rx_ring->q_index, + NETDEV_QUEUE_TYPE_RX, &q_vector->napi, + locked); + + ice_for_each_tx_ring(tx_ring, q_vector->tx) + __ice_queue_set_napi(q_vector->vsi->netdev, tx_ring->q_index, + NETDEV_QUEUE_TYPE_TX, &q_vector->napi, + locked); + /* Also set the interrupt number for the NAPI */ + netif_napi_set_irq(&q_vector->napi, q_vector->irq.virq); +} + +/** + * ice_q_vector_set_napi_queues - Map queue[s] associated with the napi + * @q_vector: q_vector pointer + * + * Associate the q_vector napi with all the queue[s] on the vector + */ +void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector) +{ + struct ice_rx_ring *rx_ring; + struct ice_tx_ring *tx_ring; + + ice_for_each_rx_ring(rx_ring, q_vector->rx) + ice_queue_set_napi(q_vector->vsi, rx_ring->q_index, + NETDEV_QUEUE_TYPE_RX, &q_vector->napi); + + ice_for_each_tx_ring(tx_ring, q_vector->tx) + ice_queue_set_napi(q_vector->vsi, tx_ring->q_index, + NETDEV_QUEUE_TYPE_TX, &q_vector->napi); + /* Also set the interrupt number for the NAPI */ + netif_napi_set_irq(&q_vector->napi, q_vector->irq.virq); +} + +/** + * ice_vsi_set_napi_queues + * @vsi: VSI pointer + * + * Associate queue[s] with napi for all vectors + */ +void ice_vsi_set_napi_queues(struct ice_vsi *vsi) +{ + int i; + + if (!vsi->netdev) + return; + + ice_for_each_q_vector(vsi, i) + ice_q_vector_set_napi_queues(vsi->q_vectors[i]); +} + /** * ice_vsi_release - Delete a VSI and free its resources * @vsi: the VSI being removed @@ -3071,27 +3165,26 @@ ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi, } /** - * ice_vsi_realloc_stat_arrays - Frees unused stat structures + * ice_vsi_realloc_stat_arrays - Frees unused stat structures or allocs new ones * @vsi: VSI pointer - * @prev_txq: Number of Tx rings before ring reallocation - * @prev_rxq: Number of Rx rings before ring reallocation */ -static void -ice_vsi_realloc_stat_arrays(struct ice_vsi *vsi, int prev_txq, int prev_rxq) +static int +ice_vsi_realloc_stat_arrays(struct ice_vsi *vsi) { + u16 req_txq = vsi->req_txq ? vsi->req_txq : vsi->alloc_txq; + u16 req_rxq = vsi->req_rxq ?
vsi->req_rxq : vsi->alloc_rxq; + struct ice_ring_stats **tx_ring_stats; + struct ice_ring_stats **rx_ring_stats; struct ice_vsi_stats *vsi_stat; struct ice_pf *pf = vsi->back; + u16 prev_txq = vsi->alloc_txq; + u16 prev_rxq = vsi->alloc_rxq; int i; - if (!prev_txq || !prev_rxq) - return; - if (vsi->type == ICE_VSI_CHNL) - return; - vsi_stat = pf->vsi_stats[vsi->idx]; - if (vsi->num_txq < prev_txq) { - for (i = vsi->num_txq; i < prev_txq; i++) { + if (req_txq < prev_txq) { + for (i = req_txq; i < prev_txq; i++) { if (vsi_stat->tx_ring_stats[i]) { kfree_rcu(vsi_stat->tx_ring_stats[i], rcu); WRITE_ONCE(vsi_stat->tx_ring_stats[i], NULL); @@ -3099,14 +3192,36 @@ ice_vsi_realloc_stat_arrays(struct ice_vsi *vsi, int prev_txq, int prev_rxq) } } - if (vsi->num_rxq < prev_rxq) { - for (i = vsi->num_rxq; i < prev_rxq; i++) { + tx_ring_stats = vsi_stat->tx_ring_stats; + vsi_stat->tx_ring_stats = + krealloc_array(vsi_stat->tx_ring_stats, req_txq, + sizeof(*vsi_stat->tx_ring_stats), + GFP_KERNEL | __GFP_ZERO); + if (!vsi_stat->tx_ring_stats) { + vsi_stat->tx_ring_stats = tx_ring_stats; + return -ENOMEM; + } + + if (req_rxq < prev_rxq) { + for (i = req_rxq; i < prev_rxq; i++) { if (vsi_stat->rx_ring_stats[i]) { kfree_rcu(vsi_stat->rx_ring_stats[i], rcu); WRITE_ONCE(vsi_stat->rx_ring_stats[i], NULL); } } } + + rx_ring_stats = vsi_stat->rx_ring_stats; + vsi_stat->rx_ring_stats = + krealloc_array(vsi_stat->rx_ring_stats, req_rxq, + sizeof(*vsi_stat->rx_ring_stats), + GFP_KERNEL | __GFP_ZERO); + if (!vsi_stat->rx_ring_stats) { + vsi_stat->rx_ring_stats = rx_ring_stats; + return -ENOMEM; + } + + return 0; } /** @@ -3123,9 +3238,9 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags) { struct ice_vsi_cfg_params params = {}; struct ice_coalesce_stored *coalesce; - int ret, prev_txq, prev_rxq; - int prev_num_q_vectors = 0; + int prev_num_q_vectors; struct ice_pf *pf; + int ret; if (!vsi) return -EINVAL; @@ -3137,6 +3252,15 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags) if (WARN_ON(vsi->type == ICE_VSI_VF && !vsi->vf)) return -EINVAL; + ret = ice_vsi_realloc_stat_arrays(vsi); + if (ret) + goto err_vsi_cfg; + + ice_vsi_decfg(vsi); + ret = ice_vsi_cfg_def(vsi, &params); + if (ret) + goto err_vsi_cfg; + coalesce = kcalloc(vsi->num_q_vectors, sizeof(struct ice_coalesce_stored), GFP_KERNEL); if (!coalesce) @@ -3144,14 +3268,6 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags) prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi, coalesce); - prev_txq = vsi->num_txq; - prev_rxq = vsi->num_rxq; - - ice_vsi_decfg(vsi); - ret = ice_vsi_cfg_def(vsi, &params); - if (ret) - goto err_vsi_cfg; - ret = ice_vsi_cfg_tc_lan(pf, vsi); if (ret) { if (vsi_flags & ICE_VSI_FLAG_INIT) { @@ -3163,8 +3279,6 @@ return ice_schedule_reset(pf, ICE_RESET_PFR); } - ice_vsi_realloc_stat_arrays(vsi, prev_txq, prev_rxq); - ice_vsi_rebuild_set_coalesce(vsi, coalesce, prev_num_q_vectors); kfree(coalesce); @@ -3172,8 +3286,8 @@ err_vsi_cfg_tc_lan: ice_vsi_decfg(vsi); -err_vsi_cfg: kfree(coalesce); +err_vsi_cfg: return ret; } @@ -3316,9 +3430,8 @@ ice_vsi_setup_q_map_mqprio(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt, vsi->tc_cfg.ena_tc = ena_tc ?
ena_tc : 1; pow = order_base_2(tc0_qcount); - qmap = ((tc0_offset << ICE_AQ_VSI_TC_Q_OFFSET_S) & - ICE_AQ_VSI_TC_Q_OFFSET_M) | - ((pow << ICE_AQ_VSI_TC_Q_NUM_S) & ICE_AQ_VSI_TC_Q_NUM_M); + qmap = FIELD_PREP(ICE_AQ_VSI_TC_Q_OFFSET_M, tc0_offset); + qmap |= FIELD_PREP(ICE_AQ_VSI_TC_Q_NUM_M, pow); ice_for_each_traffic_class(i) { if (!(vsi->tc_cfg.ena_tc & BIT(i))) { diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h index f24f5d1e6f..bfcfc582a4 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_lib.h @@ -91,6 +91,16 @@ void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc); struct ice_vsi * ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params); +void +ice_queue_set_napi(struct ice_vsi *vsi, unsigned int queue_index, + enum netdev_queue_type type, struct napi_struct *napi); + +void __ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked); + +void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector); + +void ice_vsi_set_napi_queues(struct ice_vsi *vsi); + int ice_vsi_release(struct ice_vsi *vsi); void ice_vsi_close(struct ice_vsi *vsi); diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index dabf33cec3..6d256dbcb7 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -14,6 +14,7 @@ #include "ice_dcb_lib.h" #include "ice_dcb_nl.h" #include "ice_devlink.h" +#include "ice_hwmon.h" /* Including ice_trace.h with CREATE_TRACE_POINTS defined will generate the * ice tracepoint functions. This must be done exactly once across the * ice driver. @@ -979,7 +980,7 @@ static void ice_set_dflt_mib(struct ice_pf *pf) * Octets 13 - 20 are TSA values - leave as zeros */ buf[5] = 0x64; - len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S; + len = FIELD_GET(ICE_LLDP_TLV_LEN_M, typelen); offset += len + 2; tlv = (struct ice_lldp_org_tlv *) ((char *)tlv + sizeof(tlv->typelen) + len); @@ -1013,7 +1014,7 @@ static void ice_set_dflt_mib(struct ice_pf *pf) /* Octet 1 left as all zeros - PFC disabled */ buf[0] = 0x08; - len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S; + len = FIELD_GET(ICE_LLDP_TLV_LEN_M, typelen); offset += len + 2; if (ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, offset, NULL)) @@ -1251,6 +1252,32 @@ ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event) return status; } +/** + * ice_get_fwlog_data - copy the FW log data from ARQ event + * @pf: PF that the FW log event is associated with + * @event: event structure containing FW log data + */ +static void +ice_get_fwlog_data(struct ice_pf *pf, struct ice_rq_event_info *event) +{ + struct ice_fwlog_data *fwlog; + struct ice_hw *hw = &pf->hw; + + fwlog = &hw->fwlog_ring.rings[hw->fwlog_ring.tail]; + + memset(fwlog->data, 0, PAGE_SIZE); + fwlog->data_size = le16_to_cpu(event->desc.datalen); + + memcpy(fwlog->data, event->msg_buf, fwlog->data_size); + ice_fwlog_ring_increment(&hw->fwlog_ring.tail, hw->fwlog_ring.size); + + if (ice_fwlog_ring_full(&hw->fwlog_ring)) { + /* the rings are full so bump the head to create room */ + ice_fwlog_ring_increment(&hw->fwlog_ring.head, + hw->fwlog_ring.size); + } +} + /** * ice_aq_prep_for_event - Prepare to wait for an AdminQ event from firmware * @pf: pointer to the PF private structure @@ -1532,8 +1559,8 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type) ice_vc_process_vf_msg(pf, &event, &data); break; - case 
ice_aqc_opc_fw_logging: - ice_output_fw_log(hw, &event.desc, event.msg_buf); + case ice_aqc_opc_fw_logs_event: + ice_get_fwlog_data(pf, &event); break; case ice_aqc_opc_lldp_set_mib_change: ice_dcb_process_lldp_set_mib_change(pf, &event); @@ -1744,14 +1771,10 @@ static void ice_handle_mdd_event(struct ice_pf *pf) /* find what triggered an MDD event */ reg = rd32(hw, GL_MDET_TX_PQM); if (reg & GL_MDET_TX_PQM_VALID_M) { - u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >> - GL_MDET_TX_PQM_PF_NUM_S; - u16 vf_num = (reg & GL_MDET_TX_PQM_VF_NUM_M) >> - GL_MDET_TX_PQM_VF_NUM_S; - u8 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >> - GL_MDET_TX_PQM_MAL_TYPE_S; - u16 queue = ((reg & GL_MDET_TX_PQM_QNUM_M) >> - GL_MDET_TX_PQM_QNUM_S); + u8 pf_num = FIELD_GET(GL_MDET_TX_PQM_PF_NUM_M, reg); + u16 vf_num = FIELD_GET(GL_MDET_TX_PQM_VF_NUM_M, reg); + u8 event = FIELD_GET(GL_MDET_TX_PQM_MAL_TYPE_M, reg); + u16 queue = FIELD_GET(GL_MDET_TX_PQM_QNUM_M, reg); if (netif_msg_tx_err(pf)) dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n", @@ -1761,14 +1784,10 @@ static void ice_handle_mdd_event(struct ice_pf *pf) reg = rd32(hw, GL_MDET_TX_TCLAN_BY_MAC(hw)); if (reg & GL_MDET_TX_TCLAN_VALID_M) { - u8 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >> - GL_MDET_TX_TCLAN_PF_NUM_S; - u16 vf_num = (reg & GL_MDET_TX_TCLAN_VF_NUM_M) >> - GL_MDET_TX_TCLAN_VF_NUM_S; - u8 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >> - GL_MDET_TX_TCLAN_MAL_TYPE_S; - u16 queue = ((reg & GL_MDET_TX_TCLAN_QNUM_M) >> - GL_MDET_TX_TCLAN_QNUM_S); + u8 pf_num = FIELD_GET(GL_MDET_TX_TCLAN_PF_NUM_M, reg); + u16 vf_num = FIELD_GET(GL_MDET_TX_TCLAN_VF_NUM_M, reg); + u8 event = FIELD_GET(GL_MDET_TX_TCLAN_MAL_TYPE_M, reg); + u16 queue = FIELD_GET(GL_MDET_TX_TCLAN_QNUM_M, reg); if (netif_msg_tx_err(pf)) dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n", @@ -1778,14 +1797,10 @@ static void ice_handle_mdd_event(struct ice_pf *pf) reg = rd32(hw, GL_MDET_RX); if (reg & GL_MDET_RX_VALID_M) { - u8 pf_num = (reg & GL_MDET_RX_PF_NUM_M) >> - GL_MDET_RX_PF_NUM_S; - u16 vf_num = (reg & GL_MDET_RX_VF_NUM_M) >> - GL_MDET_RX_VF_NUM_S; - u8 event = (reg & GL_MDET_RX_MAL_TYPE_M) >> - GL_MDET_RX_MAL_TYPE_S; - u16 queue = ((reg & GL_MDET_RX_QNUM_M) >> - GL_MDET_RX_QNUM_S); + u8 pf_num = FIELD_GET(GL_MDET_RX_PF_NUM_M, reg); + u16 vf_num = FIELD_GET(GL_MDET_RX_VF_NUM_M, reg); + u8 event = FIELD_GET(GL_MDET_RX_MAL_TYPE_M, reg); + u16 queue = FIELD_GET(GL_MDET_RX_QNUM_M, reg); if (netif_msg_rx_err(pf)) dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n", @@ -3031,6 +3046,7 @@ static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp) static void ice_ena_misc_vector(struct ice_pf *pf) { struct ice_hw *hw = &pf->hw; + u32 pf_intr_start_offset; u32 val; /* Disable anti-spoof detection interrupt to prevent spurious event @@ -3059,6 +3075,47 @@ static void ice_ena_misc_vector(struct ice_pf *pf) /* SW_ITR_IDX = 0, but don't change INTENA */ wr32(hw, GLINT_DYN_CTL(pf->oicr_irq.index), GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M); + + if (!pf->hw.dev_caps.ts_dev_info.ts_ll_int_read) + return; + pf_intr_start_offset = rd32(hw, PFINT_ALLOC) & PFINT_ALLOC_FIRST; + wr32(hw, GLINT_DYN_CTL(pf->ll_ts_irq.index + pf_intr_start_offset), + GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M); +} + +/** + * ice_ll_ts_intr - ll_ts interrupt handler + * @irq: interrupt number + * @data: pointer to the PF private structure + */ +static irqreturn_t ice_ll_ts_intr(int __always_unused irq, void *data)
+{ + struct ice_pf *pf = data; + u32 pf_intr_start_offset; + struct ice_ptp_tx *tx; + unsigned long flags; + struct ice_hw *hw; + u32 val; + u8 idx; + + hw = &pf->hw; + tx = &pf->ptp.port.tx; + spin_lock_irqsave(&tx->lock, flags); + ice_ptp_complete_tx_single_tstamp(tx); + + idx = find_next_bit_wrap(tx->in_use, tx->len, + tx->last_ll_ts_idx_read + 1); + if (idx != tx->len) + ice_ptp_req_tx_single_tstamp(tx, idx); + spin_unlock_irqrestore(&tx->lock, flags); + + val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M | + (ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S); + pf_intr_start_offset = rd32(hw, PFINT_ALLOC) & PFINT_ALLOC_FIRST; + wr32(hw, GLINT_DYN_CTL(pf->ll_ts_irq.index + pf_intr_start_offset), + val); + + return IRQ_HANDLED; } /** @@ -3069,6 +3126,7 @@ static void ice_ena_misc_vector(struct ice_pf *pf) static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) { struct ice_pf *pf = (struct ice_pf *)data; + irqreturn_t ret = IRQ_HANDLED; struct ice_hw *hw = &pf->hw; struct device *dev; u32 oicr, ena_mask; @@ -3108,8 +3166,8 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) /* we have a reset warning */ ena_mask &= ~PFINT_OICR_GRST_M; - reset = (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_RESET_TYPE_M) >> - GLGEN_RSTAT_RESET_TYPE_S; + reset = FIELD_GET(GLGEN_RSTAT_RESET_TYPE_M, + rd32(hw, GLGEN_RSTAT)); if (reset == ICE_RESET_CORER) pf->corer_count++; @@ -3150,8 +3208,22 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) if (oicr & PFINT_OICR_TSYN_TX_M) { ena_mask &= ~PFINT_OICR_TSYN_TX_M; - if (!hw->reset_ongoing && ice_ptp_pf_handles_tx_interrupt(pf)) + if (ice_pf_state_is_nominal(pf) && + pf->hw.dev_caps.ts_dev_info.ts_ll_int_read) { + struct ice_ptp_tx *tx = &pf->ptp.port.tx; + unsigned long flags; + u8 idx; + + spin_lock_irqsave(&tx->lock, flags); + idx = find_next_bit_wrap(tx->in_use, tx->len, + tx->last_ll_ts_idx_read + 1); + if (idx != tx->len) + ice_ptp_req_tx_single_tstamp(tx, idx); + spin_unlock_irqrestore(&tx->lock, flags); + } else if (ice_ptp_pf_handles_tx_interrupt(pf)) { set_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread); + ret = IRQ_WAKE_THREAD; + } } if (oicr & PFINT_OICR_TSYN_EVNT_M) { @@ -3167,7 +3239,7 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) GLTSYN_STAT_EVENT1_M | GLTSYN_STAT_EVENT2_M); - set_bit(ICE_MISC_THREAD_EXTTS_EVENT, pf->misc_thread); + ice_ptp_extts_event(pf); } } @@ -3190,8 +3262,11 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) set_bit(ICE_PFR_REQ, pf->state); } } + ice_service_task_schedule(pf); + if (ret == IRQ_HANDLED) + ice_irq_dynamic_ena(hw, NULL, NULL); - return IRQ_WAKE_THREAD; + return ret; } /** @@ -3207,12 +3282,7 @@ static irqreturn_t ice_misc_intr_thread_fn(int __always_unused irq, void *data) hw = &pf->hw; if (ice_is_reset_in_progress(pf->state)) - return IRQ_HANDLED; - - ice_service_task_schedule(pf); - - if (test_and_clear_bit(ICE_MISC_THREAD_EXTTS_EVENT, pf->misc_thread)) - ice_ptp_extts_event(pf); + goto skip_irq; if (test_and_clear_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread)) { /* Process outstanding Tx timestamps. 
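The new ice_ll_ts_intr() handler above completes one timestamp, then uses find_next_bit_wrap() to pick the next in-flight index starting just past the last one read, so the scan round-robins and no single index can starve the others. A minimal sketch of that wrap-around scan, assuming a hypothetical 64-entry tracker (struct tstamp_tracker and next_pending_idx are illustrative names, not driver symbols):

    #include <linux/bitmap.h>
    #include <linux/find.h>

    #define TRACKER_LEN 64

    /* Hypothetical tracker: in_use marks indices with a Tx timestamp pending */
    struct tstamp_tracker {
    	DECLARE_BITMAP(in_use, TRACKER_LEN);
    	s8 last_idx_read;	/* -1 until the first request is issued */
    };

    /* Pick the next pending index, wrapping past the end of the bitmap.
     * Returns TRACKER_LEN when nothing is pending.
     */
    static unsigned long next_pending_idx(struct tstamp_tracker *t)
    {
    	/* find_next_bit_wrap() scans [offset, len) and then [0, offset) */
    	return find_next_bit_wrap(t->in_use, TRACKER_LEN,
    				  t->last_idx_read + 1);
    }

With last_idx_read initialized to -1, the first scan starts at bit 0, matching the tracker initialization later in this patch.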
If there is more work, @@ -3224,6 +3294,7 @@ static irqreturn_t ice_misc_intr_thread_fn(int __always_unused irq, void *data) } } +skip_irq: ice_irq_dynamic_ena(hw, NULL, NULL); return IRQ_HANDLED; @@ -3253,6 +3324,20 @@ static void ice_dis_ctrlq_interrupts(struct ice_hw *hw) ice_flush(hw); } +/** + * ice_free_irq_msix_ll_ts- Unroll ll_ts vector setup + * @pf: board private structure + */ +static void ice_free_irq_msix_ll_ts(struct ice_pf *pf) +{ + int irq_num = pf->ll_ts_irq.virq; + + synchronize_irq(irq_num); + devm_free_irq(ice_pf_to_dev(pf), irq_num, pf); + + ice_free_irq(pf, pf->ll_ts_irq); +} + /** * ice_free_irq_msix_misc - Unroll misc vector setup * @pf: board private structure @@ -3272,6 +3357,8 @@ static void ice_free_irq_msix_misc(struct ice_pf *pf) devm_free_irq(ice_pf_to_dev(pf), misc_irq_num, pf); ice_free_irq(pf, pf->oicr_irq); + if (pf->hw.dev_caps.ts_dev_info.ts_ll_int_read) + ice_free_irq_msix_ll_ts(pf); } /** @@ -3297,10 +3384,12 @@ static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 reg_idx) PFINT_MBX_CTL_CAUSE_ENA_M); wr32(hw, PFINT_MBX_CTL, val); - /* This enables Sideband queue Interrupt causes */ - val = ((reg_idx & PFINT_SB_CTL_MSIX_INDX_M) | - PFINT_SB_CTL_CAUSE_ENA_M); - wr32(hw, PFINT_SB_CTL, val); + if (!hw->dev_caps.ts_dev_info.ts_ll_int_read) { + /* enable Sideband queue Interrupt causes */ + val = ((reg_idx & PFINT_SB_CTL_MSIX_INDX_M) | + PFINT_SB_CTL_CAUSE_ENA_M); + wr32(hw, PFINT_SB_CTL, val); + } ice_flush(hw); } @@ -3317,13 +3406,17 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf) { struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; - struct msi_map oicr_irq; + u32 pf_intr_start_offset; + struct msi_map irq; int err = 0; if (!pf->int_name[0]) snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc", dev_driver_string(dev), dev_name(dev)); + if (!pf->int_name_ll_ts[0]) + snprintf(pf->int_name_ll_ts, sizeof(pf->int_name_ll_ts) - 1, + "%s-%s:ll_ts", dev_driver_string(dev), dev_name(dev)); /* Do not request IRQ but do enable OICR interrupt since settings are * lost during reset. Note that this function is called only during * rebuild path and not while reset is in progress. 
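A recurring pattern in this patch is replacing open-coded (reg & MASK) >> SHIFT extraction with FIELD_GET(), and the inverse construction with FIELD_PREP(); both derive the shift from the mask at compile time, so the separate _S shift constants become unnecessary. A before/after sketch with a made-up register layout (DEMO_* masks are illustrative, not hardware definitions):

    #include <linux/bitfield.h>
    #include <linux/bits.h>

    /* Hypothetical register layout: queue number in bits 15:0, PF in 23:16 */
    #define DEMO_QNUM_M	GENMASK(15, 0)
    #define DEMO_PF_NUM_M	GENMASK(23, 16)

    static void demo_decode(u32 reg)
    {
    	/* Old style would be: (reg & DEMO_PF_NUM_M) >> 16 */
    	u8 pf_num = FIELD_GET(DEMO_PF_NUM_M, reg);
    	u16 queue = FIELD_GET(DEMO_QNUM_M, reg);

    	/* FIELD_PREP() is the inverse: place a value under a mask */
    	u32 encoded = FIELD_PREP(DEMO_PF_NUM_M, pf_num) |
    		      FIELD_PREP(DEMO_QNUM_M, queue);
    	(void)encoded;
    }

FIELD_GET() also truncates to the field width, which is why the narrower u8/u16 destinations in the MDD hunks above are safe without extra casts.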
@@ -3332,11 +3425,11 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf) goto skip_req_irq; /* reserve one vector in irq_tracker for misc interrupts */ - oicr_irq = ice_alloc_irq(pf, false); - if (oicr_irq.index < 0) - return oicr_irq.index; + irq = ice_alloc_irq(pf, false); + if (irq.index < 0) + return irq.index; - pf->oicr_irq = oicr_irq; + pf->oicr_irq = irq; err = devm_request_threaded_irq(dev, pf->oicr_irq.virq, ice_misc_intr, ice_misc_intr_thread_fn, 0, pf->int_name, pf); @@ -3347,10 +3440,34 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf) return err; } + /* reserve one vector in irq_tracker for ll_ts interrupt */ + if (!pf->hw.dev_caps.ts_dev_info.ts_ll_int_read) + goto skip_req_irq; + + irq = ice_alloc_irq(pf, false); + if (irq.index < 0) + return irq.index; + + pf->ll_ts_irq = irq; + err = devm_request_irq(dev, pf->ll_ts_irq.virq, ice_ll_ts_intr, 0, + pf->int_name_ll_ts, pf); + if (err) { + dev_err(dev, "devm_request_irq for %s failed: %d\n", + pf->int_name_ll_ts, err); + ice_free_irq(pf, pf->ll_ts_irq); + return err; + } + skip_req_irq: ice_ena_misc_vector(pf); ice_ena_ctrlq_interrupts(hw, pf->oicr_irq.index); + /* This enables LL TS interrupt */ + pf_intr_start_offset = rd32(hw, PFINT_ALLOC) & PFINT_ALLOC_FIRST; + if (pf->hw.dev_caps.ts_dev_info.ts_ll_int_read) + wr32(hw, PFINT_SB_CTL, + ((pf->ll_ts_irq.index + pf_intr_start_offset) & + PFINT_SB_CTL_MSIX_INDX_M) | PFINT_SB_CTL_CAUSE_ENA_M); wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_irq.index), ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S); @@ -3375,9 +3492,11 @@ static void ice_napi_add(struct ice_vsi *vsi) if (!vsi->netdev) return; - ice_for_each_q_vector(vsi, v_idx) + ice_for_each_q_vector(vsi, v_idx) { netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi, ice_napi_poll); + __ice_q_vector_set_napi_queues(vsi->q_vectors[v_idx], false); + } } /** @@ -3397,6 +3516,7 @@ static void ice_set_ops(struct ice_vsi *vsi) netdev->netdev_ops = &ice_netdev_ops; netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic; + netdev->xdp_metadata_ops = &ice_xdp_md_ops; ice_set_ethtool_ops(netdev); if (vsi->type != ICE_VSI_PF) @@ -4360,6 +4480,19 @@ static void ice_print_wake_reason(struct ice_pf *pf) dev_info(ice_pf_to_dev(pf), "Wake reason: %s", wake_str); } +/** + * ice_pf_fwlog_update_module - update 1 module + * @pf: pointer to the PF struct + * @log_level: log_level to use for the @module + * @module: module to update + */ +void ice_pf_fwlog_update_module(struct ice_pf *pf, int log_level, int module) +{ + struct ice_hw *hw = &pf->hw; + + hw->fwlog_cfg.module_entries[module].log_level = log_level; +} + /** * ice_register_netdev - register netdev * @vsi: pointer to the VSI struct @@ -4685,6 +4818,8 @@ static void ice_init_features(struct ice_pf *pf) if (ice_init_lag(pf)) dev_warn(dev, "Failed to init link aggregation support\n"); + + ice_hwmon_init(pf); } static void ice_deinit_features(struct ice_pf *pf) @@ -4702,6 +4837,8 @@ static void ice_deinit_features(struct ice_pf *pf) ice_ptp_release(pf); if (test_bit(ICE_FLAG_DPLL, pf->flags)) ice_dpll_deinit(pf); + if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) + xa_destroy(&pf->eswitch.reprs); } static void ice_init_wakeup(struct ice_pf *pf) @@ -5203,11 +5340,15 @@ static void ice_remove(struct pci_dev *pdev) msleep(100); } + ice_debugfs_exit(); + if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) { set_bit(ICE_VF_RESETS_DISABLED, pf->state); ice_free_vfs(pf); } + ice_hwmon_exit(pf); + ice_service_task_stop(pf); ice_aq_cancel_waiting_tasks(pf); set_bit(ICE_DOWN, pf->state); @@ -5306,6 
+5447,7 @@ static int ice_reinit_interrupt_scheme(struct ice_pf *pf) if (ret) goto err_reinit; ice_vsi_map_rings_to_vectors(pf->vsi[v]); + ice_vsi_set_napi_queues(pf->vsi[v]); } ret = ice_req_irq_msix_misc(pf); @@ -5672,6 +5814,8 @@ static int __init ice_module_init(void) goto err_dest_wq; } + ice_debugfs_init(); + status = pci_register_driver(&ice_driver); if (status) { pr_err("failed to register PCI driver, err %d\n", status); @@ -5682,6 +5826,7 @@ static int __init ice_module_init(void) err_dest_lag_wq: destroy_workqueue(ice_lag_wq); + ice_debugfs_exit(); err_dest_wq: destroy_workqueue(ice_wq); return status; @@ -6040,6 +6185,23 @@ ice_fix_features(struct net_device *netdev, netdev_features_t features) return features; } +/** + * ice_set_rx_rings_vlan_proto - update rings with new stripped VLAN proto + * @vsi: PF's VSI + * @vlan_ethertype: VLAN ethertype (802.1Q or 802.1ad) in network byte order + * + * Store current stripped VLAN proto in ring packet context, + * so it can be accessed more efficiently by packet processing code. + */ +static void +ice_set_rx_rings_vlan_proto(struct ice_vsi *vsi, __be16 vlan_ethertype) +{ + u16 i; + + ice_for_each_alloc_rxq(vsi, i) + vsi->rx_rings[i]->pkt_ctx.vlan_proto = vlan_ethertype; +} + /** * ice_set_vlan_offload_features - set VLAN offload features for the PF VSI * @vsi: PF's VSI @@ -6082,6 +6244,9 @@ ice_set_vlan_offload_features(struct ice_vsi *vsi, netdev_features_t features) if (strip_err || insert_err) return -EIO; + ice_set_rx_rings_vlan_proto(vsi, enable_stripping ? + htons(vlan_ethertype) : 0); + return 0; } @@ -6668,13 +6833,11 @@ void ice_update_vsi_stats(struct ice_vsi *vsi) cur_ns->rx_crc_errors = pf->stats.crc_errors; cur_ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes + - pf->stats.rx_len_errors + pf->stats.rx_undersize + pf->hw_csum_rx_error + pf->stats.rx_jabber + pf->stats.rx_fragments + pf->stats.rx_oversize; - cur_ns->rx_length_errors = pf->stats.rx_len_errors; /* record drops from the port level */ cur_ns->rx_missed_errors = pf->stats.eth.rx_discards; } @@ -6814,9 +6977,6 @@ void ice_update_pf_stats(struct ice_pf *pf) &prev_ps->mac_remote_faults, &cur_ps->mac_remote_faults); - ice_stat_update32(hw, GLPRT_RLEC(port), pf->stat_prev_loaded, - &prev_ps->rx_len_errors, &cur_ps->rx_len_errors); - ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded, &prev_ps->rx_undersize, &cur_ps->rx_undersize); @@ -7399,9 +7559,9 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) goto err_vsi_rebuild; } - err = ice_vsi_rebuild_by_type(pf, ICE_VSI_SWITCHDEV_CTRL); + err = ice_eswitch_rebuild(pf); if (err) { - dev_err(dev, "Switchdev CTRL VSI rebuild failed: %d\n", err); + dev_err(dev, "Switchdev rebuild failed: %d\n", err); goto err_vsi_rebuild; } @@ -7701,6 +7861,59 @@ int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed) return status; } +/** + * ice_set_rss_hfunc - Set RSS HASH function + * @vsi: Pointer to VSI structure + * @hfunc: hash function (ICE_AQ_VSI_Q_OPT_RSS_*) + * + * Returns 0 on success, negative on failure + */ +int ice_set_rss_hfunc(struct ice_vsi *vsi, u8 hfunc) +{ + struct ice_hw *hw = &vsi->back->hw; + struct ice_vsi_ctx *ctx; + bool symm; + int err; + + if (hfunc == vsi->rss_hfunc) + return 0; + + if (hfunc != ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ && + hfunc != ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ) + return -EOPNOTSUPP; + + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID); + ctx->info.q_opt_rss = 
vsi->info.q_opt_rss; + ctx->info.q_opt_rss &= ~ICE_AQ_VSI_Q_OPT_RSS_HASH_M; + ctx->info.q_opt_rss |= + FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_HASH_M, hfunc); + ctx->info.q_opt_tc = vsi->info.q_opt_tc; + ctx->info.q_opt_flags = vsi->info.q_opt_rss; + + err = ice_update_vsi(hw, vsi->idx, ctx, NULL); + if (err) { + dev_err(ice_pf_to_dev(vsi->back), "Failed to configure RSS hash for VSI %d, error %d\n", + vsi->vsi_num, err); + } else { + vsi->info.q_opt_rss = ctx->info.q_opt_rss; + vsi->rss_hfunc = hfunc; + netdev_info(vsi->netdev, "Hash function set to: %sToeplitz\n", + hfunc == ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ ? + "Symmetric " : ""); + } + kfree(ctx); + if (err) + return err; + + /* Fix the symmetry setting for all existing RSS configurations */ + symm = !!(hfunc == ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ); + return ice_set_rss_cfg_symm(hw, vsi, symm); +} + /** * ice_bridge_getlink - Get the hardware bridge mode * @skb: skb buff @@ -7889,8 +8102,8 @@ static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue) struct ice_hw *hw = &pf->hw; u32 head, val = 0; - head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue])) & - QTX_COMM_HEAD_HEAD_M) >> QTX_COMM_HEAD_HEAD_S; + head = FIELD_GET(QTX_COMM_HEAD_HEAD_M, + rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue]))); /* Read interrupt register */ val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx)); @@ -8138,13 +8351,12 @@ static int ice_add_vsi_to_fdir(struct ice_pf *pf, struct ice_vsi *vsi) for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) { enum ice_flow_priority prio; - u64 prof_id; /* add this VSI to FDir profile for this flow */ prio = ICE_FLOW_PRIO_NORMAL; prof = hw->fdir_prof[flow]; - prof_id = flow + tun * ICE_FLTR_PTYPE_MAX; - status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, + status = ice_flow_add_entry(hw, ICE_BLK_FD, + prof->prof_id[tun], prof->vsi_h[0], vsi->idx, prio, prof->fdir_seg[tun], &entry_h); diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c index f6f52a2480..d4e05d2cb3 100644 --- a/drivers/net/ethernet/intel/ice/ice_nvm.c +++ b/drivers/net/ethernet/intel/ice/ice_nvm.c @@ -571,8 +571,8 @@ ice_get_nvm_ver_info(struct ice_hw *hw, enum ice_bank_select bank, struct ice_nv return status; } - nvm->major = (ver & ICE_NVM_VER_HI_MASK) >> ICE_NVM_VER_HI_SHIFT; - nvm->minor = (ver & ICE_NVM_VER_LO_MASK) >> ICE_NVM_VER_LO_SHIFT; + nvm->major = FIELD_GET(ICE_NVM_VER_HI_MASK, ver); + nvm->minor = FIELD_GET(ICE_NVM_VER_LO_MASK, ver); status = ice_read_nvm_sr_copy(hw, bank, ICE_SR_NVM_EETRACK_LO, &eetrack_lo); if (status) { @@ -706,9 +706,9 @@ ice_get_orom_ver_info(struct ice_hw *hw, enum ice_bank_select bank, struct ice_o combo_ver = le32_to_cpu(civd.combo_ver); - orom->major = (u8)((combo_ver & ICE_OROM_VER_MASK) >> ICE_OROM_VER_SHIFT); - orom->patch = (u8)(combo_ver & ICE_OROM_VER_PATCH_MASK); - orom->build = (u16)((combo_ver & ICE_OROM_VER_BUILD_MASK) >> ICE_OROM_VER_BUILD_SHIFT); + orom->major = FIELD_GET(ICE_OROM_VER_MASK, combo_ver); + orom->patch = FIELD_GET(ICE_OROM_VER_PATCH_MASK, combo_ver); + orom->build = FIELD_GET(ICE_OROM_VER_BUILD_MASK, combo_ver); return 0; } @@ -950,7 +950,8 @@ static int ice_determine_active_flash_banks(struct ice_hw *hw) } /* Check that the control word indicates validity */ - if ((ctrl_word & ICE_SR_CTRL_WORD_1_M) >> ICE_SR_CTRL_WORD_1_S != ICE_SR_CTRL_WORD_VALID) { + if (FIELD_GET(ICE_SR_CTRL_WORD_1_M, ctrl_word) != + ICE_SR_CTRL_WORD_VALID) { ice_debug(hw, ICE_DBG_NVM, "Shadow RAM control word is invalid\n"); return -EIO; } @@ -1027,7 +1028,7 @@ int 
ice_init_nvm(struct ice_hw *hw) * as the blank mode may be used in the factory line. */ gens_stat = rd32(hw, GLNVM_GENS); - sr_size = (gens_stat & GLNVM_GENS_SR_SIZE_M) >> GLNVM_GENS_SR_SIZE_S; + sr_size = FIELD_GET(GLNVM_GENS_SR_SIZE_M, gens_stat); /* Switching to words (sr_size contains power of 2) */ flash->sr_words = BIT(sr_size) * ICE_SR_WORDS_IN_1KB; diff --git a/drivers/net/ethernet/intel/ice/ice_osdep.h b/drivers/net/ethernet/intel/ice/ice_osdep.h index 82bc54fec7..a2562f0426 100644 --- a/drivers/net/ethernet/intel/ice/ice_osdep.h +++ b/drivers/net/ethernet/intel/ice/ice_osdep.h @@ -24,7 +24,7 @@ #define rd64(a, reg) readq((a)->hw_addr + (reg)) #define ice_flush(a) rd32((a), GLGEN_STAT) -#define ICE_M(m, s) ((m) << (s)) +#define ICE_M(m, s) ((m ## U) << (s)) struct ice_dma_mem { void *va; diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c index e6b1ce76ca..3b6605c858 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp.c @@ -7,7 +7,7 @@ #define E810_OUT_PROP_DELAY_NS 1 -#define UNKNOWN_INCVAL_E822 0x100000000ULL +#define UNKNOWN_INCVAL_E82X 0x100000000ULL static const struct ptp_pin_desc ice_pin_desc_e810t[] = { /* name idx func chan */ @@ -524,6 +524,119 @@ ice_ptp_is_tx_tracker_up(struct ice_ptp_tx *tx) return tx->init && !tx->calibrating; } +/** + * ice_ptp_req_tx_single_tstamp - Request Tx timestamp for a port from FW + * @tx: the PTP Tx timestamp tracker + * @idx: index of the timestamp to request + */ +void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx) +{ + struct ice_ptp_port *ptp_port; + struct sk_buff *skb; + struct ice_pf *pf; + + if (!tx->init) + return; + + ptp_port = container_of(tx, struct ice_ptp_port, tx); + pf = ptp_port_to_pf(ptp_port); + + /* Drop packets which have waited for more than 2 seconds */ + if (time_is_before_jiffies(tx->tstamps[idx].start + 2 * HZ)) { + /* Count the number of Tx timestamps that timed out */ + pf->ptp.tx_hwtstamp_timeouts++; + + skb = tx->tstamps[idx].skb; + tx->tstamps[idx].skb = NULL; + clear_bit(idx, tx->in_use); + + dev_kfree_skb_any(skb); + return; + } + + ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx); + + /* Write TS index to read to the PF register so the FW can read it */ + wr32(&pf->hw, PF_SB_ATQBAL, + TS_LL_READ_TS_INTR | FIELD_PREP(TS_LL_READ_TS_IDX, idx) | + TS_LL_READ_TS); + tx->last_ll_ts_idx_read = idx; +} + +/** + * ice_ptp_complete_tx_single_tstamp - Complete Tx timestamp for a port + * @tx: the PTP Tx timestamp tracker + */ +void ice_ptp_complete_tx_single_tstamp(struct ice_ptp_tx *tx) +{ + struct skb_shared_hwtstamps shhwtstamps = {}; + u8 idx = tx->last_ll_ts_idx_read; + struct ice_ptp_port *ptp_port; + u64 raw_tstamp, tstamp; + bool drop_ts = false; + struct sk_buff *skb; + struct ice_pf *pf; + u32 val; + + if (!tx->init || tx->last_ll_ts_idx_read < 0) + return; + + ptp_port = container_of(tx, struct ice_ptp_port, tx); + pf = ptp_port_to_pf(ptp_port); + + ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx); + + val = rd32(&pf->hw, PF_SB_ATQBAL); + + /* When the bit is cleared, the TS is ready in the register */ + if (val & TS_LL_READ_TS) { + dev_err(ice_pf_to_dev(pf), "Failed to get the Tx tstamp - FW not ready"); + return; + } + + /* High 8 bit value of the TS is on the bits 16:23 */ + raw_tstamp = FIELD_GET(TS_LL_READ_TS_HIGH, val); + raw_tstamp <<= 32; + + /* Read the low 32 bit value */ + raw_tstamp |= (u64)rd32(&pf->hw, PF_SB_ATQBAH); + + /* For PHYs which don't implement a proper timestamp 
ready bitmap, + * verify that the timestamp value is different from the last cached + * timestamp. If it is not, skip this for now assuming it hasn't yet + * been captured by hardware. + */ + if (!drop_ts && tx->verify_cached && + raw_tstamp == tx->tstamps[idx].cached_tstamp) + return; + + if (tx->verify_cached && raw_tstamp) + tx->tstamps[idx].cached_tstamp = raw_tstamp; + clear_bit(idx, tx->in_use); + skb = tx->tstamps[idx].skb; + tx->tstamps[idx].skb = NULL; + if (test_and_clear_bit(idx, tx->stale)) + drop_ts = true; + + if (!skb) + return; + + if (drop_ts) { + dev_kfree_skb_any(skb); + return; + } + + /* Extend the timestamp using cached PHC time */ + tstamp = ice_ptp_extend_40b_ts(pf, raw_tstamp); + if (tstamp) { + shhwtstamps.hwtstamp = ns_to_ktime(tstamp); + ice_trace(tx_tstamp_complete, skb, idx); + } + + skb_tstamp_tx(skb, &shhwtstamps); + dev_kfree_skb_any(skb); +} + /** * ice_ptp_process_tx_tstamp - Process Tx timestamps for a port * @tx: the PTP Tx timestamp tracker @@ -575,6 +688,7 @@ ice_ptp_is_tx_tracker_up(struct ice_ptp_tx *tx) static void ice_ptp_process_tx_tstamp(struct ice_ptp_tx *tx) { struct ice_ptp_port *ptp_port; + unsigned long flags; struct ice_pf *pf; struct ice_hw *hw; u64 tstamp_ready; @@ -646,7 +760,7 @@ static void ice_ptp_process_tx_tstamp(struct ice_ptp_tx *tx) drop_ts = true; skip_ts_read: - spin_lock(&tx->lock); + spin_lock_irqsave(&tx->lock, flags); if (tx->verify_cached && raw_tstamp) tx->tstamps[idx].cached_tstamp = raw_tstamp; clear_bit(idx, tx->in_use); @@ -654,7 +768,7 @@ skip_ts_read: tx->tstamps[idx].skb = NULL; if (test_and_clear_bit(idx, tx->stale)) drop_ts = true; - spin_unlock(&tx->lock); + spin_unlock_irqrestore(&tx->lock, flags); /* It is unlikely but possible that the SKB will have been * flushed at this point due to link change or teardown. 
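The spin_lock() to spin_lock_irqsave() conversions in this area follow from the LL TS work: tx->lock is now taken from hard interrupt context (ice_ll_ts_intr() and the top half of ice_misc_intr()), so every process- or softirq-context user must disable local interrupts while holding it, or the IRQ could fire on a CPU already holding the lock and spin forever. A generic sketch of the rule, with demo_lock standing in for tx->lock (all demo_* names are illustrative):

    #include <linux/spinlock.h>
    #include <linux/interrupt.h>

    static DEFINE_SPINLOCK(demo_lock);
    static unsigned long demo_pending;

    /* Hard-IRQ context: plain spin_lock() is fine, local IRQs are already off */
    static irqreturn_t demo_irq(int irq, void *data)
    {
    	spin_lock(&demo_lock);
    	demo_pending++;
    	spin_unlock(&demo_lock);
    	return IRQ_HANDLED;
    }

    /* Process context: must disable local IRQs while holding the lock,
     * otherwise demo_irq() could preempt us here and deadlock on demo_lock.
     */
    static void demo_process_work(void)
    {
    	unsigned long flags;

    	spin_lock_irqsave(&demo_lock, flags);
    	demo_pending = 0;
    	spin_unlock_irqrestore(&demo_lock, flags);
    }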
@@ -705,7 +819,9 @@ static enum ice_tx_tstamp_work ice_ptp_tx_tstamp_owner(struct ice_pf *pf) /* Read the Tx ready status first */ err = ice_get_phy_tx_tstamp_ready(&pf->hw, i, &tstamp_ready); - if (err || tstamp_ready) + if (err) + break; + else if (tstamp_ready) return ICE_TX_TSTAMP_WORK_PENDING; } @@ -722,6 +838,7 @@ static enum ice_tx_tstamp_work ice_ptp_tx_tstamp_owner(struct ice_pf *pf) static enum ice_tx_tstamp_work ice_ptp_tx_tstamp(struct ice_ptp_tx *tx) { bool more_timestamps; + unsigned long flags; if (!tx->init) return ICE_TX_TSTAMP_WORK_DONE; @@ -730,9 +847,9 @@ static enum ice_tx_tstamp_work ice_ptp_tx_tstamp(struct ice_ptp_tx *tx) ice_ptp_process_tx_tstamp(tx); /* Check if there are outstanding Tx timestamps */ - spin_lock(&tx->lock); + spin_lock_irqsave(&tx->lock, flags); more_timestamps = tx->init && !bitmap_empty(tx->in_use, tx->len); - spin_unlock(&tx->lock); + spin_unlock_irqrestore(&tx->lock, flags); if (more_timestamps) return ICE_TX_TSTAMP_WORK_PENDING; @@ -769,6 +886,7 @@ ice_ptp_alloc_tx_tracker(struct ice_ptp_tx *tx) tx->in_use = in_use; tx->stale = stale; tx->init = 1; + tx->last_ll_ts_idx_read = -1; spin_lock_init(&tx->lock); @@ -786,6 +904,7 @@ static void ice_ptp_flush_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx) { struct ice_hw *hw = &pf->hw; + unsigned long flags; u64 tstamp_ready; int err; u8 idx; @@ -809,12 +928,12 @@ ice_ptp_flush_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx) if (!hw->reset_ongoing && (tstamp_ready & BIT_ULL(phy_idx))) ice_clear_phy_tstamp(hw, tx->block, phy_idx); - spin_lock(&tx->lock); + spin_lock_irqsave(&tx->lock, flags); skb = tx->tstamps[idx].skb; tx->tstamps[idx].skb = NULL; clear_bit(idx, tx->in_use); clear_bit(idx, tx->stale); - spin_unlock(&tx->lock); + spin_unlock_irqrestore(&tx->lock, flags); /* Count the number of Tx timestamps flushed */ pf->ptp.tx_hwtstamp_flushed++; @@ -838,9 +957,11 @@ ice_ptp_flush_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx) static void ice_ptp_mark_tx_tracker_stale(struct ice_ptp_tx *tx) { - spin_lock(&tx->lock); + unsigned long flags; + + spin_lock_irqsave(&tx->lock, flags); bitmap_or(tx->stale, tx->stale, tx->in_use, tx->len); - spin_unlock(&tx->lock); + spin_unlock_irqrestore(&tx->lock, flags); } /** @@ -853,9 +974,11 @@ ice_ptp_mark_tx_tracker_stale(struct ice_ptp_tx *tx) static void ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx) { - spin_lock(&tx->lock); + unsigned long flags; + + spin_lock_irqsave(&tx->lock, flags); tx->init = 0; - spin_unlock(&tx->lock); + spin_unlock_irqrestore(&tx->lock, flags); /* wait for potentially outstanding interrupt to complete */ synchronize_irq(pf->oicr_irq.virq); @@ -875,7 +998,7 @@ ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx) } /** - * ice_ptp_init_tx_e822 - Initialize tracking for Tx timestamps + * ice_ptp_init_tx_e82x - Initialize tracking for Tx timestamps * @pf: Board private structure * @tx: the Tx tracking structure to initialize * @port: the port this structure tracks @@ -886,11 +1009,11 @@ ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx) * registers into chunks based on the port number. 
*/ static int -ice_ptp_init_tx_e822(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port) +ice_ptp_init_tx_e82x(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port) { tx->block = port / ICE_PORTS_PER_QUAD; - tx->offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT_E822; - tx->len = INDEX_PER_PORT_E822; + tx->offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT_E82X; + tx->len = INDEX_PER_PORT_E82X; tx->verify_cached = 0; return ice_ptp_alloc_tx_tracker(tx); @@ -1093,10 +1216,10 @@ static u64 ice_base_incval(struct ice_pf *pf) if (ice_is_e810(hw)) incval = ICE_PTP_NOMINAL_INCVAL_E810; - else if (ice_e822_time_ref(hw) < NUM_ICE_TIME_REF_FREQ) - incval = ice_e822_nominal_incval(ice_e822_time_ref(hw)); + else if (ice_e82x_time_ref(hw) < NUM_ICE_TIME_REF_FREQ) + incval = ice_e82x_nominal_incval(ice_e82x_time_ref(hw)); else - incval = UNKNOWN_INCVAL_E822; + incval = UNKNOWN_INCVAL_E82X; dev_dbg(ice_pf_to_dev(pf), "PTP: using base increment value of 0x%016llx\n", incval); @@ -1125,10 +1248,10 @@ static int ice_ptp_check_tx_fifo(struct ice_ptp_port *port) /* need to read FIFO state */ if (offs == 0 || offs == 1) - err = ice_read_quad_reg_e822(hw, quad, Q_REG_FIFO01_STATUS, + err = ice_read_quad_reg_e82x(hw, quad, Q_REG_FIFO01_STATUS, &val); else - err = ice_read_quad_reg_e822(hw, quad, Q_REG_FIFO23_STATUS, + err = ice_read_quad_reg_e82x(hw, quad, Q_REG_FIFO23_STATUS, &val); if (err) { @@ -1138,9 +1261,9 @@ static int ice_ptp_check_tx_fifo(struct ice_ptp_port *port) } if (offs & 0x1) - phy_sts = (val & Q_REG_FIFO13_M) >> Q_REG_FIFO13_S; + phy_sts = FIELD_GET(Q_REG_FIFO13_M, val); else - phy_sts = (val & Q_REG_FIFO02_M) >> Q_REG_FIFO02_S; + phy_sts = FIELD_GET(Q_REG_FIFO02_M, val); if (phy_sts & FIFO_EMPTY) { port->tx_fifo_busy_cnt = FIFO_OK; @@ -1156,7 +1279,7 @@ static int ice_ptp_check_tx_fifo(struct ice_ptp_port *port) dev_dbg(ice_pf_to_dev(pf), "Port %d Tx FIFO still not empty; resetting quad %d\n", port->port_num, quad); - ice_ptp_reset_ts_memory_quad_e822(hw, quad); + ice_ptp_reset_ts_memory_quad_e82x(hw, quad); port->tx_fifo_busy_cnt = FIFO_OK; return 0; } @@ -1201,8 +1324,8 @@ static void ice_ptp_wait_for_offsets(struct kthread_work *work) tx_err = ice_ptp_check_tx_fifo(port); if (!tx_err) - tx_err = ice_phy_cfg_tx_offset_e822(hw, port->port_num); - rx_err = ice_phy_cfg_rx_offset_e822(hw, port->port_num); + tx_err = ice_phy_cfg_tx_offset_e82x(hw, port->port_num); + rx_err = ice_phy_cfg_rx_offset_e82x(hw, port->port_num); if (tx_err || rx_err) { /* Tx and/or Rx offset not yet configured, try again later */ kthread_queue_delayed_work(pf->ptp.kworker, @@ -1231,7 +1354,7 @@ ice_ptp_port_phy_stop(struct ice_ptp_port *ptp_port) kthread_cancel_delayed_work_sync(&ptp_port->ov_work); - err = ice_stop_phy_timer_e822(hw, port, true); + err = ice_stop_phy_timer_e82x(hw, port, true); if (err) dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d down, err %d\n", port, err); @@ -1255,6 +1378,7 @@ ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port) struct ice_pf *pf = ptp_port_to_pf(ptp_port); u8 port = ptp_port->port_num; struct ice_hw *hw = &pf->hw; + unsigned long flags; int err; if (ice_is_e810(hw)) @@ -1268,20 +1392,20 @@ ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port) kthread_cancel_delayed_work_sync(&ptp_port->ov_work); /* temporarily disable Tx timestamps while calibrating PHY offset */ - spin_lock(&ptp_port->tx.lock); + spin_lock_irqsave(&ptp_port->tx.lock, flags); ptp_port->tx.calibrating = true; - spin_unlock(&ptp_port->tx.lock); + spin_unlock_irqrestore(&ptp_port->tx.lock, flags); 
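The tracker initialization above carves each quad's shared 64-entry timestamp bank into per-port windows: four ports per quad, sixteen indices each. A small worked sketch of that arithmetic, using standalone constants that mirror the driver's ICE_PORTS_PER_QUAD and INDEX_PER_PORT_E82X values (demo_* names are illustrative):

    #define PORTS_PER_QUAD	4
    #define IDX_PER_PORT	16	/* 64-entry bank / 4 ports */

    struct demo_window {
    	unsigned int block;	/* which quad's bank */
    	unsigned int offset;	/* first index owned by this port */
    	unsigned int len;	/* number of indices owned by this port */
    };

    static struct demo_window demo_port_window(unsigned int port)
    {
    	struct demo_window w = {
    		.block = port / PORTS_PER_QUAD,
    		.offset = (port % PORTS_PER_QUAD) * IDX_PER_PORT,
    		.len = IDX_PER_PORT,
    	};

    	/* e.g. port 6 -> quad 1, indices 32..47 of that quad's bank */
    	return w;
    }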
ptp_port->tx_fifo_busy_cnt = 0; /* Start the PHY timer in Vernier mode */ - err = ice_start_phy_timer_e822(hw, port); + err = ice_start_phy_timer_e82x(hw, port); if (err) goto out_unlock; /* Enable Tx timestamps right away */ - spin_lock(&ptp_port->tx.lock); + spin_lock_irqsave(&ptp_port->tx.lock, flags); ptp_port->tx.calibrating = false; - spin_unlock(&ptp_port->tx.lock); + spin_unlock_irqrestore(&ptp_port->tx.lock, flags); kthread_queue_delayed_work(pf->ptp.kworker, &ptp_port->ov_work, 0); @@ -1323,7 +1447,7 @@ void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup) case ICE_PHY_E810: /* Do not reconfigure E810 PHY */ return; - case ICE_PHY_E822: + case ICE_PHY_E82X: ice_ptp_port_phy_restart(ptp_port); return; default: @@ -1349,7 +1473,7 @@ static int ice_ptp_tx_ena_intr(struct ice_pf *pf, bool ena, u32 threshold) ice_ptp_reset_ts_memory(hw); for (quad = 0; quad < ICE_MAX_QUAD; quad++) { - err = ice_read_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG, + err = ice_read_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG, &val); if (err) break; @@ -1357,13 +1481,13 @@ static int ice_ptp_tx_ena_intr(struct ice_pf *pf, bool ena, u32 threshold) if (ena) { val |= Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M; val &= ~Q_REG_TX_MEM_GBL_CFG_INTR_THR_M; - val |= ((threshold << Q_REG_TX_MEM_GBL_CFG_INTR_THR_S) & - Q_REG_TX_MEM_GBL_CFG_INTR_THR_M); + val |= FIELD_PREP(Q_REG_TX_MEM_GBL_CFG_INTR_THR_M, + threshold); } else { val &= ~Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M; } - err = ice_write_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG, + err = ice_write_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG, val); if (err) break; @@ -1503,8 +1627,7 @@ ice_ptp_cfg_extts(struct ice_pf *pf, bool ena, unsigned int chan, u32 gpio_pin, * + num_in_channels * tmr_idx */ func = 1 + chan + (tmr_idx * 3); - gpio_reg = ((func << GLGEN_GPIO_CTL_PIN_FUNC_S) & - GLGEN_GPIO_CTL_PIN_FUNC_M); + gpio_reg = FIELD_PREP(GLGEN_GPIO_CTL_PIN_FUNC_M, func); pf->ptp.ext_ts_chan |= (1 << chan); } else { /* clear the values we set to reset defaults */ @@ -1601,7 +1724,7 @@ static int ice_ptp_cfg_clkout(struct ice_pf *pf, unsigned int chan, if (ice_is_e810(hw)) start_time -= E810_OUT_PROP_DELAY_NS; else - start_time -= ice_e822_pps_delay(ice_e822_time_ref(hw)); + start_time -= ice_e82x_pps_delay(ice_e82x_time_ref(hw)); /* 2. Write TARGET time */ wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), lower_32_bits(start_time)); @@ -1614,7 +1737,7 @@ static int ice_ptp_cfg_clkout(struct ice_pf *pf, unsigned int chan, /* 4. write GPIO CTL reg */ func = 8 + chan + (tmr_idx * 4); val = GLGEN_GPIO_CTL_PIN_DIR_M | - ((func << GLGEN_GPIO_CTL_PIN_FUNC_S) & GLGEN_GPIO_CTL_PIN_FUNC_M); + FIELD_PREP(GLGEN_GPIO_CTL_PIN_FUNC_M, func); wr32(hw, GLGEN_GPIO_CTL(gpio_pin), val); /* Store the value if requested */ @@ -1840,7 +1963,7 @@ ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts) ice_ptp_enable_all_clkout(pf); /* Recalibrate and re-enable timestamp blocks for E822/E823 */ - if (hw->phy_model == ICE_PHY_E822) + if (hw->phy_model == ICE_PHY_E82X) ice_ptp_restart_all_phy(pf); exit: if (err) { @@ -2127,30 +2250,26 @@ int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr) } /** - * ice_ptp_rx_hwtstamp - Check for an Rx timestamp - * @rx_ring: Ring to get the VSI info + * ice_ptp_get_rx_hwts - Get packet Rx timestamp in ns * @rx_desc: Receive descriptor - * @skb: Particular skb to send timestamp with + * @pkt_ctx: Packet context to get the cached time * * The driver receives a notification in the receive descriptor with timestamp. 
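The Rx timestamp rework below changes the contract: instead of stamping the skb inside the PTP code, ice_ptp_get_rx_hwts() returns a u64 nanosecond value (0 meaning "no valid timestamp") computed from the descriptor and the cached PHC time, and the caller applies it. That also lets skb-less consumers such as XDP metadata reuse the same helper. A sketch of how a caller might apply the returned value, assuming the 0-means-none convention (demo_apply_rx_hwts is an illustrative name):

    #include <linux/skbuff.h>
    #include <linux/ktime.h>
    #include <linux/string.h>

    /* Illustrative: apply a nanosecond Rx timestamp to an skb, if any.
     * ts_ns would come from ice_ptp_get_rx_hwts(); 0 means "none".
     */
    static void demo_apply_rx_hwts(struct sk_buff *skb, u64 ts_ns)
    {
    	struct skb_shared_hwtstamps *hwts;

    	if (!ts_ns)
    		return;		/* descriptor carried no valid timestamp */

    	hwts = skb_hwtstamps(skb);
    	memset(hwts, 0, sizeof(*hwts));
    	hwts->hwtstamp = ns_to_ktime(ts_ns);
    }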
- * The timestamp is in ns, so we must convert the result first. */ -void -ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring, - union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb) +u64 ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc, + const struct ice_pkt_ctx *pkt_ctx) { - struct skb_shared_hwtstamps *hwtstamps; u64 ts_ns, cached_time; u32 ts_high; if (!(rx_desc->wb.time_stamp_low & ICE_PTP_TS_VALID)) - return; + return 0; - cached_time = READ_ONCE(rx_ring->cached_phctime); + cached_time = READ_ONCE(pkt_ctx->cached_phctime); /* Do not report a timestamp if we don't have a cached PHC time */ if (!cached_time) - return; + return 0; /* Use ice_ptp_extend_32b_ts directly, using the ring-specific cached * PHC value, rather than accessing the PF. This also allows us to @@ -2161,9 +2280,7 @@ ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring, ts_high = le32_to_cpu(rx_desc->wb.flex_ts.ts_high); ts_ns = ice_ptp_extend_32b_ts(cached_time, ts_high); - hwtstamps = skb_hwtstamps(skb); - memset(hwtstamps, 0, sizeof(*hwtstamps)); - hwtstamps->hwtstamp = ns_to_ktime(ts_ns); + return ts_ns; } /** @@ -2378,18 +2495,23 @@ static long ice_ptp_create_clock(struct ice_pf *pf) */ s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb) { + unsigned long flags; u8 idx; - spin_lock(&tx->lock); + spin_lock_irqsave(&tx->lock, flags); /* Check that this tracker is accepting new timestamp requests */ if (!ice_ptp_is_tx_tracker_up(tx)) { - spin_unlock(&tx->lock); + spin_unlock_irqrestore(&tx->lock, flags); return -1; } /* Find and set the first available index */ - idx = find_first_zero_bit(tx->in_use, tx->len); + idx = find_next_zero_bit(tx->in_use, tx->len, + tx->last_ll_ts_idx_read + 1); + if (idx == tx->len) + idx = find_first_zero_bit(tx->in_use, tx->len); + if (idx < tx->len) { /* We got a valid index that no other thread could have set. Store * a reference to the skb and the start time to allow discarding old @@ -2403,7 +2525,7 @@ s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb) ice_trace(tx_tstamp_request, skb, idx); } - spin_unlock(&tx->lock); + spin_unlock_irqrestore(&tx->lock, flags); /* return the appropriate PHY timestamp register index, -1 if no * indexes were available. @@ -2440,6 +2562,54 @@ enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf) } } +/** + * ice_ptp_maybe_trigger_tx_interrupt - Trigger Tx timstamp interrupt + * @pf: Board private structure + * + * The device PHY issues Tx timestamp interrupts to the driver for processing + * timestamp data from the PHY. It will not interrupt again until all + * current timestamp data is read. In rare circumstances, it is possible that + * the driver fails to read all outstanding data. + * + * To avoid getting permanently stuck, periodically check if the PHY has + * outstanding timestamp data. If so, trigger an interrupt from software to + * process this data. + */ +static void ice_ptp_maybe_trigger_tx_interrupt(struct ice_pf *pf) +{ + struct device *dev = ice_pf_to_dev(pf); + struct ice_hw *hw = &pf->hw; + bool trigger_oicr = false; + unsigned int i; + + if (ice_is_e810(hw)) + return; + + if (!ice_pf_src_tmr_owned(pf)) + return; + + for (i = 0; i < ICE_MAX_QUAD; i++) { + u64 tstamp_ready; + int err; + + err = ice_get_phy_tx_tstamp_ready(&pf->hw, i, &tstamp_ready); + if (!err && tstamp_ready) { + trigger_oicr = true; + break; + } + } + + if (trigger_oicr) { + /* Trigger a software interrupt, to ensure this data + * gets processed. + */ + dev_dbg(dev, "PTP periodic task detected waiting timestamps. 
Triggering Tx timestamp interrupt now.\n"); + + wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M); + ice_flush(hw); + } +} + static void ice_ptp_periodic_work(struct kthread_work *work) { struct ice_ptp *ptp = container_of(work, struct ice_ptp, work.work); @@ -2451,6 +2621,8 @@ static void ice_ptp_periodic_work(struct kthread_work *work) err = ice_ptp_update_cached_phctime(pf); + ice_ptp_maybe_trigger_tx_interrupt(pf); + /* Run twice a second or reschedule if phc update failed */ kthread_queue_delayed_work(ptp->kworker, &ptp->work, msecs_to_jiffies(err ? 10 : 500)); @@ -2468,12 +2640,10 @@ void ice_ptp_reset(struct ice_pf *pf) int err, itr = 1; u64 time_diff; - if (test_bit(ICE_PFR_REQ, pf->state)) + if (test_bit(ICE_PFR_REQ, pf->state) || + !ice_pf_src_tmr_owned(pf)) goto pfr; - if (!ice_pf_src_tmr_owned(pf)) - goto reset_ts; - err = ice_ptp_init_phc(hw); if (err) goto err; @@ -2517,10 +2687,6 @@ void ice_ptp_reset(struct ice_pf *pf) goto err; } -reset_ts: - /* Restart the PHY timestamping block */ - ice_ptp_reset_phy_timestamping(pf); - pfr: /* Init Tx structures */ if (ice_is_e810(&pf->hw)) { @@ -2528,7 +2694,7 @@ pfr: } else { kthread_init_delayed_work(&ptp->port.ov_work, ice_ptp_wait_for_offsets); - err = ice_ptp_init_tx_e822(pf, &ptp->port.tx, + err = ice_ptp_init_tx_e82x(pf, &ptp->port.tx, ptp->port.port_num); } if (err) @@ -2536,6 +2702,11 @@ pfr: set_bit(ICE_FLAG_PTP, pf->flags); + /* Restart the PHY timestamping block */ + if (!test_bit(ICE_PFR_REQ, pf->state) && + ice_pf_src_tmr_owned(pf)) + ice_ptp_restart_all_phy(pf); + /* Start periodic work going */ kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0); @@ -2898,11 +3069,11 @@ static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port) switch (hw->phy_model) { case ICE_PHY_E810: return ice_ptp_init_tx_e810(pf, &ptp_port->tx); - case ICE_PHY_E822: + case ICE_PHY_E82X: kthread_init_delayed_work(&ptp_port->ov_work, ice_ptp_wait_for_offsets); - return ice_ptp_init_tx_e822(pf, &ptp_port->tx, + return ice_ptp_init_tx_e82x(pf, &ptp_port->tx, ptp_port->port_num); default: return -ENODEV; @@ -2991,7 +3162,7 @@ static void ice_ptp_remove_auxbus_device(struct ice_pf *pf) static void ice_ptp_init_tx_interrupt_mode(struct ice_pf *pf) { switch (pf->hw.phy_model) { - case ICE_PHY_E822: + case ICE_PHY_E82X: /* E822 based PHY has the clock owner process the interrupt * for all ports. */ diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h index 06a330867f..087dd32d87 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.h +++ b/drivers/net/ethernet/intel/ice/ice_ptp.h @@ -131,6 +131,7 @@ enum ice_tx_tstamp_work { * @calibrating: if true, the PHY is calibrating the Tx offset. During this * window, timestamps are temporarily disabled. 
* @verify_cached: if true, verify new timestamp differs from last read value + * @last_ll_ts_idx_read: index of the last LL TS read by the FW */ struct ice_ptp_tx { spinlock_t lock; /* lock protecting in_use bitmap */ @@ -143,11 +144,12 @@ struct ice_ptp_tx { u8 init : 1; u8 calibrating : 1; u8 verify_cached : 1; + s8 last_ll_ts_idx_read; }; /* Quad and port information for initializing timestamp blocks */ #define INDEX_PER_QUAD 64 -#define INDEX_PER_PORT_E822 16 +#define INDEX_PER_PORT_E82X 16 #define INDEX_PER_PORT_E810 64 /** @@ -296,11 +298,12 @@ void ice_ptp_restore_timestamp_mode(struct ice_pf *pf); void ice_ptp_extts_event(struct ice_pf *pf); s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb); +void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx); +void ice_ptp_complete_tx_single_tstamp(struct ice_ptp_tx *tx); enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf); -void -ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring, - union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb); +u64 ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc, + const struct ice_pkt_ctx *pkt_ctx); void ice_ptp_reset(struct ice_pf *pf); void ice_ptp_prepare_for_reset(struct ice_pf *pf); void ice_ptp_init(struct ice_pf *pf); @@ -325,13 +328,23 @@ ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb) return -1; } +static inline void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx) +{ } + +static inline void ice_ptp_complete_tx_single_tstamp(struct ice_ptp_tx *tx) { } + static inline bool ice_ptp_process_ts(struct ice_pf *pf) { return true; } -static inline void -ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring, - union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb) { } + +static inline u64 +ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc, + const struct ice_pkt_ctx *pkt_ctx) +{ + return 0; +} + static inline void ice_ptp_reset(struct ice_pf *pf) { } static inline void ice_ptp_prepare_for_reset(struct ice_pf *pf) { } static inline void ice_ptp_init(struct ice_pf *pf) { } diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h index 4109aa3b2f..2c4dab0c48 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h +++ b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h @@ -9,17 +9,17 @@ */ /* Constants defined for the PTP 1588 clock hardware. */ -/* struct ice_time_ref_info_e822 +/* struct ice_time_ref_info_e82x * * E822 hardware can use different sources as the reference for the PTP * hardware clock. Each clock has different characteristics such as a slightly * different frequency, etc. * * This lookup table defines several constants that depend on the current time - * reference. See the struct ice_time_ref_info_e822 for information about the + * reference. See the struct ice_time_ref_info_e82x for information about the * meaning of each constant. 
*/ -const struct ice_time_ref_info_e822 e822_time_ref[NUM_ICE_TIME_REF_FREQ] = { +const struct ice_time_ref_info_e82x e822_time_ref[NUM_ICE_TIME_REF_FREQ] = { /* ICE_TIME_REF_FREQ_25_000 -> 25 MHz */ { /* pll_freq */ @@ -81,7 +81,7 @@ const struct ice_time_ref_info_e822 e822_time_ref[NUM_ICE_TIME_REF_FREQ] = { }, }; -const struct ice_cgu_pll_params_e822 e822_cgu_params[NUM_ICE_TIME_REF_FREQ] = { +const struct ice_cgu_pll_params_e82x e822_cgu_params[NUM_ICE_TIME_REF_FREQ] = { /* ICE_TIME_REF_FREQ_25_000 -> 25 MHz */ { /* refclk_pre_div */ @@ -155,7 +155,7 @@ const struct ice_cgu_pll_params_e822 e822_cgu_params[NUM_ICE_TIME_REF_FREQ] = { }, }; -/* struct ice_vernier_info_e822 +/* struct ice_vernier_info_e82x * * E822 hardware calibrates the delay of the timestamp indication from the * actual packet transmission or reception during the initialization of the @@ -168,7 +168,7 @@ const struct ice_cgu_pll_params_e822 e822_cgu_params[NUM_ICE_TIME_REF_FREQ] = { * used by this link speed, and that the register should be cleared by writing * 0. Other values specify the clock frequency in Hz. */ -const struct ice_vernier_info_e822 e822_vernier[NUM_ICE_PTP_LNK_SPD] = { +const struct ice_vernier_info_e82x e822_vernier[NUM_ICE_PTP_LNK_SPD] = { /* ICE_PTP_LNK_SPD_1G */ { /* tx_par_clk */ diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c index a00b55e14a..187ce9b54e 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c @@ -284,19 +284,19 @@ static void ice_ptp_exec_tmr_cmd(struct ice_hw *hw) */ /** - * ice_fill_phy_msg_e822 - Fill message data for a PHY register access + * ice_fill_phy_msg_e82x - Fill message data for a PHY register access * @msg: the PHY message buffer to fill in * @port: the port to access * @offset: the register offset */ static void -ice_fill_phy_msg_e822(struct ice_sbq_msg_input *msg, u8 port, u16 offset) +ice_fill_phy_msg_e82x(struct ice_sbq_msg_input *msg, u8 port, u16 offset) { int phy_port, phy, quadtype; - phy_port = port % ICE_PORTS_PER_PHY_E822; - phy = port / ICE_PORTS_PER_PHY_E822; - quadtype = (port / ICE_PORTS_PER_QUAD) % ICE_QUADS_PER_PHY_E822; + phy_port = port % ICE_PORTS_PER_PHY_E82X; + phy = port / ICE_PORTS_PER_PHY_E82X; + quadtype = (port / ICE_PORTS_PER_QUAD) % ICE_QUADS_PER_PHY_E82X; if (quadtype == 0) { msg->msg_addr_low = P_Q0_L(P_0_BASE + offset, phy_port); @@ -315,7 +315,7 @@ ice_fill_phy_msg_e822(struct ice_sbq_msg_input *msg, u8 port, u16 offset) } /** - * ice_is_64b_phy_reg_e822 - Check if this is a 64bit PHY register + * ice_is_64b_phy_reg_e82x - Check if this is a 64bit PHY register * @low_addr: the low address to check * @high_addr: on return, contains the high address of the 64bit register * @@ -323,7 +323,7 @@ ice_fill_phy_msg_e822(struct ice_sbq_msg_input *msg, u8 port, u16 offset) * represented as two 32bit registers. If it is, return the appropriate high * register offset to use. 
*/ -static bool ice_is_64b_phy_reg_e822(u16 low_addr, u16 *high_addr) +static bool ice_is_64b_phy_reg_e82x(u16 low_addr, u16 *high_addr) { switch (low_addr) { case P_REG_PAR_PCS_TX_OFFSET_L: @@ -368,7 +368,7 @@ static bool ice_is_64b_phy_reg_e822(u16 low_addr, u16 *high_addr) } /** - * ice_is_40b_phy_reg_e822 - Check if this is a 40bit PHY register + * ice_is_40b_phy_reg_e82x - Check if this is a 40bit PHY register * @low_addr: the low address to check * @high_addr: on return, contains the high address of the 40bit value * @@ -377,7 +377,7 @@ static bool ice_is_64b_phy_reg_e822(u16 low_addr, u16 *high_addr) * upper 32 bits in the high register. If it is, return the appropriate high * register offset to use. */ -static bool ice_is_40b_phy_reg_e822(u16 low_addr, u16 *high_addr) +static bool ice_is_40b_phy_reg_e82x(u16 low_addr, u16 *high_addr) { switch (low_addr) { case P_REG_TIMETUS_L: @@ -413,7 +413,7 @@ static bool ice_is_40b_phy_reg_e822(u16 low_addr, u16 *high_addr) } /** - * ice_read_phy_reg_e822 - Read a PHY register + * ice_read_phy_reg_e82x - Read a PHY register * @hw: pointer to the HW struct * @port: PHY port to read from * @offset: PHY register offset to read @@ -422,12 +422,12 @@ static bool ice_is_40b_phy_reg_e822(u16 low_addr, u16 *high_addr) * Read a PHY register for the given port over the device sideband queue. */ static int -ice_read_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 *val) +ice_read_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 offset, u32 *val) { struct ice_sbq_msg_input msg = {0}; int err; - ice_fill_phy_msg_e822(&msg, port, offset); + ice_fill_phy_msg_e82x(&msg, port, offset); msg.opcode = ice_sbq_msg_rd; err = ice_sbq_rw_reg(hw, &msg); @@ -443,7 +443,7 @@ ice_read_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 *val) } /** - * ice_read_64b_phy_reg_e822 - Read a 64bit value from PHY registers + * ice_read_64b_phy_reg_e82x - Read a 64bit value from PHY registers * @hw: pointer to the HW struct * @port: PHY port to read from * @low_addr: offset of the lower register to read from @@ -455,7 +455,7 @@ ice_read_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 *val) * known to be two parts of a 64bit value. */ static int -ice_read_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 *val) +ice_read_64b_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 low_addr, u64 *val) { u32 low, high; u16 high_addr; @@ -464,20 +464,20 @@ ice_read_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 *val) /* Only operate on registers known to be split into two 32bit * registers. 
*/ - if (!ice_is_64b_phy_reg_e822(low_addr, &high_addr)) { + if (!ice_is_64b_phy_reg_e82x(low_addr, &high_addr)) { ice_debug(hw, ICE_DBG_PTP, "Invalid 64b register addr 0x%08x\n", low_addr); return -EINVAL; } - err = ice_read_phy_reg_e822(hw, port, low_addr, &low); + err = ice_read_phy_reg_e82x(hw, port, low_addr, &low); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to read from low register 0x%08x\n, err %d", low_addr, err); return err; } - err = ice_read_phy_reg_e822(hw, port, high_addr, &high); + err = ice_read_phy_reg_e82x(hw, port, high_addr, &high); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to read from high register 0x%08x\n, err %d", high_addr, err); @@ -490,7 +490,7 @@ ice_read_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 *val) } /** - * ice_write_phy_reg_e822 - Write a PHY register + * ice_write_phy_reg_e82x - Write a PHY register * @hw: pointer to the HW struct * @port: PHY port to write to * @offset: PHY register offset to write @@ -499,12 +499,12 @@ ice_read_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 *val) * Write a PHY register for the given port over the device sideband queue. */ static int -ice_write_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 val) +ice_write_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 offset, u32 val) { struct ice_sbq_msg_input msg = {0}; int err; - ice_fill_phy_msg_e822(&msg, port, offset); + ice_fill_phy_msg_e82x(&msg, port, offset); msg.opcode = ice_sbq_msg_wr; msg.data = val; @@ -519,7 +519,7 @@ ice_write_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 val) } /** - * ice_write_40b_phy_reg_e822 - Write a 40b value to the PHY + * ice_write_40b_phy_reg_e82x - Write a 40b value to the PHY * @hw: pointer to the HW struct * @port: port to write to * @low_addr: offset of the low register @@ -529,7 +529,7 @@ ice_write_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 val) * it up into two chunks, the lower 8 bits and the upper 32 bits. */ static int -ice_write_40b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val) +ice_write_40b_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 low_addr, u64 val) { u32 low, high; u16 high_addr; @@ -538,7 +538,7 @@ ice_write_40b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val) /* Only operate on registers known to be split into a lower 8 bit * register and an upper 32 bit register. 
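The 40-bit write helper above splits the value across two registers: the low 8 bits go to the _L register and the remaining 32 bits to the _H register. A worked sketch of the split and the matching reassembly, with DEMO_* constants assuming that same 8/32 layout (the driver's actual masks are P_REG_40B_LOW_M and P_REG_40B_HIGH_S):

    #include <linux/types.h>

    #define DEMO_40B_LOW_M	0xFF	/* low register holds bits 7:0 */
    #define DEMO_40B_HIGH_S	8	/* high register holds bits 39:8 */

    static void demo_split_40b(u64 val, u32 *low, u32 *high)
    {
    	*low = (u32)(val & DEMO_40B_LOW_M);
    	*high = (u32)(val >> DEMO_40B_HIGH_S);
    }

    static u64 demo_join_40b(u32 low, u32 high)
    {
    	return ((u64)high << DEMO_40B_HIGH_S) | (low & DEMO_40B_LOW_M);
    }

    /* e.g. val = 0x12345678AB -> low = 0xAB, high = 0x12345678 */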
*/ - if (!ice_is_40b_phy_reg_e822(low_addr, &high_addr)) { + if (!ice_is_40b_phy_reg_e82x(low_addr, &high_addr)) { ice_debug(hw, ICE_DBG_PTP, "Invalid 40b register addr 0x%08x\n", low_addr); return -EINVAL; @@ -547,14 +547,14 @@ ice_write_40b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val) low = (u32)(val & P_REG_40B_LOW_M); high = (u32)(val >> P_REG_40B_HIGH_S); - err = ice_write_phy_reg_e822(hw, port, low_addr, low); + err = ice_write_phy_reg_e82x(hw, port, low_addr, low); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to write to low register 0x%08x\n, err %d", low_addr, err); return err; } - err = ice_write_phy_reg_e822(hw, port, high_addr, high); + err = ice_write_phy_reg_e82x(hw, port, high_addr, high); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to write to high register 0x%08x\n, err %d", high_addr, err); @@ -565,7 +565,7 @@ ice_write_40b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val) } /** - * ice_write_64b_phy_reg_e822 - Write a 64bit value to PHY registers + * ice_write_64b_phy_reg_e82x - Write a 64bit value to PHY registers * @hw: pointer to the HW struct * @port: PHY port to read from * @low_addr: offset of the lower register to read from @@ -577,7 +577,7 @@ ice_write_40b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val) * a 64bit value. */ static int -ice_write_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val) +ice_write_64b_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 low_addr, u64 val) { u32 low, high; u16 high_addr; @@ -586,7 +586,7 @@ ice_write_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val) /* Only operate on registers known to be split into two 32bit * registers. */ - if (!ice_is_64b_phy_reg_e822(low_addr, &high_addr)) { + if (!ice_is_64b_phy_reg_e82x(low_addr, &high_addr)) { ice_debug(hw, ICE_DBG_PTP, "Invalid 64b register addr 0x%08x\n", low_addr); return -EINVAL; @@ -595,14 +595,14 @@ ice_write_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val) low = lower_32_bits(val); high = upper_32_bits(val); - err = ice_write_phy_reg_e822(hw, port, low_addr, low); + err = ice_write_phy_reg_e82x(hw, port, low_addr, low); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to write to low register 0x%08x\n, err %d", low_addr, err); return err; } - err = ice_write_phy_reg_e822(hw, port, high_addr, high); + err = ice_write_phy_reg_e82x(hw, port, high_addr, high); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to write to high register 0x%08x\n, err %d", high_addr, err); @@ -613,7 +613,7 @@ ice_write_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val) } /** - * ice_fill_quad_msg_e822 - Fill message data for quad register access + * ice_fill_quad_msg_e82x - Fill message data for quad register access * @msg: the PHY message buffer to fill in * @quad: the quad to access * @offset: the register offset @@ -622,7 +622,7 @@ ice_write_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val) * multiple PHYs. 
*/ static int -ice_fill_quad_msg_e822(struct ice_sbq_msg_input *msg, u8 quad, u16 offset) +ice_fill_quad_msg_e82x(struct ice_sbq_msg_input *msg, u8 quad, u16 offset) { u32 addr; @@ -631,7 +631,7 @@ ice_fill_quad_msg_e822(struct ice_sbq_msg_input *msg, u8 quad, u16 offset) msg->dest_dev = rmn_0; - if ((quad % ICE_QUADS_PER_PHY_E822) == 0) + if ((quad % ICE_QUADS_PER_PHY_E82X) == 0) addr = Q_0_BASE + offset; else addr = Q_1_BASE + offset; @@ -643,7 +643,7 @@ ice_fill_quad_msg_e822(struct ice_sbq_msg_input *msg, u8 quad, u16 offset) } /** - * ice_read_quad_reg_e822 - Read a PHY quad register + * ice_read_quad_reg_e82x - Read a PHY quad register * @hw: pointer to the HW struct * @quad: quad to read from * @offset: quad register offset to read @@ -653,12 +653,12 @@ ice_fill_quad_msg_e822(struct ice_sbq_msg_input *msg, u8 quad, u16 offset) * shared between multiple PHYs. */ int -ice_read_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 *val) +ice_read_quad_reg_e82x(struct ice_hw *hw, u8 quad, u16 offset, u32 *val) { struct ice_sbq_msg_input msg = {0}; int err; - err = ice_fill_quad_msg_e822(&msg, quad, offset); + err = ice_fill_quad_msg_e82x(&msg, quad, offset); if (err) return err; @@ -677,7 +677,7 @@ ice_read_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 *val) } /** - * ice_write_quad_reg_e822 - Write a PHY quad register + * ice_write_quad_reg_e82x - Write a PHY quad register * @hw: pointer to the HW struct * @quad: quad to write to * @offset: quad register offset to write @@ -687,12 +687,12 @@ ice_read_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 *val) * shared between multiple PHYs. */ int -ice_write_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 val) +ice_write_quad_reg_e82x(struct ice_hw *hw, u8 quad, u16 offset, u32 val) { struct ice_sbq_msg_input msg = {0}; int err; - err = ice_fill_quad_msg_e822(&msg, quad, offset); + err = ice_fill_quad_msg_e82x(&msg, quad, offset); if (err) return err; @@ -710,7 +710,7 @@ ice_write_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 val) } /** - * ice_read_phy_tstamp_e822 - Read a PHY timestamp out of the quad block + * ice_read_phy_tstamp_e82x - Read a PHY timestamp out of the quad block * @hw: pointer to the HW struct * @quad: the quad to read from * @idx: the timestamp index to read @@ -721,7 +721,7 @@ ice_write_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 val) * family of devices. 
*/ static int -ice_read_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx, u64 *tstamp) +ice_read_phy_tstamp_e82x(struct ice_hw *hw, u8 quad, u8 idx, u64 *tstamp) { u16 lo_addr, hi_addr; u32 lo, hi; @@ -730,14 +730,14 @@ ice_read_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx, u64 *tstamp) lo_addr = (u16)TS_L(Q_REG_TX_MEMORY_BANK_START, idx); hi_addr = (u16)TS_H(Q_REG_TX_MEMORY_BANK_START, idx); - err = ice_read_quad_reg_e822(hw, quad, lo_addr, &lo); + err = ice_read_quad_reg_e82x(hw, quad, lo_addr, &lo); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to read low PTP timestamp register, err %d\n", err); return err; } - err = ice_read_quad_reg_e822(hw, quad, hi_addr, &hi); + err = ice_read_quad_reg_e82x(hw, quad, hi_addr, &hi); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to read high PTP timestamp register, err %d\n", err); @@ -754,7 +754,7 @@ ice_read_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx, u64 *tstamp) } /** - * ice_clear_phy_tstamp_e822 - Clear a timestamp from the quad block + * ice_clear_phy_tstamp_e82x - Clear a timestamp from the quad block * @hw: pointer to the HW struct * @quad: the quad to read from * @idx: the timestamp index to reset @@ -770,18 +770,18 @@ ice_read_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx, u64 *tstamp) * * To directly clear the contents of the timestamp block entirely, discarding * all timestamp data at once, software should instead use - * ice_ptp_reset_ts_memory_quad_e822(). + * ice_ptp_reset_ts_memory_quad_e82x(). * * This function should only be called on an idx whose bit is set according to * ice_get_phy_tx_tstamp_ready(). */ static int -ice_clear_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx) +ice_clear_phy_tstamp_e82x(struct ice_hw *hw, u8 quad, u8 idx) { u64 unused_tstamp; int err; - err = ice_read_phy_tstamp_e822(hw, quad, idx, &unused_tstamp); + err = ice_read_phy_tstamp_e82x(hw, quad, idx, &unused_tstamp); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to read the timestamp register for quad %u, idx %u, err %d\n", quad, idx, err); @@ -792,33 +792,33 @@ ice_clear_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx) } /** - * ice_ptp_reset_ts_memory_quad_e822 - Clear all timestamps from the quad block + * ice_ptp_reset_ts_memory_quad_e82x - Clear all timestamps from the quad block * @hw: pointer to the HW struct * @quad: the quad to read from * * Clear all timestamps from the PHY quad block that is shared between the * internal PHYs on the E822 devices. 
*/ -void ice_ptp_reset_ts_memory_quad_e822(struct ice_hw *hw, u8 quad) +void ice_ptp_reset_ts_memory_quad_e82x(struct ice_hw *hw, u8 quad) { - ice_write_quad_reg_e822(hw, quad, Q_REG_TS_CTRL, Q_REG_TS_CTRL_M); - ice_write_quad_reg_e822(hw, quad, Q_REG_TS_CTRL, ~(u32)Q_REG_TS_CTRL_M); + ice_write_quad_reg_e82x(hw, quad, Q_REG_TS_CTRL, Q_REG_TS_CTRL_M); + ice_write_quad_reg_e82x(hw, quad, Q_REG_TS_CTRL, ~(u32)Q_REG_TS_CTRL_M); } /** - * ice_ptp_reset_ts_memory_e822 - Clear all timestamps from all quad blocks + * ice_ptp_reset_ts_memory_e82x - Clear all timestamps from all quad blocks * @hw: pointer to the HW struct */ -static void ice_ptp_reset_ts_memory_e822(struct ice_hw *hw) +static void ice_ptp_reset_ts_memory_e82x(struct ice_hw *hw) { unsigned int quad; for (quad = 0; quad < ICE_MAX_QUAD; quad++) - ice_ptp_reset_ts_memory_quad_e822(hw, quad); + ice_ptp_reset_ts_memory_quad_e82x(hw, quad); } /** - * ice_read_cgu_reg_e822 - Read a CGU register + * ice_read_cgu_reg_e82x - Read a CGU register * @hw: pointer to the HW struct * @addr: Register address to read * @val: storage for register value read @@ -827,7 +827,7 @@ static void ice_ptp_reset_ts_memory_e822(struct ice_hw *hw) * applicable to E822 devices. */ static int -ice_read_cgu_reg_e822(struct ice_hw *hw, u32 addr, u32 *val) +ice_read_cgu_reg_e82x(struct ice_hw *hw, u32 addr, u32 *val) { struct ice_sbq_msg_input cgu_msg; int err; @@ -850,7 +850,7 @@ ice_read_cgu_reg_e822(struct ice_hw *hw, u32 addr, u32 *val) } /** - * ice_write_cgu_reg_e822 - Write a CGU register + * ice_write_cgu_reg_e82x - Write a CGU register * @hw: pointer to the HW struct * @addr: Register address to write * @val: value to write into the register @@ -859,7 +859,7 @@ ice_read_cgu_reg_e822(struct ice_hw *hw, u32 addr, u32 *val) * applicable to E822 devices. */ static int -ice_write_cgu_reg_e822(struct ice_hw *hw, u32 addr, u32 val) +ice_write_cgu_reg_e82x(struct ice_hw *hw, u32 addr, u32 val) { struct ice_sbq_msg_input cgu_msg; int err; @@ -925,7 +925,7 @@ static const char *ice_clk_src_str(u8 clk_src) } /** - * ice_cfg_cgu_pll_e822 - Configure the Clock Generation Unit + * ice_cfg_cgu_pll_e82x - Configure the Clock Generation Unit * @hw: pointer to the HW struct * @clk_freq: Clock frequency to program * @clk_src: Clock source to select (TIME_REF, or TCX0) @@ -934,7 +934,7 @@ static const char *ice_clk_src_str(u8 clk_src) * time reference, enabling the PLL which drives the PTP hardware clock. 
*/ static int -ice_cfg_cgu_pll_e822(struct ice_hw *hw, enum ice_time_ref_freq clk_freq, +ice_cfg_cgu_pll_e82x(struct ice_hw *hw, enum ice_time_ref_freq clk_freq, enum ice_clk_src clk_src) { union tspll_ro_bwm_lf bwm_lf; @@ -963,15 +963,15 @@ ice_cfg_cgu_pll_e822(struct ice_hw *hw, enum ice_time_ref_freq clk_freq, return -EINVAL; } - err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD9, &dw9.val); + err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD9, &dw9.val); if (err) return err; - err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD24, &dw24.val); + err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD24, &dw24.val); if (err) return err; - err = ice_read_cgu_reg_e822(hw, TSPLL_RO_BWM_LF, &bwm_lf.val); + err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_BWM_LF, &bwm_lf.val); if (err) return err; @@ -986,43 +986,43 @@ ice_cfg_cgu_pll_e822(struct ice_hw *hw, enum ice_time_ref_freq clk_freq, if (dw24.field.ts_pll_enable) { dw24.field.ts_pll_enable = 0; - err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD24, dw24.val); + err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val); if (err) return err; } /* Set the frequency */ dw9.field.time_ref_freq_sel = clk_freq; - err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD9, dw9.val); + err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD9, dw9.val); if (err) return err; /* Configure the TS PLL feedback divisor */ - err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD19, &dw19.val); + err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD19, &dw19.val); if (err) return err; dw19.field.tspll_fbdiv_intgr = e822_cgu_params[clk_freq].feedback_div; dw19.field.tspll_ndivratio = 1; - err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD19, dw19.val); + err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD19, dw19.val); if (err) return err; /* Configure the TS PLL post divisor */ - err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD22, &dw22.val); + err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD22, &dw22.val); if (err) return err; dw22.field.time1588clk_div = e822_cgu_params[clk_freq].post_pll_div; dw22.field.time1588clk_sel_div2 = 0; - err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD22, dw22.val); + err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD22, dw22.val); if (err) return err; /* Configure the TS PLL pre divisor and clock source */ - err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD24, &dw24.val); + err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD24, &dw24.val); if (err) return err; @@ -1030,21 +1030,21 @@ ice_cfg_cgu_pll_e822(struct ice_hw *hw, enum ice_time_ref_freq clk_freq, dw24.field.tspll_fbdiv_frac = e822_cgu_params[clk_freq].frac_n_div; dw24.field.time_ref_sel = clk_src; - err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD24, dw24.val); + err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val); if (err) return err; /* Finally, enable the PLL */ dw24.field.ts_pll_enable = 1; - err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD24, dw24.val); + err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val); if (err) return err; /* Wait to verify if the PLL locks */ usleep_range(1000, 5000); - err = ice_read_cgu_reg_e822(hw, TSPLL_RO_BWM_LF, &bwm_lf.val); + err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_BWM_LF, &bwm_lf.val); if (err) return err; @@ -1064,18 +1064,18 @@ ice_cfg_cgu_pll_e822(struct ice_hw *hw, enum ice_time_ref_freq clk_freq, } /** - * ice_init_cgu_e822 - Initialize CGU with settings from firmware + * ice_init_cgu_e82x - Initialize CGU with settings from firmware * @hw: pointer to the HW structure * * Initialize the Clock Generation Unit of the E822 device. 
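[Editor's aside: the CGU hunk above programs the TS PLL in a strict order. A compile-checkable outline of that order, with every register step stubbed out; the stub functions are hypothetical, the ordering is exactly what the driver code shows.]

#include <stdio.h>

static void disable_pll(void)       { puts("DWORD24: ts_pll_enable = 0"); }
static void set_time_ref_freq(void) { puts("DWORD9:  time_ref_freq_sel"); }
static void set_feedback_div(void)  { puts("DWORD19: fbdiv_intgr, ndivratio"); }
static void set_post_div(void)      { puts("DWORD22: time1588clk_div"); }
static void set_pre_div_src(void)   { puts("DWORD24: fbdiv_frac, time_ref_sel"); }
static void enable_pll(void)        { puts("DWORD24: ts_pll_enable = 1"); }
static int  pll_locked(void)        { return 1; /* TSPLL_RO_BWM_LF check */ }

int main(void)
{
        disable_pll();          /* never retune an enabled PLL */
        set_time_ref_freq();
        set_feedback_div();
        set_post_div();
        set_pre_div_src();
        enable_pll();
        /* the driver sleeps 1-5 ms here before checking lock */
        return pll_locked() ? 0 : 1;
}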
*/ -static int ice_init_cgu_e822(struct ice_hw *hw) +static int ice_init_cgu_e82x(struct ice_hw *hw) { struct ice_ts_func_info *ts_info = &hw->func_caps.ts_func_info; union tspll_cntr_bist_settings cntr_bist; int err; - err = ice_read_cgu_reg_e822(hw, TSPLL_CNTR_BIST_SETTINGS, + err = ice_read_cgu_reg_e82x(hw, TSPLL_CNTR_BIST_SETTINGS, &cntr_bist.val); if (err) return err; @@ -1084,7 +1084,7 @@ static int ice_init_cgu_e822(struct ice_hw *hw) cntr_bist.field.i_plllock_sel_0 = 0; cntr_bist.field.i_plllock_sel_1 = 0; - err = ice_write_cgu_reg_e822(hw, TSPLL_CNTR_BIST_SETTINGS, + err = ice_write_cgu_reg_e82x(hw, TSPLL_CNTR_BIST_SETTINGS, cntr_bist.val); if (err) return err; @@ -1092,7 +1092,7 @@ static int ice_init_cgu_e822(struct ice_hw *hw) /* Configure the CGU PLL using the parameters from the function * capabilities. */ - err = ice_cfg_cgu_pll_e822(hw, ts_info->time_ref, + err = ice_cfg_cgu_pll_e82x(hw, ts_info->time_ref, (enum ice_clk_src)ts_info->clk_src); if (err) return err; @@ -1113,7 +1113,7 @@ static int ice_ptp_set_vernier_wl(struct ice_hw *hw) for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) { int err; - err = ice_write_phy_reg_e822(hw, port, P_REG_WL, + err = ice_write_phy_reg_e82x(hw, port, P_REG_WL, PTP_VERNIER_WL); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to set vernier window length for port %u, err %d\n", @@ -1126,12 +1126,12 @@ static int ice_ptp_set_vernier_wl(struct ice_hw *hw) } /** - * ice_ptp_init_phc_e822 - Perform E822 specific PHC initialization + * ice_ptp_init_phc_e82x - Perform E822 specific PHC initialization * @hw: pointer to HW struct * * Perform PHC initialization steps specific to E822 devices. */ -static int ice_ptp_init_phc_e822(struct ice_hw *hw) +static int ice_ptp_init_phc_e82x(struct ice_hw *hw) { int err; u32 regval; @@ -1145,7 +1145,7 @@ static int ice_ptp_init_phc_e822(struct ice_hw *hw) wr32(hw, PF_SB_REM_DEV_CTL, regval); /* Initialize the Clock Generation Unit */ - err = ice_init_cgu_e822(hw); + err = ice_init_cgu_e82x(hw); if (err) return err; @@ -1154,7 +1154,7 @@ static int ice_ptp_init_phc_e822(struct ice_hw *hw) } /** - * ice_ptp_prep_phy_time_e822 - Prepare PHY port with initial time + * ice_ptp_prep_phy_time_e82x - Prepare PHY port with initial time * @hw: pointer to the HW struct * @time: Time to initialize the PHY port clocks to * @@ -1164,7 +1164,7 @@ static int ice_ptp_init_phc_e822(struct ice_hw *hw) * units of nominal nanoseconds. */ static int -ice_ptp_prep_phy_time_e822(struct ice_hw *hw, u32 time) +ice_ptp_prep_phy_time_e82x(struct ice_hw *hw, u32 time) { u64 phy_time; u8 port; @@ -1177,14 +1177,14 @@ ice_ptp_prep_phy_time_e822(struct ice_hw *hw, u32 time) for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) { /* Tx case */ - err = ice_write_64b_phy_reg_e822(hw, port, + err = ice_write_64b_phy_reg_e82x(hw, port, P_REG_TX_TIMER_INC_PRE_L, phy_time); if (err) goto exit_err; /* Rx case */ - err = ice_write_64b_phy_reg_e822(hw, port, + err = ice_write_64b_phy_reg_e82x(hw, port, P_REG_RX_TIMER_INC_PRE_L, phy_time); if (err) @@ -1201,7 +1201,7 @@ exit_err: } /** - * ice_ptp_prep_port_adj_e822 - Prepare a single port for time adjust + * ice_ptp_prep_port_adj_e82x - Prepare a single port for time adjust * @hw: pointer to HW struct * @port: Port number to be programmed * @time: time in cycles to adjust the port Tx and Rx clocks @@ -1216,7 +1216,7 @@ exit_err: * Negative adjustments are supported using 2s complement arithmetic. 
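[Editor's aside: in the port-adjust body that follows, the s64 cycle count is split into 32-bit halves before being written to the INC_PRE register pairs; two's complement makes the identical split valid for negative adjustments, as the comment above notes. A self-contained equivalent of the lower_32_bits()/upper_32_bits() pair used there.]

#include <stdint.h>

static void split_s64(int64_t time, uint32_t *lo, uint32_t *hi)
{
        *lo = (uint32_t)time;                    /* lower_32_bits() */
        *hi = (uint32_t)((uint64_t)time >> 32);  /* upper_32_bits() */
}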
*/ static int -ice_ptp_prep_port_adj_e822(struct ice_hw *hw, u8 port, s64 time) +ice_ptp_prep_port_adj_e82x(struct ice_hw *hw, u8 port, s64 time) { u32 l_time, u_time; int err; @@ -1225,23 +1225,23 @@ ice_ptp_prep_port_adj_e822(struct ice_hw *hw, u8 port, s64 time) u_time = upper_32_bits(time); /* Tx case */ - err = ice_write_phy_reg_e822(hw, port, P_REG_TX_TIMER_INC_PRE_L, + err = ice_write_phy_reg_e82x(hw, port, P_REG_TX_TIMER_INC_PRE_L, l_time); if (err) goto exit_err; - err = ice_write_phy_reg_e822(hw, port, P_REG_TX_TIMER_INC_PRE_U, + err = ice_write_phy_reg_e82x(hw, port, P_REG_TX_TIMER_INC_PRE_U, u_time); if (err) goto exit_err; /* Rx case */ - err = ice_write_phy_reg_e822(hw, port, P_REG_RX_TIMER_INC_PRE_L, + err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_TIMER_INC_PRE_L, l_time); if (err) goto exit_err; - err = ice_write_phy_reg_e822(hw, port, P_REG_RX_TIMER_INC_PRE_U, + err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_TIMER_INC_PRE_U, u_time); if (err) goto exit_err; @@ -1255,7 +1255,7 @@ exit_err: } /** - * ice_ptp_prep_phy_adj_e822 - Prep PHY ports for a time adjustment + * ice_ptp_prep_phy_adj_e82x - Prep PHY ports for a time adjustment * @hw: pointer to HW struct * @adj: adjustment in nanoseconds * @@ -1264,7 +1264,7 @@ exit_err: * ICE_PTP_ADJ_TIME or ICE_PTP_ADJ_TIME_AT_TIME sync command. */ static int -ice_ptp_prep_phy_adj_e822(struct ice_hw *hw, s32 adj) +ice_ptp_prep_phy_adj_e82x(struct ice_hw *hw, s32 adj) { s64 cycles; u8 port; @@ -1281,7 +1281,7 @@ ice_ptp_prep_phy_adj_e822(struct ice_hw *hw, s32 adj) for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) { int err; - err = ice_ptp_prep_port_adj_e822(hw, port, cycles); + err = ice_ptp_prep_port_adj_e82x(hw, port, cycles); if (err) return err; } @@ -1290,7 +1290,7 @@ ice_ptp_prep_phy_adj_e822(struct ice_hw *hw, s32 adj) } /** - * ice_ptp_prep_phy_incval_e822 - Prepare PHY ports for time adjustment + * ice_ptp_prep_phy_incval_e82x - Prepare PHY ports for time adjustment * @hw: pointer to HW struct * @incval: new increment value to prepare * @@ -1299,13 +1299,13 @@ ice_ptp_prep_phy_adj_e822(struct ice_hw *hw, s32 adj) * issuing an ICE_PTP_INIT_INCVAL command. 
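[Editor's aside: the incval programmed in the next hunk goes through a 40-bit register write (P_REG_TIMETUS_L). A sketch of splitting a 40-bit value across a low/high register pair; the 32/8 layout and the writer callback are assumptions, not the driver's helper.]

#include <stdint.h>

typedef void (*reg_write_fn)(uint16_t offset, uint32_t val);

static void write_40b(reg_write_fn wr, uint16_t reg_l, uint16_t reg_u,
                      uint64_t val)
{
        wr(reg_l, (uint32_t)val);                 /* bits 31:0, _L  */
        wr(reg_u, (uint32_t)(val >> 32) & 0xFF);  /* bits 39:32, _U */
}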
*/ static int -ice_ptp_prep_phy_incval_e822(struct ice_hw *hw, u64 incval) +ice_ptp_prep_phy_incval_e82x(struct ice_hw *hw, u64 incval) { int err; u8 port; for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) { - err = ice_write_40b_phy_reg_e822(hw, port, P_REG_TIMETUS_L, + err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_TIMETUS_L, incval); if (err) goto exit_err; @@ -1337,7 +1337,7 @@ ice_ptp_read_port_capture(struct ice_hw *hw, u8 port, u64 *tx_ts, u64 *rx_ts) int err; /* Tx case */ - err = ice_read_64b_phy_reg_e822(hw, port, P_REG_TX_CAPTURE_L, tx_ts); + err = ice_read_64b_phy_reg_e82x(hw, port, P_REG_TX_CAPTURE_L, tx_ts); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to read REG_TX_CAPTURE, err %d\n", err); @@ -1348,7 +1348,7 @@ ice_ptp_read_port_capture(struct ice_hw *hw, u8 port, u64 *tx_ts, u64 *rx_ts) (unsigned long long)*tx_ts); /* Rx case */ - err = ice_read_64b_phy_reg_e822(hw, port, P_REG_RX_CAPTURE_L, rx_ts); + err = ice_read_64b_phy_reg_e82x(hw, port, P_REG_RX_CAPTURE_L, rx_ts); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_CAPTURE, err %d\n", err); @@ -1362,7 +1362,7 @@ ice_ptp_read_port_capture(struct ice_hw *hw, u8 port, u64 *tx_ts, u64 *rx_ts) } /** - * ice_ptp_write_port_cmd_e822 - Prepare a single PHY port for a timer command + * ice_ptp_write_port_cmd_e82x - Prepare a single PHY port for a timer command * @hw: pointer to HW struct * @port: Port to which cmd has to be sent * @cmd: Command to be sent to the port @@ -1372,8 +1372,8 @@ ice_ptp_read_port_capture(struct ice_hw *hw, u8 port, u64 *tx_ts, u64 *rx_ts) * Do not use this function directly. If you want to configure exactly one * port, use ice_ptp_one_port_cmd() instead. */ -static int -ice_ptp_write_port_cmd_e822(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd) +static int ice_ptp_write_port_cmd_e82x(struct ice_hw *hw, u8 port, + enum ice_ptp_tmr_cmd cmd) { u32 cmd_val, val; u8 tmr_idx; @@ -1403,7 +1403,7 @@ ice_ptp_write_port_cmd_e822(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd /* Tx case */ /* Read, modify, write */ - err = ice_read_phy_reg_e822(hw, port, P_REG_TX_TMR_CMD, &val); + err = ice_read_phy_reg_e82x(hw, port, P_REG_TX_TMR_CMD, &val); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_TMR_CMD, err %d\n", err); @@ -1414,7 +1414,7 @@ ice_ptp_write_port_cmd_e822(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd val &= ~TS_CMD_MASK; val |= cmd_val; - err = ice_write_phy_reg_e822(hw, port, P_REG_TX_TMR_CMD, val); + err = ice_write_phy_reg_e82x(hw, port, P_REG_TX_TMR_CMD, val); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to write back TX_TMR_CMD, err %d\n", err); @@ -1423,7 +1423,7 @@ ice_ptp_write_port_cmd_e822(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd /* Rx case */ /* Read, modify, write */ - err = ice_read_phy_reg_e822(hw, port, P_REG_RX_TMR_CMD, &val); + err = ice_read_phy_reg_e82x(hw, port, P_REG_RX_TMR_CMD, &val); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_TMR_CMD, err %d\n", err); @@ -1434,7 +1434,7 @@ ice_ptp_write_port_cmd_e822(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd val &= ~TS_CMD_MASK; val |= cmd_val; - err = ice_write_phy_reg_e822(hw, port, P_REG_RX_TMR_CMD, val); + err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_TMR_CMD, val); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to write back RX_TMR_CMD, err %d\n", err); @@ -1469,7 +1469,7 @@ ice_ptp_one_port_cmd(struct ice_hw *hw, u8 configured_port, else cmd = ICE_PTP_NOP; - err = ice_ptp_write_port_cmd_e822(hw, port, cmd); + err = ice_ptp_write_port_cmd_e82x(hw, port, cmd); if 
(err) return err; } @@ -1478,7 +1478,7 @@ ice_ptp_one_port_cmd(struct ice_hw *hw, u8 configured_port, } /** - * ice_ptp_port_cmd_e822 - Prepare all ports for a timer command + * ice_ptp_port_cmd_e82x - Prepare all ports for a timer command * @hw: pointer to the HW struct * @cmd: timer command to prepare * @@ -1486,14 +1486,14 @@ ice_ptp_one_port_cmd(struct ice_hw *hw, u8 configured_port, * command. */ static int -ice_ptp_port_cmd_e822(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd) +ice_ptp_port_cmd_e82x(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd) { u8 port; for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) { int err; - err = ice_ptp_write_port_cmd_e822(hw, port, cmd); + err = ice_ptp_write_port_cmd_e82x(hw, port, cmd); if (err) return err; } @@ -1509,7 +1509,7 @@ ice_ptp_port_cmd_e822(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd) */ /** - * ice_phy_get_speed_and_fec_e822 - Get link speed and FEC based on serdes mode + * ice_phy_get_speed_and_fec_e82x - Get link speed and FEC based on serdes mode * @hw: pointer to HW struct * @port: the port to read from * @link_out: if non-NULL, holds link speed on success @@ -1519,7 +1519,7 @@ ice_ptp_port_cmd_e822(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd) * algorithm. */ static int -ice_phy_get_speed_and_fec_e822(struct ice_hw *hw, u8 port, +ice_phy_get_speed_and_fec_e82x(struct ice_hw *hw, u8 port, enum ice_ptp_link_spd *link_out, enum ice_ptp_fec_mode *fec_out) { @@ -1528,7 +1528,7 @@ ice_phy_get_speed_and_fec_e822(struct ice_hw *hw, u8 port, u32 serdes; int err; - err = ice_read_phy_reg_e822(hw, port, P_REG_LINK_SPEED, &serdes); + err = ice_read_phy_reg_e82x(hw, port, P_REG_LINK_SPEED, &serdes); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to read serdes info\n"); return err; @@ -1585,18 +1585,18 @@ ice_phy_get_speed_and_fec_e822(struct ice_hw *hw, u8 port, } /** - * ice_phy_cfg_lane_e822 - Configure PHY quad for single/multi-lane timestamp + * ice_phy_cfg_lane_e82x - Configure PHY quad for single/multi-lane timestamp * @hw: pointer to HW struct * @port: to configure the quad for */ -static void ice_phy_cfg_lane_e822(struct ice_hw *hw, u8 port) +static void ice_phy_cfg_lane_e82x(struct ice_hw *hw, u8 port) { enum ice_ptp_link_spd link_spd; int err; u32 val; u8 quad; - err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, NULL); + err = ice_phy_get_speed_and_fec_e82x(hw, port, &link_spd, NULL); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to get PHY link speed, err %d\n", err); @@ -1605,7 +1605,7 @@ static void ice_phy_cfg_lane_e822(struct ice_hw *hw, u8 port) quad = port / ICE_PORTS_PER_QUAD; - err = ice_read_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG, &val); + err = ice_read_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG, &val); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_MEM_GLB_CFG, err %d\n", err); @@ -1617,7 +1617,7 @@ static void ice_phy_cfg_lane_e822(struct ice_hw *hw, u8 port) else val |= Q_REG_TX_MEM_GBL_CFG_LANE_TYPE_M; - err = ice_write_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG, val); + err = ice_write_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG, val); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to write back TX_MEM_GBL_CFG, err %d\n", err); @@ -1626,7 +1626,7 @@ static void ice_phy_cfg_lane_e822(struct ice_hw *hw, u8 port) } /** - * ice_phy_cfg_uix_e822 - Configure Serdes UI to TU conversion for E822 + * ice_phy_cfg_uix_e82x - Configure Serdes UI to TU conversion for E822 * @hw: pointer to the HW structure * @port: the port to configure * @@ -1671,12 +1671,12 @@ static void ice_phy_cfg_lane_e822(struct ice_hw 
*hw, u8 port) * a divide by 390,625,000. This does lose some precision, but avoids * miscalculation due to arithmetic overflow. */ -static int ice_phy_cfg_uix_e822(struct ice_hw *hw, u8 port) +static int ice_phy_cfg_uix_e82x(struct ice_hw *hw, u8 port) { u64 cur_freq, clk_incval, tu_per_sec, uix; int err; - cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw)); + cur_freq = ice_e82x_pll_freq(ice_e82x_time_ref(hw)); clk_incval = ice_ptp_read_src_incval(hw); /* Calculate TUs per second divided by 256 */ @@ -1688,7 +1688,7 @@ static int ice_phy_cfg_uix_e822(struct ice_hw *hw, u8 port) /* Program the 10Gb/40Gb conversion ratio */ uix = div_u64(tu_per_sec * LINE_UI_10G_40G, 390625000); - err = ice_write_64b_phy_reg_e822(hw, port, P_REG_UIX66_10G_40G_L, + err = ice_write_64b_phy_reg_e82x(hw, port, P_REG_UIX66_10G_40G_L, uix); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to write UIX66_10G_40G, err %d\n", @@ -1699,7 +1699,7 @@ static int ice_phy_cfg_uix_e822(struct ice_hw *hw, u8 port) /* Program the 25Gb/100Gb conversion ratio */ uix = div_u64(tu_per_sec * LINE_UI_25G_100G, 390625000); - err = ice_write_64b_phy_reg_e822(hw, port, P_REG_UIX66_25G_100G_L, + err = ice_write_64b_phy_reg_e82x(hw, port, P_REG_UIX66_25G_100G_L, uix); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to write UIX66_25G_100G, err %d\n", @@ -1711,7 +1711,7 @@ static int ice_phy_cfg_uix_e822(struct ice_hw *hw, u8 port) } /** - * ice_phy_cfg_parpcs_e822 - Configure TUs per PAR/PCS clock cycle + * ice_phy_cfg_parpcs_e82x - Configure TUs per PAR/PCS clock cycle * @hw: pointer to the HW struct * @port: port to configure * @@ -1753,18 +1753,18 @@ static int ice_phy_cfg_uix_e822(struct ice_hw *hw, u8 port) * frequency is ~29 bits, so multiplying them together should fit within the * 64 bit arithmetic. 
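[Editor's aside: the UIX and PAR/PCS comments above both lean on the same arithmetic trick: keep the multiply inside 64 bits, then do a single div_u64. A userspace rendering with the 390,625,000 divisor from the UIX path; line_ui stands in for the LINE_UI_* constants, whose values are not shown in this hunk.]

#include <stdint.h>

static uint64_t calc_uix66(uint64_t tu_per_sec, uint64_t line_ui)
{
        /* div_u64() equivalent; the product must stay under 2^64,
         * which the surrounding comment argues it does
         */
        return (tu_per_sec * line_ui) / 390625000u;
}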
*/ -static int ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port) +static int ice_phy_cfg_parpcs_e82x(struct ice_hw *hw, u8 port) { u64 cur_freq, clk_incval, tu_per_sec, phy_tus; enum ice_ptp_link_spd link_spd; enum ice_ptp_fec_mode fec_mode; int err; - err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode); + err = ice_phy_get_speed_and_fec_e82x(hw, port, &link_spd, &fec_mode); if (err) return err; - cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw)); + cur_freq = ice_e82x_pll_freq(ice_e82x_time_ref(hw)); clk_incval = ice_ptp_read_src_incval(hw); /* Calculate TUs per cycle of the PHC clock */ @@ -1784,7 +1784,7 @@ static int ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port) else phy_tus = 0; - err = ice_write_40b_phy_reg_e822(hw, port, P_REG_PAR_TX_TUS_L, + err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_PAR_TX_TUS_L, phy_tus); if (err) return err; @@ -1796,7 +1796,7 @@ static int ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port) else phy_tus = 0; - err = ice_write_40b_phy_reg_e822(hw, port, P_REG_PAR_RX_TUS_L, + err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_PAR_RX_TUS_L, phy_tus); if (err) return err; @@ -1808,7 +1808,7 @@ static int ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port) else phy_tus = 0; - err = ice_write_40b_phy_reg_e822(hw, port, P_REG_PCS_TX_TUS_L, + err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_PCS_TX_TUS_L, phy_tus); if (err) return err; @@ -1820,7 +1820,7 @@ static int ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port) else phy_tus = 0; - err = ice_write_40b_phy_reg_e822(hw, port, P_REG_PCS_RX_TUS_L, + err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_PCS_RX_TUS_L, phy_tus); if (err) return err; @@ -1832,7 +1832,7 @@ static int ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port) else phy_tus = 0; - err = ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PAR_TX_TUS_L, + err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_DESK_PAR_TX_TUS_L, phy_tus); if (err) return err; @@ -1844,7 +1844,7 @@ static int ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port) else phy_tus = 0; - err = ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PAR_RX_TUS_L, + err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_DESK_PAR_RX_TUS_L, phy_tus); if (err) return err; @@ -1856,7 +1856,7 @@ static int ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port) else phy_tus = 0; - err = ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PCS_TX_TUS_L, + err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_DESK_PCS_TX_TUS_L, phy_tus); if (err) return err; @@ -1868,23 +1868,23 @@ static int ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port) else phy_tus = 0; - return ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PCS_RX_TUS_L, + return ice_write_40b_phy_reg_e82x(hw, port, P_REG_DESK_PCS_RX_TUS_L, phy_tus); } /** - * ice_calc_fixed_tx_offset_e822 - Calculated Fixed Tx offset for a port + * ice_calc_fixed_tx_offset_e82x - Calculated Fixed Tx offset for a port * @hw: pointer to the HW struct * @link_spd: the Link speed to calculate for * * Calculate the fixed offset due to known static latency data. 
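[Editor's aside: each write in the parpcs hunk above programs "TUs per cycle" for one PHY-internal clock, and the else branches write 0 when a link speed lacks that clock. The per-clock divide itself is elided as context, so this shape is an assumption consistent with the visible comments, not quoted code.]

#include <stdint.h>

static uint64_t tus_per_cycle(uint64_t tu_per_sec, uint32_t clk_hz)
{
        return clk_hz ? tu_per_sec / clk_hz : 0;  /* 0 = clock unused */
}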
*/ static u64 -ice_calc_fixed_tx_offset_e822(struct ice_hw *hw, enum ice_ptp_link_spd link_spd) +ice_calc_fixed_tx_offset_e82x(struct ice_hw *hw, enum ice_ptp_link_spd link_spd) { u64 cur_freq, clk_incval, tu_per_sec, fixed_offset; - cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw)); + cur_freq = ice_e82x_pll_freq(ice_e82x_time_ref(hw)); clk_incval = ice_ptp_read_src_incval(hw); /* Calculate TUs per second */ @@ -1904,7 +1904,7 @@ ice_calc_fixed_tx_offset_e822(struct ice_hw *hw, enum ice_ptp_link_spd link_spd) } /** - * ice_phy_cfg_tx_offset_e822 - Configure total Tx timestamp offset + * ice_phy_cfg_tx_offset_e82x - Configure total Tx timestamp offset * @hw: pointer to the HW struct * @port: the PHY port to configure * @@ -1926,7 +1926,7 @@ ice_calc_fixed_tx_offset_e822(struct ice_hw *hw, enum ice_ptp_link_spd link_spd) * Returns zero on success, -EBUSY if the hardware vernier offset * calibration has not completed, or another error code on failure. */ -int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port) +int ice_phy_cfg_tx_offset_e82x(struct ice_hw *hw, u8 port) { enum ice_ptp_link_spd link_spd; enum ice_ptp_fec_mode fec_mode; @@ -1935,7 +1935,7 @@ int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port) u32 reg; /* Nothing to do if we've already programmed the offset */ - err = ice_read_phy_reg_e822(hw, port, P_REG_TX_OR, ®); + err = ice_read_phy_reg_e82x(hw, port, P_REG_TX_OR, ®); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_OR for port %u, err %d\n", port, err); @@ -1945,7 +1945,7 @@ int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port) if (reg) return 0; - err = ice_read_phy_reg_e822(hw, port, P_REG_TX_OV_STATUS, ®); + err = ice_read_phy_reg_e82x(hw, port, P_REG_TX_OV_STATUS, ®); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_OV_STATUS for port %u, err %d\n", port, err); @@ -1955,11 +1955,11 @@ int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port) if (!(reg & P_REG_TX_OV_STATUS_OV_M)) return -EBUSY; - err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode); + err = ice_phy_get_speed_and_fec_e82x(hw, port, &link_spd, &fec_mode); if (err) return err; - total_offset = ice_calc_fixed_tx_offset_e822(hw, link_spd); + total_offset = ice_calc_fixed_tx_offset_e82x(hw, link_spd); /* Read the first Vernier offset from the PHY register and add it to * the total offset. @@ -1970,7 +1970,7 @@ int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port) link_spd == ICE_PTP_LNK_SPD_25G_RS || link_spd == ICE_PTP_LNK_SPD_40G || link_spd == ICE_PTP_LNK_SPD_50G) { - err = ice_read_64b_phy_reg_e822(hw, port, + err = ice_read_64b_phy_reg_e82x(hw, port, P_REG_PAR_PCS_TX_OFFSET_L, &val); if (err) @@ -1985,7 +1985,7 @@ int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port) */ if (link_spd == ICE_PTP_LNK_SPD_50G_RS || link_spd == ICE_PTP_LNK_SPD_100G_RS) { - err = ice_read_64b_phy_reg_e822(hw, port, + err = ice_read_64b_phy_reg_e82x(hw, port, P_REG_PAR_TX_TIME_L, &val); if (err) @@ -1998,12 +1998,12 @@ int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port) * PHY and indicate that the Tx offset is ready. After this, * timestamps will be enabled. 
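[Editor's aside: the Tx offset path above starts from the fixed static-latency term and, depending on link speed, folds in one or two vernier measurements before latching the total; the additions themselves sit in elided context lines, so treat this as a sketch of the accumulation.]

#include <stdint.h>

static uint64_t total_tx_offset(uint64_t fixed, uint64_t par_pcs_vernier,
                                uint64_t rs_par_tx_time)
{
        /* terms a given link speed does not provide are passed as 0 */
        return fixed + par_pcs_vernier + rs_par_tx_time;
}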
*/ - err = ice_write_64b_phy_reg_e822(hw, port, P_REG_TOTAL_TX_OFFSET_L, + err = ice_write_64b_phy_reg_e82x(hw, port, P_REG_TOTAL_TX_OFFSET_L, total_offset); if (err) return err; - err = ice_write_phy_reg_e822(hw, port, P_REG_TX_OR, 1); + err = ice_write_phy_reg_e82x(hw, port, P_REG_TX_OR, 1); if (err) return err; @@ -2014,7 +2014,7 @@ int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port) } /** - * ice_phy_calc_pmd_adj_e822 - Calculate PMD adjustment for Rx + * ice_phy_calc_pmd_adj_e82x - Calculate PMD adjustment for Rx * @hw: pointer to the HW struct * @port: the PHY port to adjust for * @link_spd: the current link speed of the PHY @@ -2026,7 +2026,7 @@ int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port) * various delays caused when receiving a packet. */ static int -ice_phy_calc_pmd_adj_e822(struct ice_hw *hw, u8 port, +ice_phy_calc_pmd_adj_e82x(struct ice_hw *hw, u8 port, enum ice_ptp_link_spd link_spd, enum ice_ptp_fec_mode fec_mode, u64 *pmd_adj) { @@ -2035,7 +2035,7 @@ ice_phy_calc_pmd_adj_e822(struct ice_hw *hw, u8 port, u32 val; int err; - err = ice_read_phy_reg_e822(hw, port, P_REG_PMD_ALIGNMENT, &val); + err = ice_read_phy_reg_e82x(hw, port, P_REG_PMD_ALIGNMENT, &val); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to read PMD alignment, err %d\n", err); @@ -2044,7 +2044,7 @@ ice_phy_calc_pmd_adj_e822(struct ice_hw *hw, u8 port, pmd_align = (u8)val; - cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw)); + cur_freq = ice_e82x_pll_freq(ice_e82x_time_ref(hw)); clk_incval = ice_ptp_read_src_incval(hw); /* Calculate TUs per second */ @@ -2123,7 +2123,7 @@ ice_phy_calc_pmd_adj_e822(struct ice_hw *hw, u8 port, u64 cycle_adj; u8 rx_cycle; - err = ice_read_phy_reg_e822(hw, port, P_REG_RX_40_TO_160_CNT, + err = ice_read_phy_reg_e82x(hw, port, P_REG_RX_40_TO_160_CNT, &val); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to read 25G-RS Rx cycle count, err %d\n", @@ -2145,7 +2145,7 @@ ice_phy_calc_pmd_adj_e822(struct ice_hw *hw, u8 port, u64 cycle_adj; u8 rx_cycle; - err = ice_read_phy_reg_e822(hw, port, P_REG_RX_80_TO_160_CNT, + err = ice_read_phy_reg_e82x(hw, port, P_REG_RX_80_TO_160_CNT, &val); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to read 50G-RS Rx cycle count, err %d\n", @@ -2172,18 +2172,18 @@ ice_phy_calc_pmd_adj_e822(struct ice_hw *hw, u8 port, } /** - * ice_calc_fixed_rx_offset_e822 - Calculated the fixed Rx offset for a port + * ice_calc_fixed_rx_offset_e82x - Calculated the fixed Rx offset for a port * @hw: pointer to HW struct * @link_spd: The Link speed to calculate for * * Determine the fixed Rx latency for a given link speed. 
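[Editor's aside: the fixed-offset helpers bracketing this hunk convert a known static latency in nanoseconds into TUs via the TUs-per-second figure. The exact expression is elided context; the form below is a plausible assumption, noting that a multiply-first variant with div_u64 would trade overflow headroom for precision.]

#include <stdint.h>

static uint64_t ns_to_tus(uint64_t tu_per_sec, uint64_t delay_ns)
{
        /* divide-first keeps the product small; the driver may instead
         * multiply first and divide with div_u64 for better precision
         */
        return (tu_per_sec / 1000000000u) * delay_ns;
}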
*/ static u64 -ice_calc_fixed_rx_offset_e822(struct ice_hw *hw, enum ice_ptp_link_spd link_spd) +ice_calc_fixed_rx_offset_e82x(struct ice_hw *hw, enum ice_ptp_link_spd link_spd) { u64 cur_freq, clk_incval, tu_per_sec, fixed_offset; - cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw)); + cur_freq = ice_e82x_pll_freq(ice_e82x_time_ref(hw)); clk_incval = ice_ptp_read_src_incval(hw); /* Calculate TUs per second */ @@ -2203,7 +2203,7 @@ ice_calc_fixed_rx_offset_e822(struct ice_hw *hw, enum ice_ptp_link_spd link_spd) } /** - * ice_phy_cfg_rx_offset_e822 - Configure total Rx timestamp offset + * ice_phy_cfg_rx_offset_e82x - Configure total Rx timestamp offset * @hw: pointer to the HW struct * @port: the PHY port to configure * @@ -2229,7 +2229,7 @@ ice_calc_fixed_rx_offset_e822(struct ice_hw *hw, enum ice_ptp_link_spd link_spd) * Returns zero on success, -EBUSY if the hardware vernier offset * calibration has not completed, or another error code on failure. */ -int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port) +int ice_phy_cfg_rx_offset_e82x(struct ice_hw *hw, u8 port) { enum ice_ptp_link_spd link_spd; enum ice_ptp_fec_mode fec_mode; @@ -2238,7 +2238,7 @@ int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port) u32 reg; /* Nothing to do if we've already programmed the offset */ - err = ice_read_phy_reg_e822(hw, port, P_REG_RX_OR, ®); + err = ice_read_phy_reg_e82x(hw, port, P_REG_RX_OR, ®); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_OR for port %u, err %d\n", port, err); @@ -2248,7 +2248,7 @@ int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port) if (reg) return 0; - err = ice_read_phy_reg_e822(hw, port, P_REG_RX_OV_STATUS, ®); + err = ice_read_phy_reg_e82x(hw, port, P_REG_RX_OV_STATUS, ®); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_OV_STATUS for port %u, err %d\n", port, err); @@ -2258,16 +2258,16 @@ int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port) if (!(reg & P_REG_RX_OV_STATUS_OV_M)) return -EBUSY; - err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode); + err = ice_phy_get_speed_and_fec_e82x(hw, port, &link_spd, &fec_mode); if (err) return err; - total_offset = ice_calc_fixed_rx_offset_e822(hw, link_spd); + total_offset = ice_calc_fixed_rx_offset_e82x(hw, link_spd); /* Read the first Vernier offset from the PHY register and add it to * the total offset. */ - err = ice_read_64b_phy_reg_e822(hw, port, + err = ice_read_64b_phy_reg_e82x(hw, port, P_REG_PAR_PCS_RX_OFFSET_L, &val); if (err) @@ -2282,7 +2282,7 @@ int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port) link_spd == ICE_PTP_LNK_SPD_50G || link_spd == ICE_PTP_LNK_SPD_50G_RS || link_spd == ICE_PTP_LNK_SPD_100G_RS) { - err = ice_read_64b_phy_reg_e822(hw, port, + err = ice_read_64b_phy_reg_e82x(hw, port, P_REG_PAR_RX_TIME_L, &val); if (err) @@ -2292,7 +2292,7 @@ int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port) } /* In addition, Rx must account for the PMD alignment */ - err = ice_phy_calc_pmd_adj_e822(hw, port, link_spd, fec_mode, &pmd); + err = ice_phy_calc_pmd_adj_e82x(hw, port, link_spd, fec_mode, &pmd); if (err) return err; @@ -2308,12 +2308,12 @@ int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port) * PHY and indicate that the Rx offset is ready. After this, * timestamps will be enabled. 
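[Editor's aside: both offset-programming functions use the same two guards, visible in the hunks above: a nonzero override register means the offset is already latched (return 0), and an unset vernier-done bit means hardware calibration is still running (return -EBUSY). A bare skeleton of that guard pair; the return-1 convention for "proceed" is this sketch's, not the driver's.]

#include <errno.h>
#include <stdint.h>

static int offset_guards(uint32_t override_reg, uint32_t ov_status,
                         uint32_t ov_done_mask)
{
        if (override_reg)
                return 0;        /* already programmed; idempotent      */
        if (!(ov_status & ov_done_mask))
                return -EBUSY;   /* vernier calibration not finished    */
        return 1;                /* caller proceeds to program offsets  */
}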
*/ - err = ice_write_64b_phy_reg_e822(hw, port, P_REG_TOTAL_RX_OFFSET_L, + err = ice_write_64b_phy_reg_e82x(hw, port, P_REG_TOTAL_RX_OFFSET_L, total_offset); if (err) return err; - err = ice_write_phy_reg_e822(hw, port, P_REG_RX_OR, 1); + err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_OR, 1); if (err) return err; @@ -2324,7 +2324,7 @@ int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port) } /** - * ice_read_phy_and_phc_time_e822 - Simultaneously capture PHC and PHY time + * ice_read_phy_and_phc_time_e82x - Simultaneously capture PHC and PHY time * @hw: pointer to the HW struct * @port: the PHY port to read * @phy_time: on return, the 64bit PHY timer value @@ -2334,7 +2334,7 @@ int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port) * and PHC timer values. */ static int -ice_read_phy_and_phc_time_e822(struct ice_hw *hw, u8 port, u64 *phy_time, +ice_read_phy_and_phc_time_e82x(struct ice_hw *hw, u8 port, u64 *phy_time, u64 *phc_time) { u64 tx_time, rx_time; @@ -2381,7 +2381,7 @@ ice_read_phy_and_phc_time_e822(struct ice_hw *hw, u8 port, u64 *phy_time, } /** - * ice_sync_phy_timer_e822 - Synchronize the PHY timer with PHC timer + * ice_sync_phy_timer_e82x - Synchronize the PHY timer with PHC timer * @hw: pointer to the HW struct * @port: the PHY port to synchronize * @@ -2392,7 +2392,7 @@ ice_read_phy_and_phc_time_e822(struct ice_hw *hw, u8 port, u64 *phy_time, * to the PHY timer in order to ensure it reads the same value as the * primary PHC timer. */ -static int ice_sync_phy_timer_e822(struct ice_hw *hw, u8 port) +static int ice_sync_phy_timer_e82x(struct ice_hw *hw, u8 port) { u64 phc_time, phy_time, difference; int err; @@ -2402,7 +2402,7 @@ static int ice_sync_phy_timer_e822(struct ice_hw *hw, u8 port) return -EBUSY; } - err = ice_read_phy_and_phc_time_e822(hw, port, &phy_time, &phc_time); + err = ice_read_phy_and_phc_time_e82x(hw, port, &phy_time, &phc_time); if (err) goto err_unlock; @@ -2416,7 +2416,7 @@ static int ice_sync_phy_timer_e822(struct ice_hw *hw, u8 port) */ difference = phc_time - phy_time; - err = ice_ptp_prep_port_adj_e822(hw, port, (s64)difference); + err = ice_ptp_prep_port_adj_e82x(hw, port, (s64)difference); if (err) goto err_unlock; @@ -2433,7 +2433,7 @@ static int ice_sync_phy_timer_e822(struct ice_hw *hw, u8 port) /* Re-capture the timer values to flush the command registers and * verify that the time was properly adjusted. */ - err = ice_read_phy_and_phc_time_e822(hw, port, &phy_time, &phc_time); + err = ice_read_phy_and_phc_time_e82x(hw, port, &phy_time, &phc_time); if (err) goto err_unlock; @@ -2452,7 +2452,7 @@ err_unlock: } /** - * ice_stop_phy_timer_e822 - Stop the PHY clock timer + * ice_stop_phy_timer_e82x - Stop the PHY clock timer * @hw: pointer to the HW struct * @port: the PHY port to stop * @soft_reset: if true, hold the SOFT_RESET bit of P_REG_PS @@ -2462,36 +2462,36 @@ err_unlock: * initialized or when link speed changes. 
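[Editor's aside: the sync hunk above computes the correction as a plain unsigned subtraction and hands it to the port-adjust helper as s64; two's complement wraparound makes that correct even when the PHY timer is ahead of the PHC. Standalone equivalent:]

#include <stdint.h>

static int64_t timer_correction(uint64_t phc_time, uint64_t phy_time)
{
        /* wraps correctly in unsigned math, then reinterprets as signed */
        return (int64_t)(phc_time - phy_time);
}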
*/ int -ice_stop_phy_timer_e822(struct ice_hw *hw, u8 port, bool soft_reset) +ice_stop_phy_timer_e82x(struct ice_hw *hw, u8 port, bool soft_reset) { int err; u32 val; - err = ice_write_phy_reg_e822(hw, port, P_REG_TX_OR, 0); + err = ice_write_phy_reg_e82x(hw, port, P_REG_TX_OR, 0); if (err) return err; - err = ice_write_phy_reg_e822(hw, port, P_REG_RX_OR, 0); + err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_OR, 0); if (err) return err; - err = ice_read_phy_reg_e822(hw, port, P_REG_PS, &val); + err = ice_read_phy_reg_e82x(hw, port, P_REG_PS, &val); if (err) return err; val &= ~P_REG_PS_START_M; - err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val); + err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val); if (err) return err; val &= ~P_REG_PS_ENA_CLK_M; - err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val); + err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val); if (err) return err; if (soft_reset) { val |= P_REG_PS_SFT_RESET_M; - err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val); + err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val); if (err) return err; } @@ -2502,7 +2502,7 @@ ice_stop_phy_timer_e822(struct ice_hw *hw, u8 port, bool soft_reset) } /** - * ice_start_phy_timer_e822 - Start the PHY clock timer + * ice_start_phy_timer_e82x - Start the PHY clock timer * @hw: pointer to the HW struct * @port: the PHY port to start * @@ -2512,7 +2512,7 @@ ice_stop_phy_timer_e822(struct ice_hw *hw, u8 port, bool soft_reset) * * Hardware will take Vernier measurements on Tx or Rx of packets. */ -int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port) +int ice_start_phy_timer_e82x(struct ice_hw *hw, u8 port) { u32 lo, hi, val; u64 incval; @@ -2521,17 +2521,17 @@ int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port) tmr_idx = ice_get_ptp_src_clock_index(hw); - err = ice_stop_phy_timer_e822(hw, port, false); + err = ice_stop_phy_timer_e82x(hw, port, false); if (err) return err; - ice_phy_cfg_lane_e822(hw, port); + ice_phy_cfg_lane_e82x(hw, port); - err = ice_phy_cfg_uix_e822(hw, port); + err = ice_phy_cfg_uix_e82x(hw, port); if (err) return err; - err = ice_phy_cfg_parpcs_e822(hw, port); + err = ice_phy_cfg_parpcs_e82x(hw, port); if (err) return err; @@ -2539,7 +2539,7 @@ int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port) hi = rd32(hw, GLTSYN_INCVAL_H(tmr_idx)); incval = (u64)hi << 32 | lo; - err = ice_write_40b_phy_reg_e822(hw, port, P_REG_TIMETUS_L, incval); + err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_TIMETUS_L, incval); if (err) return err; @@ -2552,22 +2552,22 @@ int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port) ice_ptp_exec_tmr_cmd(hw); - err = ice_read_phy_reg_e822(hw, port, P_REG_PS, &val); + err = ice_read_phy_reg_e82x(hw, port, P_REG_PS, &val); if (err) return err; val |= P_REG_PS_SFT_RESET_M; - err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val); + err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val); if (err) return err; val |= P_REG_PS_START_M; - err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val); + err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val); if (err) return err; val &= ~P_REG_PS_SFT_RESET_M; - err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val); + err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val); if (err) return err; @@ -2578,18 +2578,18 @@ int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port) ice_ptp_exec_tmr_cmd(hw); val |= P_REG_PS_ENA_CLK_M; - err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val); + err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val); if (err) return err; val |= P_REG_PS_LOAD_OFFSET_M; - err = 
ice_write_phy_reg_e822(hw, port, P_REG_PS, val); + err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val); if (err) return err; ice_ptp_exec_tmr_cmd(hw); - err = ice_sync_phy_timer_e822(hw, port); + err = ice_sync_phy_timer_e82x(hw, port); if (err) return err; @@ -2599,7 +2599,7 @@ int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port) } /** - * ice_get_phy_tx_tstamp_ready_e822 - Read Tx memory status register + * ice_get_phy_tx_tstamp_ready_e82x - Read Tx memory status register * @hw: pointer to the HW struct * @quad: the timestamp quad to read from * @tstamp_ready: contents of the Tx memory status register @@ -2609,19 +2609,19 @@ int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port) * ready to be captured from the PHY timestamp block. */ static int -ice_get_phy_tx_tstamp_ready_e822(struct ice_hw *hw, u8 quad, u64 *tstamp_ready) +ice_get_phy_tx_tstamp_ready_e82x(struct ice_hw *hw, u8 quad, u64 *tstamp_ready) { u32 hi, lo; int err; - err = ice_read_quad_reg_e822(hw, quad, Q_REG_TX_MEMORY_STATUS_U, &hi); + err = ice_read_quad_reg_e82x(hw, quad, Q_REG_TX_MEMORY_STATUS_U, &hi); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_MEMORY_STATUS_U for quad %u, err %d\n", quad, err); return err; } - err = ice_read_quad_reg_e822(hw, quad, Q_REG_TX_MEMORY_STATUS_L, &lo); + err = ice_read_quad_reg_e82x(hw, quad, Q_REG_TX_MEMORY_STATUS_L, &lo); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_MEMORY_STATUS_L for quad %u, err %d\n", quad, err); @@ -3307,7 +3307,7 @@ void ice_ptp_init_phy_model(struct ice_hw *hw) if (ice_is_e810(hw)) hw->phy_model = ICE_PHY_E810; else - hw->phy_model = ICE_PHY_E822; + hw->phy_model = ICE_PHY_E82X; } /** @@ -3332,8 +3332,8 @@ static int ice_ptp_tmr_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd) case ICE_PHY_E810: err = ice_ptp_port_cmd_e810(hw, cmd); break; - case ICE_PHY_E822: - err = ice_ptp_port_cmd_e822(hw, cmd); + case ICE_PHY_E82X: + err = ice_ptp_port_cmd_e82x(hw, cmd); break; default: err = -EOPNOTSUPP; @@ -3384,8 +3384,8 @@ int ice_ptp_init_time(struct ice_hw *hw, u64 time) case ICE_PHY_E810: err = ice_ptp_prep_phy_time_e810(hw, time & 0xFFFFFFFF); break; - case ICE_PHY_E822: - err = ice_ptp_prep_phy_time_e822(hw, time & 0xFFFFFFFF); + case ICE_PHY_E82X: + err = ice_ptp_prep_phy_time_e82x(hw, time & 0xFFFFFFFF); break; default: err = -EOPNOTSUPP; @@ -3426,8 +3426,8 @@ int ice_ptp_write_incval(struct ice_hw *hw, u64 incval) case ICE_PHY_E810: err = ice_ptp_prep_phy_incval_e810(hw, incval); break; - case ICE_PHY_E822: - err = ice_ptp_prep_phy_incval_e822(hw, incval); + case ICE_PHY_E82X: + err = ice_ptp_prep_phy_incval_e82x(hw, incval); break; default: err = -EOPNOTSUPP; @@ -3492,8 +3492,8 @@ int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj) case ICE_PHY_E810: err = ice_ptp_prep_phy_adj_e810(hw, adj); break; - case ICE_PHY_E822: - err = ice_ptp_prep_phy_adj_e822(hw, adj); + case ICE_PHY_E82X: + err = ice_ptp_prep_phy_adj_e82x(hw, adj); break; default: err = -EOPNOTSUPP; @@ -3521,8 +3521,8 @@ int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp) switch (hw->phy_model) { case ICE_PHY_E810: return ice_read_phy_tstamp_e810(hw, block, idx, tstamp); - case ICE_PHY_E822: - return ice_read_phy_tstamp_e822(hw, block, idx, tstamp); + case ICE_PHY_E82X: + return ice_read_phy_tstamp_e82x(hw, block, idx, tstamp); default: return -EOPNOTSUPP; } @@ -3549,8 +3549,8 @@ int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx) switch (hw->phy_model) { case ICE_PHY_E810: return ice_clear_phy_tstamp_e810(hw, block, idx); - case 
ICE_PHY_E822: - return ice_clear_phy_tstamp_e822(hw, block, idx); + case ICE_PHY_E82X: + return ice_clear_phy_tstamp_e82x(hw, block, idx); default: return -EOPNOTSUPP; } @@ -3608,8 +3608,8 @@ static int ice_get_pf_c827_idx(struct ice_hw *hw, u8 *idx) void ice_ptp_reset_ts_memory(struct ice_hw *hw) { switch (hw->phy_model) { - case ICE_PHY_E822: - ice_ptp_reset_ts_memory_e822(hw); + case ICE_PHY_E82X: + ice_ptp_reset_ts_memory_e82x(hw); break; case ICE_PHY_E810: default: @@ -3636,8 +3636,8 @@ int ice_ptp_init_phc(struct ice_hw *hw) switch (hw->phy_model) { case ICE_PHY_E810: return ice_ptp_init_phc_e810(hw); - case ICE_PHY_E822: - return ice_ptp_init_phc_e822(hw); + case ICE_PHY_E82X: + return ice_ptp_init_phc_e82x(hw); default: return -EOPNOTSUPP; } @@ -3660,8 +3660,8 @@ int ice_get_phy_tx_tstamp_ready(struct ice_hw *hw, u8 block, u64 *tstamp_ready) case ICE_PHY_E810: return ice_get_phy_tx_tstamp_ready_e810(hw, block, tstamp_ready); - case ICE_PHY_E822: - return ice_get_phy_tx_tstamp_ready_e822(hw, block, + case ICE_PHY_E82X: + return ice_get_phy_tx_tstamp_ready_e82x(hw, block, tstamp_ready); break; default: @@ -3942,7 +3942,7 @@ int ice_get_cgu_rclk_pin_info(struct ice_hw *hw, u8 *base_idx, u8 *pin_num) case ICE_DEV_ID_E823C_QSFP: case ICE_DEV_ID_E823C_SFP: case ICE_DEV_ID_E823C_SGMII: - *pin_num = ICE_E822_RCLK_PINS_NUM; + *pin_num = ICE_E82X_RCLK_PINS_NUM; ret = 0; if (hw->cgu_part_number == ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032) diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h index cf76701566..1f3e031244 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h +++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h @@ -42,7 +42,7 @@ enum ice_ptp_fec_mode { }; /** - * struct ice_time_ref_info_e822 + * struct ice_time_ref_info_e82x * @pll_freq: Frequency of PLL that drives timer ticks in Hz * @nominal_incval: increment to generate nanoseconds in GLTSYN_TIME_L * @pps_delay: propagation delay of the PPS output signal @@ -50,14 +50,14 @@ enum ice_ptp_fec_mode { * Characteristic information for the various TIME_REF sources possible in the * E822 devices */ -struct ice_time_ref_info_e822 { +struct ice_time_ref_info_e82x { u64 pll_freq; u64 nominal_incval; u8 pps_delay; }; /** - * struct ice_vernier_info_e822 + * struct ice_vernier_info_e82x * @tx_par_clk: Frequency used to calculate P_REG_PAR_TX_TUS * @rx_par_clk: Frequency used to calculate P_REG_PAR_RX_TUS * @tx_pcs_clk: Frequency used to calculate P_REG_PCS_TX_TUS @@ -80,7 +80,7 @@ struct ice_time_ref_info_e822 { * different link speeds, either the deskew marker for multi-lane link speeds * or the Reed Solomon gearbox marker for RS-FEC. */ -struct ice_vernier_info_e822 { +struct ice_vernier_info_e82x { u32 tx_par_clk; u32 rx_par_clk; u32 tx_pcs_clk; @@ -95,7 +95,7 @@ struct ice_vernier_info_e822 { }; /** - * struct ice_cgu_pll_params_e822 + * struct ice_cgu_pll_params_e82x * @refclk_pre_div: Reference clock pre-divisor * @feedback_div: Feedback divisor * @frac_n_div: Fractional divisor @@ -104,7 +104,7 @@ struct ice_vernier_info_e822 { * Clock Generation Unit parameters used to program the PLL based on the * selected TIME_REF frequency. 
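[Editor's aside: the struct renamed just above backs a table indexed by TIME_REF frequency (e822_cgu_params, declared later in this patch). A standalone mimic of that table-driven lookup; the enum names, frequencies, and divisor values here are placeholders, not the real CGU settings.]

#include <stdint.h>

struct cgu_pll_params {
        uint32_t refclk_pre_div;
        uint32_t feedback_div;
        uint32_t frac_n_div;
        uint32_t post_pll_div;
};

enum time_ref { TIME_REF_A, TIME_REF_B, NUM_TIME_REF };

static const struct cgu_pll_params cgu_params[NUM_TIME_REF] = {
        [TIME_REF_A] = { 1, 197, 0, 6 },  /* placeholder divisors */
        [TIME_REF_B] = { 5, 223, 0, 7 },  /* placeholder divisors */
};

static const struct cgu_pll_params *pll_params(enum time_ref ref)
{
        return &cgu_params[ref];
}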
*/ -struct ice_cgu_pll_params_e822 { +struct ice_cgu_pll_params_e82x { u32 refclk_pre_div; u32 feedback_div; u32 frac_n_div; @@ -124,7 +124,7 @@ enum ice_phy_rclk_pins { }; #define ICE_E810_RCLK_PINS_NUM (ICE_RCLKB_PIN + 1) -#define ICE_E822_RCLK_PINS_NUM (ICE_RCLKA_PIN + 1) +#define ICE_E82X_RCLK_PINS_NUM (ICE_RCLKA_PIN + 1) #define E810T_CGU_INPUT_C827(_phy, _pin) ((_phy) * ICE_E810_RCLK_PINS_NUM + \ (_pin) + ZL_REF1P) @@ -183,16 +183,16 @@ struct ice_cgu_pin_desc { }; extern const struct -ice_cgu_pll_params_e822 e822_cgu_params[NUM_ICE_TIME_REF_FREQ]; +ice_cgu_pll_params_e82x e822_cgu_params[NUM_ICE_TIME_REF_FREQ]; #define E810C_QSFP_C827_0_HANDLE 2 #define E810C_QSFP_C827_1_HANDLE 3 /* Table of constants related to possible TIME_REF sources */ -extern const struct ice_time_ref_info_e822 e822_time_ref[NUM_ICE_TIME_REF_FREQ]; +extern const struct ice_time_ref_info_e82x e822_time_ref[NUM_ICE_TIME_REF_FREQ]; /* Table of constants for Vernier calibration on E822 */ -extern const struct ice_vernier_info_e822 e822_vernier[NUM_ICE_PTP_LNK_SPD]; +extern const struct ice_vernier_info_e82x e822_vernier[NUM_ICE_PTP_LNK_SPD]; /* Increment value to generate nanoseconds in the GLTSYN_TIME_L register for * the E810 devices. Based off of a PLL with an 812.5 MHz frequency. @@ -215,23 +215,23 @@ int ice_ptp_init_phc(struct ice_hw *hw); int ice_get_phy_tx_tstamp_ready(struct ice_hw *hw, u8 block, u64 *tstamp_ready); /* E822 family functions */ -int ice_read_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 *val); -int ice_write_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 val); -void ice_ptp_reset_ts_memory_quad_e822(struct ice_hw *hw, u8 quad); +int ice_read_quad_reg_e82x(struct ice_hw *hw, u8 quad, u16 offset, u32 *val); +int ice_write_quad_reg_e82x(struct ice_hw *hw, u8 quad, u16 offset, u32 val); +void ice_ptp_reset_ts_memory_quad_e82x(struct ice_hw *hw, u8 quad); /** - * ice_e822_time_ref - Get the current TIME_REF from capabilities + * ice_e82x_time_ref - Get the current TIME_REF from capabilities * @hw: pointer to the HW structure * * Returns the current TIME_REF from the capabilities structure. */ -static inline enum ice_time_ref_freq ice_e822_time_ref(struct ice_hw *hw) +static inline enum ice_time_ref_freq ice_e82x_time_ref(struct ice_hw *hw) { return hw->func_caps.ts_func_info.time_ref; } /** - * ice_set_e822_time_ref - Set new TIME_REF + * ice_set_e82x_time_ref - Set new TIME_REF * @hw: pointer to the HW structure * @time_ref: new TIME_REF to set * @@ -239,31 +239,31 @@ static inline enum ice_time_ref_freq ice_e822_time_ref(struct ice_hw *hw) * change, such as an update to the CGU registers. 
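[Editor's aside: the renamed inline accessors above all funnel through the e822_time_ref[] constants table. A self-contained mimic showing why the getters stay header-inline: each one is a single table read keyed by the capability-reported TIME_REF. The row values are placeholders, not the driver's constants.]

#include <stdint.h>

struct time_ref_info {
        uint64_t pll_freq;
        uint64_t nominal_incval;
        uint8_t  pps_delay;
};

static const struct time_ref_info time_ref_tbl[] = {
        { 823437500ull, 0x136e44fabull, 11 },  /* placeholder row */
};

static inline uint64_t pll_freq(unsigned int time_ref)
{
        return time_ref_tbl[time_ref].pll_freq;
}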
*/ static inline void -ice_set_e822_time_ref(struct ice_hw *hw, enum ice_time_ref_freq time_ref) +ice_set_e82x_time_ref(struct ice_hw *hw, enum ice_time_ref_freq time_ref) { hw->func_caps.ts_func_info.time_ref = time_ref; } -static inline u64 ice_e822_pll_freq(enum ice_time_ref_freq time_ref) +static inline u64 ice_e82x_pll_freq(enum ice_time_ref_freq time_ref) { return e822_time_ref[time_ref].pll_freq; } -static inline u64 ice_e822_nominal_incval(enum ice_time_ref_freq time_ref) +static inline u64 ice_e82x_nominal_incval(enum ice_time_ref_freq time_ref) { return e822_time_ref[time_ref].nominal_incval; } -static inline u64 ice_e822_pps_delay(enum ice_time_ref_freq time_ref) +static inline u64 ice_e82x_pps_delay(enum ice_time_ref_freq time_ref) { return e822_time_ref[time_ref].pps_delay; } /* E822 Vernier calibration functions */ -int ice_stop_phy_timer_e822(struct ice_hw *hw, u8 port, bool soft_reset); -int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port); -int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port); -int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port); +int ice_stop_phy_timer_e82x(struct ice_hw *hw, u8 port, bool soft_reset); +int ice_start_phy_timer_e82x(struct ice_hw *hw, u8 port); +int ice_phy_cfg_tx_offset_e82x(struct ice_hw *hw, u8 port); +int ice_phy_cfg_rx_offset_e82x(struct ice_hw *hw, u8 port); /* E810 family functions */ int ice_ptp_init_phy_e810(struct ice_hw *hw); @@ -509,6 +509,7 @@ int ice_cgu_get_output_pin_state_caps(struct ice_hw *hw, u8 pin_id, #define TS_LL_READ_RETRIES 200 #define TS_LL_READ_TS_HIGH GENMASK(23, 16) #define TS_LL_READ_TS_IDX GENMASK(29, 24) +#define TS_LL_READ_TS_INTR BIT(30) #define TS_LL_READ_TS BIT(31) /* Internal PHY timestamp address */ diff --git a/drivers/net/ethernet/intel/ice/ice_repr.c b/drivers/net/ethernet/intel/ice/ice_repr.c index c686ac0935..5f30fb131f 100644 --- a/drivers/net/ethernet/intel/ice/ice_repr.c +++ b/drivers/net/ethernet/intel/ice/ice_repr.c @@ -14,7 +14,7 @@ */ static int ice_repr_get_sw_port_id(struct ice_repr *repr) { - return repr->vf->pf->hw.port_info->lport; + return repr->src_vsi->back->hw.port_info->lport; } /** @@ -35,7 +35,7 @@ ice_repr_get_phys_port_name(struct net_device *netdev, char *buf, size_t len) return -EOPNOTSUPP; res = snprintf(buf, len, "pf%dvfr%d", ice_repr_get_sw_port_id(repr), - repr->vf->vf_id); + repr->id); if (res <= 0) return -EOPNOTSUPP; return 0; @@ -278,25 +278,67 @@ ice_repr_reg_netdev(struct net_device *netdev) return register_netdev(netdev); } +static void ice_repr_remove_node(struct devlink_port *devlink_port) +{ + devl_lock(devlink_port->devlink); + devl_rate_leaf_destroy(devlink_port); + devl_unlock(devlink_port->devlink); +} + /** - * ice_repr_add - add representor for VF - * @vf: pointer to VF structure + * ice_repr_rem - remove representor from VF + * @repr: pointer to representor structure */ -static int ice_repr_add(struct ice_vf *vf) +static void ice_repr_rem(struct ice_repr *repr) +{ + kfree(repr->q_vector); + free_netdev(repr->netdev); + kfree(repr); +} + +/** + * ice_repr_rem_vf - remove representor from VF + * @repr: pointer to representor structure + */ +void ice_repr_rem_vf(struct ice_repr *repr) +{ + ice_repr_remove_node(&repr->vf->devlink_port); + unregister_netdev(repr->netdev); + ice_devlink_destroy_vf_port(repr->vf); + ice_virtchnl_set_dflt_ops(repr->vf); + ice_repr_rem(repr); +} + +static void ice_repr_set_tx_topology(struct ice_pf *pf) +{ + struct devlink *devlink; + + /* only export if ADQ and DCB disabled and eswitch enabled*/ + if 
(ice_is_adq_active(pf) || ice_is_dcb_active(pf) || + !ice_is_switchdev_running(pf)) + return; + + devlink = priv_to_devlink(pf); + ice_devlink_rate_init_tx_topology(devlink, ice_get_main_vsi(pf)); +} + +/** + * ice_repr_add - add representor for generic VSI + * @pf: pointer to PF structure + * @src_vsi: pointer to VSI structure of device to represent + * @parent_mac: device MAC address + */ +static struct ice_repr * +ice_repr_add(struct ice_pf *pf, struct ice_vsi *src_vsi, const u8 *parent_mac) { struct ice_q_vector *q_vector; struct ice_netdev_priv *np; struct ice_repr *repr; - struct ice_vsi *vsi; int err; - vsi = ice_get_vf_vsi(vf); - if (!vsi) - return -EINVAL; - repr = kzalloc(sizeof(*repr), GFP_KERNEL); if (!repr) - return -ENOMEM; + return ERR_PTR(-ENOMEM); repr->netdev = alloc_etherdev(sizeof(struct ice_netdev_priv)); if (!repr->netdev) { @@ -304,9 +346,7 @@ static int ice_repr_add(struct ice_vf *vf) goto err_alloc; } - repr->src_vsi = vsi; - repr->vf = vf; - vf->repr = repr; + repr->src_vsi = src_vsi; np = netdev_priv(repr->netdev); np->repr = repr; @@ -316,10 +356,40 @@ static int ice_repr_add(struct ice_vf *vf) goto err_alloc_q_vector; } repr->q_vector = q_vector; + repr->q_id = repr->id; + + ether_addr_copy(repr->parent_mac, parent_mac); + + return repr; + +err_alloc_q_vector: + free_netdev(repr->netdev); +err_alloc: + kfree(repr); + return ERR_PTR(err); +} + +struct ice_repr *ice_repr_add_vf(struct ice_vf *vf) +{ + struct ice_repr *repr; + struct ice_vsi *vsi; + int err; + + vsi = ice_get_vf_vsi(vf); + if (!vsi) + return ERR_PTR(-ENOENT); err = ice_devlink_create_vf_port(vf); if (err) - goto err_devlink; + return ERR_PTR(err); + + repr = ice_repr_add(vf->pf, vsi, vf->hw_lan_addr); + if (IS_ERR(repr)) { + err = PTR_ERR(repr); + goto err_repr_add; + } + + repr->vf = vf; repr->netdev->min_mtu = ETH_MIN_MTU; repr->netdev->max_mtu = ICE_MAX_MTU; @@ -331,100 +401,23 @@ static int ice_repr_add(struct ice_vf *vf) goto err_netdev; ice_virtchnl_set_repr_ops(vf); + ice_repr_set_tx_topology(vf->pf); - return 0; + return repr; err_netdev: + ice_repr_rem(repr); +err_repr_add: ice_devlink_destroy_vf_port(vf); -err_devlink: - kfree(repr->q_vector); - vf->repr->q_vector = NULL; -err_alloc_q_vector: - free_netdev(repr->netdev); - repr->netdev = NULL; -err_alloc: - kfree(repr); - vf->repr = NULL; - return err; -} - -/** - * ice_repr_rem - remove representor from VF - * @vf: pointer to VF structure - */ -static void ice_repr_rem(struct ice_vf *vf) -{ - if (!vf->repr) - return; - - kfree(vf->repr->q_vector); - vf->repr->q_vector = NULL; - unregister_netdev(vf->repr->netdev); - ice_devlink_destroy_vf_port(vf); - free_netdev(vf->repr->netdev); - vf->repr->netdev = NULL; - kfree(vf->repr); - vf->repr = NULL; - - ice_virtchnl_set_dflt_ops(vf); -} - -/** - * ice_repr_rem_from_all_vfs - remove port representor for all VFs - * @pf: pointer to PF structure - */ -void ice_repr_rem_from_all_vfs(struct ice_pf *pf) -{ - struct devlink *devlink; - struct ice_vf *vf; - unsigned int bkt; - - lockdep_assert_held(&pf->vfs.table_lock); - - ice_for_each_vf(pf, bkt, vf) - ice_repr_rem(vf); - - /* since all port representors are destroyed, there is - * no point in keeping the nodes - */ - devlink = priv_to_devlink(pf); - devl_lock(devlink); - devl_rate_nodes_destroy(devlink); - devl_unlock(devlink); + return ERR_PTR(err); } -/** - * ice_repr_add_for_all_vfs - add port representor for all VFs - * @pf: pointer to PF structure - */ -int ice_repr_add_for_all_vfs(struct ice_pf *pf) +struct ice_repr 
*ice_repr_get_by_vsi(struct ice_vsi *vsi) { - struct devlink *devlink; - struct ice_vf *vf; - unsigned int bkt; - int err; - - lockdep_assert_held(&pf->vfs.table_lock); - - ice_for_each_vf(pf, bkt, vf) { - err = ice_repr_add(vf); - if (err) - goto err; - } - - /* only export if ADQ and DCB disabled */ - if (ice_is_adq_active(pf) || ice_is_dcb_active(pf)) - return 0; - - devlink = priv_to_devlink(pf); - ice_devlink_rate_init_tx_topology(devlink, ice_get_main_vsi(pf)); - - return 0; - -err: - ice_repr_rem_from_all_vfs(pf); + if (!vsi->vf) + return NULL; - return err; + return xa_load(&vsi->back->eswitch.reprs, vsi->vf->repr_id); } /** diff --git a/drivers/net/ethernet/intel/ice/ice_repr.h b/drivers/net/ethernet/intel/ice/ice_repr.h index e1ee2d2c1d..f9aede3157 100644 --- a/drivers/net/ethernet/intel/ice/ice_repr.h +++ b/drivers/net/ethernet/intel/ice/ice_repr.h @@ -13,14 +13,17 @@ struct ice_repr { struct net_device *netdev; struct metadata_dst *dst; struct ice_esw_br_port *br_port; + int q_id; + u32 id; + u8 parent_mac[ETH_ALEN]; #ifdef CONFIG_ICE_SWITCHDEV /* info about slow path rule */ struct ice_rule_query_data sp_rule; #endif }; -int ice_repr_add_for_all_vfs(struct ice_pf *pf); -void ice_repr_rem_from_all_vfs(struct ice_pf *pf); +struct ice_repr *ice_repr_add_vf(struct ice_vf *vf); +void ice_repr_rem_vf(struct ice_repr *repr); void ice_repr_start_tx_queues(struct ice_repr *repr); void ice_repr_stop_tx_queues(struct ice_repr *repr); @@ -29,4 +32,6 @@ void ice_repr_set_traffic_vsi(struct ice_repr *repr, struct ice_vsi *vsi); struct ice_repr *ice_netdev_to_repr(struct net_device *netdev); bool ice_is_port_repr_netdev(const struct net_device *netdev); + +struct ice_repr *ice_repr_get_by_vsi(struct ice_vsi *vsi); #endif diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c index 2f4a621254..d174a4eeb8 100644 --- a/drivers/net/ethernet/intel/ice/ice_sched.c +++ b/drivers/net/ethernet/intel/ice/ice_sched.c @@ -1387,8 +1387,7 @@ void ice_sched_get_psm_clk_freq(struct ice_hw *hw) u32 val, clk_src; val = rd32(hw, GLGEN_CLKSTAT_SRC); - clk_src = (val & GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_M) >> - GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_S; + clk_src = FIELD_GET(GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_M, val); #define PSM_CLK_SRC_367_MHZ 0x0 #define PSM_CLK_SRC_416_MHZ 0x1 diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c index cd61928700..b0f78c2f27 100644 --- a/drivers/net/ethernet/intel/ice/ice_sriov.c +++ b/drivers/net/ethernet/intel/ice/ice_sriov.c @@ -106,10 +106,8 @@ static void ice_dis_vf_mappings(struct ice_vf *vf) for (v = first; v <= last; v++) { u32 reg; - reg = (((1 << GLINT_VECT2FUNC_IS_PF_S) & - GLINT_VECT2FUNC_IS_PF_M) | - ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) & - GLINT_VECT2FUNC_PF_NUM_M)); + reg = FIELD_PREP(GLINT_VECT2FUNC_IS_PF_M, 1) | + FIELD_PREP(GLINT_VECT2FUNC_PF_NUM_M, hw->pf_id); wr32(hw, GLINT_VECT2FUNC(v), reg); } @@ -172,13 +170,14 @@ void ice_free_vfs(struct ice_pf *pf) else dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n"); - mutex_lock(&vfs->table_lock); + ice_eswitch_reserve_cp_queues(pf, -ice_get_num_vfs(pf)); - ice_eswitch_release(pf); + mutex_lock(&vfs->table_lock); ice_for_each_vf(pf, bkt, vf) { mutex_lock(&vf->cfg_lock); + ice_eswitch_detach(pf, vf); ice_dis_vf_qs(vf); if (test_bit(ICE_VF_STATE_INIT, vf->vf_states)) { @@ -274,24 +273,20 @@ static void ice_ena_vf_msix_mappings(struct ice_vf *vf) (device_based_first_msix + vf->num_msix) - 1; device_based_vf_id = vf->vf_id + 
hw->func_caps.vf_base_id; - reg = (((device_based_first_msix << VPINT_ALLOC_FIRST_S) & - VPINT_ALLOC_FIRST_M) | - ((device_based_last_msix << VPINT_ALLOC_LAST_S) & - VPINT_ALLOC_LAST_M) | VPINT_ALLOC_VALID_M); + reg = FIELD_PREP(VPINT_ALLOC_FIRST_M, device_based_first_msix) | + FIELD_PREP(VPINT_ALLOC_LAST_M, device_based_last_msix) | + VPINT_ALLOC_VALID_M; wr32(hw, VPINT_ALLOC(vf->vf_id), reg); - reg = (((device_based_first_msix << VPINT_ALLOC_PCI_FIRST_S) - & VPINT_ALLOC_PCI_FIRST_M) | - ((device_based_last_msix << VPINT_ALLOC_PCI_LAST_S) & - VPINT_ALLOC_PCI_LAST_M) | VPINT_ALLOC_PCI_VALID_M); + reg = FIELD_PREP(VPINT_ALLOC_PCI_FIRST_M, device_based_first_msix) | + FIELD_PREP(VPINT_ALLOC_PCI_LAST_M, device_based_last_msix) | + VPINT_ALLOC_PCI_VALID_M; wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg); /* map the interrupts to its functions */ for (v = pf_based_first_msix; v <= pf_based_last_msix; v++) { - reg = (((device_based_vf_id << GLINT_VECT2FUNC_VF_NUM_S) & - GLINT_VECT2FUNC_VF_NUM_M) | - ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) & - GLINT_VECT2FUNC_PF_NUM_M)); + reg = FIELD_PREP(GLINT_VECT2FUNC_VF_NUM_M, device_based_vf_id) | + FIELD_PREP(GLINT_VECT2FUNC_PF_NUM_M, hw->pf_id); wr32(hw, GLINT_VECT2FUNC(v), reg); } @@ -324,10 +319,8 @@ static void ice_ena_vf_q_mappings(struct ice_vf *vf, u16 max_txq, u16 max_rxq) * VFNUMQ value should be set to (number of queues - 1). A value * of 0 means 1 queue and a value of 255 means 256 queues */ - reg = (((vsi->txq_map[0] << VPLAN_TX_QBASE_VFFIRSTQ_S) & - VPLAN_TX_QBASE_VFFIRSTQ_M) | - (((max_txq - 1) << VPLAN_TX_QBASE_VFNUMQ_S) & - VPLAN_TX_QBASE_VFNUMQ_M)); + reg = FIELD_PREP(VPLAN_TX_QBASE_VFFIRSTQ_M, vsi->txq_map[0]) | + FIELD_PREP(VPLAN_TX_QBASE_VFNUMQ_M, max_txq - 1); wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg); } else { dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n"); @@ -342,10 +335,8 @@ static void ice_ena_vf_q_mappings(struct ice_vf *vf, u16 max_txq, u16 max_rxq) * VFNUMQ value should be set to (number of queues - 1). 
A value * of 0 means 1 queue and a value of 255 means 256 queues */ - reg = (((vsi->rxq_map[0] << VPLAN_RX_QBASE_VFFIRSTQ_S) & - VPLAN_RX_QBASE_VFFIRSTQ_M) | - (((max_rxq - 1) << VPLAN_RX_QBASE_VFNUMQ_S) & - VPLAN_RX_QBASE_VFNUMQ_M)); + reg = FIELD_PREP(VPLAN_RX_QBASE_VFFIRSTQ_M, vsi->rxq_map[0]) | + FIELD_PREP(VPLAN_RX_QBASE_VFNUMQ_M, max_rxq - 1); wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg); } else { dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n"); @@ -609,6 +600,14 @@ static int ice_start_vfs(struct ice_pf *pf) goto teardown; } + retval = ice_eswitch_attach(pf, vf); + if (retval) { + dev_err(ice_pf_to_dev(pf), "Failed to attach VF %d to eswitch, error %d", + vf->vf_id, retval); + ice_vf_vsi_release(vf); + goto teardown; + } + set_bit(ICE_VF_STATE_INIT, vf->vf_states); ice_ena_vf_mappings(vf); wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE); @@ -899,6 +898,7 @@ static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs) goto err_unroll_sriov; } + ice_eswitch_reserve_cp_queues(pf, num_vfs); ret = ice_start_vfs(pf); if (ret) { dev_err(dev, "Failed to start %d VFs, err %d\n", num_vfs, ret); @@ -908,12 +908,6 @@ static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs) clear_bit(ICE_VF_DIS, pf->state); - ret = ice_eswitch_configure(pf); - if (ret) { - dev_err(dev, "Failed to configure eswitch, err %d\n", ret); - goto err_unroll_sriov; - } - /* rearm global interrupts */ if (test_and_clear_bit(ICE_OICR_INTR_DIS, pf->state)) ice_irq_dynamic_ena(hw, NULL, NULL); @@ -1311,8 +1305,7 @@ ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event) dev_dbg(ice_pf_to_dev(pf), "GLDCB_RTCTQ: 0x%08x\n", gldcb_rtctq); /* event returns device global Rx queue number */ - queue = (gldcb_rtctq & GLDCB_RTCTQ_RXQNUM_M) >> - GLDCB_RTCTQ_RXQNUM_S; + queue = FIELD_GET(GLDCB_RTCTQ_RXQNUM_M, gldcb_rtctq); vf = ice_get_vf_from_pfq(pf, ice_globalq_to_pfq(pf, queue)); if (!vf) diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c index ee19f3aa3d..ba0ef91e4c 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.c +++ b/drivers/net/ethernet/intel/ice/ice_switch.c @@ -2025,12 +2025,12 @@ error_out: * ice_aq_map_recipe_to_profile - Map recipe to packet profile * @hw: pointer to the HW struct * @profile_id: package profile ID to associate the recipe with - * @r_bitmap: Recipe bitmap filled in and need to be returned as response + * @r_assoc: Recipe bitmap filled in and need to be returned as response * @cd: pointer to command details structure or NULL * Recipe to profile association (0x0291) */ int -ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap, +ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u64 r_assoc, struct ice_sq_cd *cd) { struct ice_aqc_recipe_to_profile *cmd; @@ -2042,7 +2042,7 @@ ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap, /* Set the recipe ID bit in the bitmask to let the device know which * profile we are associating the recipe to */ - memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc)); + cmd->recipe_assoc = cpu_to_le64(r_assoc); return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); } @@ -2051,12 +2051,12 @@ ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap, * ice_aq_get_recipe_to_profile - Map recipe to packet profile * @hw: pointer to the HW struct * @profile_id: package profile ID to associate the recipe with - * @r_bitmap: Recipe bitmap filled in and need to be returned as response + * @r_assoc: Recipe 
bitmap filled in and need to be returned as response * @cd: pointer to command details structure or NULL * Associate profile ID with given recipe (0x0293) */ int -ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap, +ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u64 *r_assoc, struct ice_sq_cd *cd) { struct ice_aqc_recipe_to_profile *cmd; @@ -2069,7 +2069,7 @@ ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap, status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); if (!status) - memcpy(r_bitmap, cmd->recipe_assoc, sizeof(cmd->recipe_assoc)); + *r_assoc = le64_to_cpu(cmd->recipe_assoc); return status; } @@ -2108,6 +2108,7 @@ int ice_alloc_recipe(struct ice_hw *hw, u16 *rid) static void ice_get_recp_to_prof_map(struct ice_hw *hw) { DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES); + u64 recp_assoc; u16 i; for (i = 0; i < hw->switch_info->max_used_prof_index + 1; i++) { @@ -2115,8 +2116,9 @@ static void ice_get_recp_to_prof_map(struct ice_hw *hw) bitmap_zero(profile_to_recipe[i], ICE_MAX_NUM_RECIPES); bitmap_zero(r_bitmap, ICE_MAX_NUM_RECIPES); - if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL)) + if (ice_aq_get_recipe_to_profile(hw, i, &recp_assoc, NULL)) continue; + bitmap_from_arr64(r_bitmap, &recp_assoc, ICE_MAX_NUM_RECIPES); bitmap_copy(profile_to_recipe[i], r_bitmap, ICE_MAX_NUM_RECIPES); for_each_set_bit(j, r_bitmap, ICE_MAX_NUM_RECIPES) @@ -2492,25 +2494,24 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info, switch (f_info->fltr_act) { case ICE_FWD_TO_VSI: - act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) & - ICE_SINGLE_ACT_VSI_ID_M; + act |= FIELD_PREP(ICE_SINGLE_ACT_VSI_ID_M, + f_info->fwd_id.hw_vsi_id); if (f_info->lkup_type != ICE_SW_LKUP_VLAN) act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT; break; case ICE_FWD_TO_VSI_LIST: act |= ICE_SINGLE_ACT_VSI_LIST; - act |= (f_info->fwd_id.vsi_list_id << - ICE_SINGLE_ACT_VSI_LIST_ID_S) & - ICE_SINGLE_ACT_VSI_LIST_ID_M; + act |= FIELD_PREP(ICE_SINGLE_ACT_VSI_LIST_ID_M, + f_info->fwd_id.vsi_list_id); if (f_info->lkup_type != ICE_SW_LKUP_VLAN) act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT; break; case ICE_FWD_TO_Q: act |= ICE_SINGLE_ACT_TO_Q; - act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) & - ICE_SINGLE_ACT_Q_INDEX_M; + act |= FIELD_PREP(ICE_SINGLE_ACT_Q_INDEX_M, + f_info->fwd_id.q_id); break; case ICE_DROP_PACKET: act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP | @@ -2520,10 +2521,9 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info, q_rgn = f_info->qgrp_size > 0 ? 
(u8)ilog2(f_info->qgrp_size) : 0; act |= ICE_SINGLE_ACT_TO_Q; - act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) & - ICE_SINGLE_ACT_Q_INDEX_M; - act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) & - ICE_SINGLE_ACT_Q_REGION_M; + act |= FIELD_PREP(ICE_SINGLE_ACT_Q_INDEX_M, + f_info->fwd_id.q_id); + act |= FIELD_PREP(ICE_SINGLE_ACT_Q_REGION_M, q_rgn); break; default: return; @@ -2649,7 +2649,7 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent, m_ent->fltr_info.fwd_id.hw_vsi_id; act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT; - act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) & ICE_LG_ACT_VSI_LIST_ID_M; + act |= FIELD_PREP(ICE_LG_ACT_VSI_LIST_ID_M, id); if (m_ent->vsi_count > 1) act |= ICE_LG_ACT_VSI_LIST; lg_act->act[0] = cpu_to_le32(act); @@ -2657,16 +2657,15 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent, /* Second action descriptor type */ act = ICE_LG_ACT_GENERIC; - act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M; + act |= FIELD_PREP(ICE_LG_ACT_GENERIC_VALUE_M, 1); lg_act->act[1] = cpu_to_le32(act); - act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX << - ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M; + act = FIELD_PREP(ICE_LG_ACT_GENERIC_OFFSET_M, + ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX); /* Third action Marker value */ act |= ICE_LG_ACT_GENERIC; - act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) & - ICE_LG_ACT_GENERIC_VALUE_M; + act |= FIELD_PREP(ICE_LG_ACT_GENERIC_VALUE_M, sw_marker); lg_act->act[2] = cpu_to_le32(act); @@ -2675,9 +2674,9 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent, ice_aqc_opc_update_sw_rules); /* Update the action to point to the large action ID */ - rx_tx->act = cpu_to_le32(ICE_SINGLE_ACT_PTR | - ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) & - ICE_SINGLE_ACT_PTR_VAL_M)); + act = ICE_SINGLE_ACT_PTR; + act |= FIELD_PREP(ICE_SINGLE_ACT_PTR_VAL_M, l_id); + rx_tx->act = cpu_to_le32(act); /* Use the filter rule ID of the previously created rule with single * act. 
Once the update happens, hardware will treat this as large @@ -4426,8 +4425,8 @@ ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items, int status; buf->num_elems = cpu_to_le16(num_items); - buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) & - ICE_AQC_RES_TYPE_M) | alloc_shared); + buf->res_type = cpu_to_le16(FIELD_PREP(ICE_AQC_RES_TYPE_M, type) | + alloc_shared); status = ice_aq_alloc_free_res(hw, buf, buf_len, ice_aqc_opc_alloc_res); if (status) @@ -4454,8 +4453,8 @@ ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items, int status; buf->num_elems = cpu_to_le16(num_items); - buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) & - ICE_AQC_RES_TYPE_M) | alloc_shared); + buf->res_type = cpu_to_le16(FIELD_PREP(ICE_AQC_RES_TYPE_M, type) | + alloc_shared); buf->elem[0].e.sw_resp = cpu_to_le16(counter_id); status = ice_aq_alloc_free_res(hw, buf, buf_len, ice_aqc_opc_free_res); @@ -4481,18 +4480,15 @@ int ice_share_res(struct ice_hw *hw, u16 type, u8 shared, u16 res_id) { DEFINE_FLEX(struct ice_aqc_alloc_free_res_elem, buf, elem, 1); u16 buf_len = __struct_size(buf); + u16 res_type; int status; buf->num_elems = cpu_to_le16(1); + res_type = FIELD_PREP(ICE_AQC_RES_TYPE_M, type); if (shared) - buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) & - ICE_AQC_RES_TYPE_M) | - ICE_AQC_RES_TYPE_FLAG_SHARED); - else - buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) & - ICE_AQC_RES_TYPE_M) & - ~ICE_AQC_RES_TYPE_FLAG_SHARED); + res_type |= ICE_AQC_RES_TYPE_FLAG_SHARED; + buf->res_type = cpu_to_le16(res_type); buf->elem[0].e.sw_resp = cpu_to_le16(res_id); status = ice_aq_alloc_free_res(hw, buf, buf_len, ice_aqc_opc_share_res); @@ -5024,8 +5020,8 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm, entry->chain_idx = chain_idx; content->result_indx = ICE_AQ_RECIPE_RESULT_EN | - ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) & - ICE_AQ_RECIPE_RESULT_DATA_M); + FIELD_PREP(ICE_AQ_RECIPE_RESULT_DATA_M, + chain_idx); clear_bit(chain_idx, result_idx_bm); chain_idx = find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS); @@ -5396,22 +5392,24 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, */ list_for_each_entry(fvit, &rm->fv_list, list_entry) { DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES); + u64 recp_assoc; u16 j; status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id, - (u8 *)r_bitmap, NULL); + &recp_assoc, NULL); if (status) goto err_unroll; + bitmap_from_arr64(r_bitmap, &recp_assoc, ICE_MAX_NUM_RECIPES); bitmap_or(r_bitmap, r_bitmap, rm->r_bitmap, ICE_MAX_NUM_RECIPES); status = ice_acquire_change_lock(hw, ICE_RES_WRITE); if (status) goto err_unroll; + bitmap_to_arr64(&recp_assoc, r_bitmap, ICE_MAX_NUM_RECIPES); status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id, - (u8 *)r_bitmap, - NULL); + recp_assoc, NULL); ice_release_change_lock(hw); if (status) @@ -6065,6 +6063,7 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, rinfo->sw_act.fltr_act == ICE_FWD_TO_Q || rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP || rinfo->sw_act.fltr_act == ICE_DROP_PACKET || + rinfo->sw_act.fltr_act == ICE_MIRROR_PACKET || rinfo->sw_act.fltr_act == ICE_NOP)) { status = -EIO; goto free_pkt_profile; @@ -6077,9 +6076,11 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, } if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI || - rinfo->sw_act.fltr_act == ICE_NOP) + rinfo->sw_act.fltr_act == ICE_MIRROR_PACKET || + rinfo->sw_act.fltr_act == ICE_NOP) { rinfo->sw_act.fwd_id.hw_vsi_id = 
ice_get_hw_vsi_num(hw, vsi_handle); + } if (rinfo->src_vsi) rinfo->sw_act.src = ice_get_hw_vsi_num(hw, rinfo->src_vsi); @@ -6115,38 +6116,45 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, status = -ENOMEM; goto free_pkt_profile; } - if (!rinfo->flags_info.act_valid) { - act |= ICE_SINGLE_ACT_LAN_ENABLE; - act |= ICE_SINGLE_ACT_LB_ENABLE; - } else { - act |= rinfo->flags_info.act & (ICE_SINGLE_ACT_LAN_ENABLE | - ICE_SINGLE_ACT_LB_ENABLE); + + if (rinfo->sw_act.fltr_act != ICE_MIRROR_PACKET) { + if (!rinfo->flags_info.act_valid) { + act |= ICE_SINGLE_ACT_LAN_ENABLE; + act |= ICE_SINGLE_ACT_LB_ENABLE; + } else { + act |= rinfo->flags_info.act & (ICE_SINGLE_ACT_LAN_ENABLE | + ICE_SINGLE_ACT_LB_ENABLE); + } } switch (rinfo->sw_act.fltr_act) { case ICE_FWD_TO_VSI: - act |= (rinfo->sw_act.fwd_id.hw_vsi_id << - ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M; + act |= FIELD_PREP(ICE_SINGLE_ACT_VSI_ID_M, + rinfo->sw_act.fwd_id.hw_vsi_id); act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT; break; case ICE_FWD_TO_Q: act |= ICE_SINGLE_ACT_TO_Q; - act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) & - ICE_SINGLE_ACT_Q_INDEX_M; + act |= FIELD_PREP(ICE_SINGLE_ACT_Q_INDEX_M, + rinfo->sw_act.fwd_id.q_id); break; case ICE_FWD_TO_QGRP: q_rgn = rinfo->sw_act.qgrp_size > 0 ? (u8)ilog2(rinfo->sw_act.qgrp_size) : 0; act |= ICE_SINGLE_ACT_TO_Q; - act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) & - ICE_SINGLE_ACT_Q_INDEX_M; - act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) & - ICE_SINGLE_ACT_Q_REGION_M; + act |= FIELD_PREP(ICE_SINGLE_ACT_Q_INDEX_M, + rinfo->sw_act.fwd_id.q_id); + act |= FIELD_PREP(ICE_SINGLE_ACT_Q_REGION_M, q_rgn); break; case ICE_DROP_PACKET: act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP | ICE_SINGLE_ACT_VALID_BIT; break; + case ICE_MIRROR_PACKET: + act |= ICE_SINGLE_ACT_OTHER_ACTS; + act |= FIELD_PREP(ICE_SINGLE_ACT_VSI_ID_M, + rinfo->sw_act.fwd_id.hw_vsi_id); + break; case ICE_NOP: act |= FIELD_PREP(ICE_SINGLE_ACT_VSI_ID_M, rinfo->sw_act.fwd_id.hw_vsi_id); diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h index db7e501b7e..89ffa1b51b 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.h +++ b/drivers/net/ethernet/intel/ice/ice_switch.h @@ -424,10 +424,10 @@ int ice_aq_add_recipe(struct ice_hw *hw, struct ice_aqc_recipe_data_elem *s_recipe_list, u16 num_recipes, struct ice_sq_cd *cd); int -ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap, +ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u64 *r_assoc, struct ice_sq_cd *cd); int -ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap, +ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u64 r_assoc, struct ice_sq_cd *cd); #endif /* _ICE_SWITCH_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c index dd03cb69ad..688ccb0615 100644 --- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c @@ -28,6 +28,8 @@ ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers, * - ICE_TC_FLWR_FIELD_VLAN_TPID (present if specified) * - Tunnel flag (present if tunnel) */ + if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS) + lkups_cnt++; if (flags & ICE_TC_FLWR_FIELD_TENANT_ID) lkups_cnt++; @@ -363,6 +365,11 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags, /* Always add direction metadata */ 
ice_rule_add_direction_metadata(&list[ICE_TC_METADATA_LKUP_IDX]); + if (tc_fltr->direction == ICE_ESWITCH_FLTR_EGRESS) { + ice_rule_add_src_vsi_metadata(&list[i]); + i++; + } + rule_info->tun_type = ice_sw_type_from_tunnel(tc_fltr->tunnel_type); if (tc_fltr->tunnel_type != TNL_LAST) { i = ice_tc_fill_tunnel_outer(flags, tc_fltr, list, i); @@ -653,7 +660,7 @@ static int ice_tc_setup_redirect_action(struct net_device *filter_dev, ice_tc_is_dev_uplink(target_dev)) { repr = ice_netdev_to_repr(filter_dev); - fltr->dest_vsi = repr->src_vsi->back->switchdev.uplink_vsi; + fltr->dest_vsi = repr->src_vsi->back->eswitch.uplink_vsi; fltr->direction = ICE_ESWITCH_FLTR_EGRESS; } else if (ice_tc_is_dev_uplink(filter_dev) && ice_is_port_repr_netdev(target_dev)) { @@ -689,6 +696,41 @@ ice_tc_setup_drop_action(struct net_device *filter_dev, return 0; } +static int ice_tc_setup_mirror_action(struct net_device *filter_dev, + struct ice_tc_flower_fltr *fltr, + struct net_device *target_dev) +{ + struct ice_repr *repr; + + fltr->action.fltr_act = ICE_MIRROR_PACKET; + + if (ice_is_port_repr_netdev(filter_dev) && + ice_is_port_repr_netdev(target_dev)) { + repr = ice_netdev_to_repr(target_dev); + + fltr->dest_vsi = repr->src_vsi; + fltr->direction = ICE_ESWITCH_FLTR_EGRESS; + } else if (ice_is_port_repr_netdev(filter_dev) && + ice_tc_is_dev_uplink(target_dev)) { + repr = ice_netdev_to_repr(filter_dev); + + fltr->dest_vsi = repr->src_vsi->back->eswitch.uplink_vsi; + fltr->direction = ICE_ESWITCH_FLTR_EGRESS; + } else if (ice_tc_is_dev_uplink(filter_dev) && + ice_is_port_repr_netdev(target_dev)) { + repr = ice_netdev_to_repr(target_dev); + + fltr->dest_vsi = repr->src_vsi; + fltr->direction = ICE_ESWITCH_FLTR_INGRESS; + } else { + NL_SET_ERR_MSG_MOD(fltr->extack, + "Unsupported netdevice in switchdev mode"); + return -EINVAL; + } + + return 0; +} + static int ice_eswitch_tc_parse_action(struct net_device *filter_dev, struct ice_tc_flower_fltr *fltr, struct flow_action_entry *act) @@ -710,6 +752,12 @@ static int ice_eswitch_tc_parse_action(struct net_device *filter_dev, break; + case FLOW_ACTION_MIRRED: + err = ice_tc_setup_mirror_action(filter_dev, fltr, act->dev); + if (err) + return err; + break; + default: NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported action in switchdev mode"); return -EINVAL; @@ -731,7 +779,7 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr) int ret; int i; - if (!flags || (flags & ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT)) { + if (flags & ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT) { NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported encap field(s)"); return -EOPNOTSUPP; } @@ -765,7 +813,7 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr) rule_info.sw_act.src = hw->pf_id; rule_info.flags_info.act = ICE_SINGLE_ACT_LB_ENABLE; } else if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS && - fltr->dest_vsi == vsi->back->switchdev.uplink_vsi) { + fltr->dest_vsi == vsi->back->eswitch.uplink_vsi) { /* VF to Uplink */ rule_info.sw_act.flag |= ICE_FLTR_TX; rule_info.sw_act.src = vsi->idx; @@ -779,6 +827,7 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr) /* specify the cookie as filter_rule_id */ rule_info.fltr_rule_id = fltr->cookie; + rule_info.src_vsi = vsi->idx; ret = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added); if (ret == -EEXIST) { @@ -1440,7 +1489,10 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi, (BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) 
| BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID) | - BIT_ULL(FLOW_DISSECTOR_KEY_ENC_PORTS))) { + BIT_ULL(FLOW_DISSECTOR_KEY_ENC_PORTS) | + BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IP) | + BIT_ULL(FLOW_DISSECTOR_KEY_ENC_OPTS) | + BIT_ULL(FLOW_DISSECTOR_KEY_ENC_CONTROL))) { NL_SET_ERR_MSG_MOD(fltr->extack, "Tunnel key used, but device isn't a tunnel"); return -EOPNOTSUPP; } else { diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index 9170a3e8f0..97d41d6ebf 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -552,13 +552,14 @@ ice_rx_frame_truesize(struct ice_rx_ring *rx_ring, const unsigned int size) * @xdp_prog: XDP program to run * @xdp_ring: ring to be used for XDP_TX action * @rx_buf: Rx buffer to store the XDP action + * @eop_desc: Last descriptor in packet to read metadata from * * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR} */ static void ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp, struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring, - struct ice_rx_buf *rx_buf) + struct ice_rx_buf *rx_buf, union ice_32b_rx_flex_desc *eop_desc) { unsigned int ret = ICE_XDP_PASS; u32 act; @@ -566,6 +567,8 @@ ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp, if (!xdp_prog) goto exit; + ice_xdp_meta_set_desc(xdp, eop_desc); + act = bpf_prog_run_xdp(xdp_prog, xdp); switch (act) { case XDP_PASS: @@ -1176,8 +1179,7 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget) struct sk_buff *skb; unsigned int size; u16 stat_err_bits; - u16 vlan_tag = 0; - u16 rx_ptype; + u16 vlan_tci; /* get the Rx desc from Rx ring based on 'next_to_clean' */ rx_desc = ICE_RX_DESC(rx_ring, ntc); @@ -1237,7 +1239,7 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget) if (ice_is_non_eop(rx_ring, rx_desc)) continue; - ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_buf); + ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_buf, rx_desc); if (rx_buf->act == ICE_XDP_PASS) goto construct_skb; total_rx_bytes += xdp_get_buff_len(xdp); @@ -1275,7 +1277,7 @@ construct_skb: continue; } - vlan_tag = ice_get_vlan_tag_from_rx_desc(rx_desc); + vlan_tci = ice_get_vlan_tci(rx_desc); /* pad the skb if needed, to make a valid ethernet frame */ if (eth_skb_pad(skb)) @@ -1285,14 +1287,11 @@ construct_skb: total_rx_bytes += skb->len; /* populate checksum, VLAN, and protocol */ - rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) & - ICE_RX_FLEX_DESC_PTYPE_M; - - ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype); + ice_process_skb_fields(rx_ring, rx_desc, skb); ice_trace(clean_rx_irq_indicate, rx_ring, rx_desc, skb); /* send completed skb up the stack */ - ice_receive_skb(rx_ring, skb, vlan_tag); + ice_receive_skb(rx_ring, skb, vlan_tci); /* update budget accounting */ total_rx_pkts++; @@ -1493,9 +1492,9 @@ static void ice_set_wb_on_itr(struct ice_q_vector *q_vector) * be static in non-adaptive mode (user configured) */ wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), - ((ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) & - GLINT_DYN_CTL_ITR_INDX_M) | GLINT_DYN_CTL_INTENA_MSK_M | - GLINT_DYN_CTL_WB_ON_ITR_M); + FIELD_PREP(GLINT_DYN_CTL_ITR_INDX_M, ICE_ITR_NONE) | + FIELD_PREP(GLINT_DYN_CTL_INTENA_MSK_M, 1) | + FIELD_PREP(GLINT_DYN_CTL_WB_ON_ITR_M, 1)); q_vector->wb_on_itr = true; } diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h index b28b9826bb..af955b0e5d 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.h +++ 
b/drivers/net/ethernet/intel/ice/ice_txrx.h @@ -257,6 +257,20 @@ enum ice_rx_dtype { ICE_RX_DTYPE_SPLIT_ALWAYS = 2, }; +struct ice_pkt_ctx { + u64 cached_phctime; + __be16 vlan_proto; +}; + +struct ice_xdp_buff { + struct xdp_buff xdp_buff; + const union ice_32b_rx_flex_desc *eop_desc; + const struct ice_pkt_ctx *pkt_ctx; +}; + +/* Required for compatibility with xdp_buffs from xsk_pool */ +static_assert(offsetof(struct ice_xdp_buff, xdp_buff) == 0); + /* indices into GLINT_ITR registers */ #define ICE_RX_ITR ICE_IDX_ITR0 #define ICE_TX_ITR ICE_IDX_ITR1 @@ -298,7 +312,6 @@ enum ice_dynamic_itr { /* descriptor ring, associated with a VSI */ struct ice_rx_ring { /* CL1 - 1st cacheline starts here */ - struct ice_rx_ring *next; /* pointer to next ring in q_vector */ void *desc; /* Descriptor ring memory */ struct device *dev; /* Used for DMA mapping */ struct net_device *netdev; /* netdev ring maps to */ @@ -310,13 +323,24 @@ struct ice_rx_ring { u16 count; /* Number of descriptors */ u16 reg_idx; /* HW register index of the ring */ u16 next_to_alloc; - /* CL2 - 2nd cacheline starts here */ + union { struct ice_rx_buf *rx_buf; struct xdp_buff **xdp_buf; }; - struct xdp_buff xdp; + /* CL2 - 2nd cacheline starts here */ + union { + struct ice_xdp_buff xdp_ext; + struct xdp_buff xdp; + }; /* CL3 - 3rd cacheline starts here */ + union { + struct ice_pkt_ctx pkt_ctx; + struct { + u64 cached_phctime; + __be16 vlan_proto; + }; + }; struct bpf_prog *xdp_prog; u16 rx_offset; @@ -332,10 +356,10 @@ struct ice_rx_ring { /* CL4 - 4th cacheline starts here */ struct ice_channel *ch; struct ice_tx_ring *xdp_ring; + struct ice_rx_ring *next; /* pointer to next ring in q_vector */ struct xsk_buff_pool *xsk_pool; u32 nr_frags; dma_addr_t dma; /* physical address of ring */ - u64 cached_phctime; u16 rx_buf_len; u8 dcb_tc; /* Traffic class of ring */ u8 ptp_rx; diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c index 7e06373e14..839e5da24a 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c @@ -63,28 +63,42 @@ static enum pkt_hash_types ice_ptype_to_htype(u16 ptype) } /** - * ice_rx_hash - set the hash value in the skb + * ice_get_rx_hash - get RX hash value from descriptor + * @rx_desc: specific descriptor + * + * Returns hash, if present, 0 otherwise. 
+ */ +static u32 ice_get_rx_hash(const union ice_32b_rx_flex_desc *rx_desc) +{ + const struct ice_32b_rx_flex_desc_nic *nic_mdid; + + if (unlikely(rx_desc->wb.rxdid != ICE_RXDID_FLEX_NIC)) + return 0; + + nic_mdid = (struct ice_32b_rx_flex_desc_nic *)rx_desc; + return le32_to_cpu(nic_mdid->rss_hash); +} + +/** + * ice_rx_hash_to_skb - set the hash value in the skb * @rx_ring: descriptor ring * @rx_desc: specific descriptor * @skb: pointer to current skb * @rx_ptype: the ptype value from the descriptor */ static void -ice_rx_hash(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc, - struct sk_buff *skb, u16 rx_ptype) +ice_rx_hash_to_skb(const struct ice_rx_ring *rx_ring, + const union ice_32b_rx_flex_desc *rx_desc, + struct sk_buff *skb, u16 rx_ptype) { - struct ice_32b_rx_flex_desc_nic *nic_mdid; u32 hash; if (!(rx_ring->netdev->features & NETIF_F_RXHASH)) return; - if (rx_desc->wb.rxdid != ICE_RXDID_FLEX_NIC) - return; - - nic_mdid = (struct ice_32b_rx_flex_desc_nic *)rx_desc; - hash = le32_to_cpu(nic_mdid->rss_hash); - skb_set_hash(skb, hash, ice_ptype_to_htype(rx_ptype)); + hash = ice_get_rx_hash(rx_desc); + if (likely(hash)) + skb_set_hash(skb, hash, ice_ptype_to_htype(rx_ptype)); } /** @@ -170,12 +184,39 @@ checksum_fail: ring->vsi->back->hw_csum_rx_error++; } +/** + * ice_ptp_rx_hwts_to_skb - Put RX timestamp into skb + * @rx_ring: Ring to get the VSI info + * @rx_desc: Receive descriptor + * @skb: Particular skb to send timestamp with + * + * The timestamp is in ns, so we must convert the result first. + */ +static void +ice_ptp_rx_hwts_to_skb(struct ice_rx_ring *rx_ring, + const union ice_32b_rx_flex_desc *rx_desc, + struct sk_buff *skb) +{ + u64 ts_ns = ice_ptp_get_rx_hwts(rx_desc, &rx_ring->pkt_ctx); + + skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ts_ns); +} + +/** + * ice_get_ptype - Read HW packet type from the descriptor + * @rx_desc: RX descriptor + */ +static u16 ice_get_ptype(const union ice_32b_rx_flex_desc *rx_desc) +{ + return le16_to_cpu(rx_desc->wb.ptype_flex_flags0) & + ICE_RX_FLEX_DESC_PTYPE_M; +} + /** * ice_process_skb_fields - Populate skb header fields from Rx descriptor * @rx_ring: Rx descriptor ring packet is being transacted on * @rx_desc: pointer to the EOP Rx descriptor * @skb: pointer to current skb being populated - * @ptype: the packet type decoded by hardware * * This function checks the ring, descriptor, and packet information in * order to populate the hash, checksum, VLAN, protocol, and @@ -184,9 +225,11 @@ checksum_fail: void ice_process_skb_fields(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc, - struct sk_buff *skb, u16 ptype) + struct sk_buff *skb) { - ice_rx_hash(rx_ring, rx_desc, skb, ptype); + u16 ptype = ice_get_ptype(rx_desc); + + ice_rx_hash_to_skb(rx_ring, rx_desc, skb, ptype); /* modifies the skb - consumes the enet header */ skb->protocol = eth_type_trans(skb, rx_ring->netdev); @@ -194,28 +237,24 @@ ice_process_skb_fields(struct ice_rx_ring *rx_ring, ice_rx_csum(rx_ring, skb, rx_desc, ptype); if (rx_ring->ptp_rx) - ice_ptp_rx_hwtstamp(rx_ring, rx_desc, skb); + ice_ptp_rx_hwts_to_skb(rx_ring, rx_desc, skb); } /** * ice_receive_skb - Send a completed packet up the stack * @rx_ring: Rx ring in play * @skb: packet to send up - * @vlan_tag: VLAN tag for packet + * @vlan_tci: VLAN TCI for packet * * This function sends the completed packet (via. 
skb) up the stack using * gro receive functions (with/without VLAN tag) */ void -ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag) +ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tci) { - netdev_features_t features = rx_ring->netdev->features; - bool non_zero_vlan = !!(vlan_tag & VLAN_VID_MASK); - - if ((features & NETIF_F_HW_VLAN_CTAG_RX) && non_zero_vlan) - __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); - else if ((features & NETIF_F_HW_VLAN_STAG_RX) && non_zero_vlan) - __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), vlan_tag); + if ((vlan_tci & VLAN_VID_MASK) && rx_ring->vlan_proto) + __vlan_hwaccel_put_tag(skb, rx_ring->vlan_proto, + vlan_tci); napi_gro_receive(&rx_ring->q_vector->napi, skb); } @@ -464,3 +503,125 @@ void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res, spin_unlock(&xdp_ring->tx_lock); } } + +/** + * ice_xdp_rx_hw_ts - HW timestamp XDP hint handler + * @ctx: XDP buff pointer + * @ts_ns: destination address + * + * Copy HW timestamp (if available) to the destination address. + */ +static int ice_xdp_rx_hw_ts(const struct xdp_md *ctx, u64 *ts_ns) +{ + const struct ice_xdp_buff *xdp_ext = (void *)ctx; + + *ts_ns = ice_ptp_get_rx_hwts(xdp_ext->eop_desc, + xdp_ext->pkt_ctx); + if (!*ts_ns) + return -ENODATA; + + return 0; +} + +/* Define a ptype index -> XDP hash type lookup table. + * It uses the same ptype definitions as ice_decode_rx_desc_ptype[], + * avoiding possible copy-paste errors. + */ +#undef ICE_PTT +#undef ICE_PTT_UNUSED_ENTRY + +#define ICE_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\ + [PTYPE] = XDP_RSS_L3_##OUTER_IP_VER | XDP_RSS_L4_##I | XDP_RSS_TYPE_##PL + +#define ICE_PTT_UNUSED_ENTRY(PTYPE) [PTYPE] = 0 + +/* A few supplementary definitions for when XDP hash types do not coincide + * with what can be generated from ptype definitions + * by means of preprocessor concatenation. + */ +#define XDP_RSS_L3_NONE XDP_RSS_TYPE_NONE +#define XDP_RSS_L4_NONE XDP_RSS_TYPE_NONE +#define XDP_RSS_TYPE_PAY2 XDP_RSS_TYPE_L2 +#define XDP_RSS_TYPE_PAY3 XDP_RSS_TYPE_NONE +#define XDP_RSS_TYPE_PAY4 XDP_RSS_L4 + +static const enum xdp_rss_hash_type +ice_ptype_to_xdp_hash[ICE_NUM_DEFINED_PTYPES] = { + ICE_PTYPES +}; + +#undef XDP_RSS_L3_NONE +#undef XDP_RSS_L4_NONE +#undef XDP_RSS_TYPE_PAY2 +#undef XDP_RSS_TYPE_PAY3 +#undef XDP_RSS_TYPE_PAY4 + +#undef ICE_PTT +#undef ICE_PTT_UNUSED_ENTRY + +/** + * ice_xdp_rx_hash_type - Get XDP-specific hash type from the RX descriptor + * @eop_desc: End of Packet descriptor + */ +static enum xdp_rss_hash_type +ice_xdp_rx_hash_type(const union ice_32b_rx_flex_desc *eop_desc) +{ + u16 ptype = ice_get_ptype(eop_desc); + + if (unlikely(ptype >= ICE_NUM_DEFINED_PTYPES)) + return 0; + + return ice_ptype_to_xdp_hash[ptype]; +} + +/** + * ice_xdp_rx_hash - RX hash XDP hint handler + * @ctx: XDP buff pointer + * @hash: hash destination address + * @rss_type: XDP hash type destination address + * + * Copy RX hash (if available) and its type to the destination address. 
+ */ +static int ice_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash, + enum xdp_rss_hash_type *rss_type) +{ + const struct ice_xdp_buff *xdp_ext = (void *)ctx; + + *hash = ice_get_rx_hash(xdp_ext->eop_desc); + *rss_type = ice_xdp_rx_hash_type(xdp_ext->eop_desc); + if (!likely(*hash)) + return -ENODATA; + + return 0; +} + +/** + * ice_xdp_rx_vlan_tag - VLAN tag XDP hint handler + * @ctx: XDP buff pointer + * @vlan_proto: destination address for VLAN protocol + * @vlan_tci: destination address for VLAN TCI + * + * Copy VLAN tag (if was stripped) and corresponding protocol + * to the destination address. + */ +static int ice_xdp_rx_vlan_tag(const struct xdp_md *ctx, __be16 *vlan_proto, + u16 *vlan_tci) +{ + const struct ice_xdp_buff *xdp_ext = (void *)ctx; + + *vlan_proto = xdp_ext->pkt_ctx->vlan_proto; + if (!*vlan_proto) + return -ENODATA; + + *vlan_tci = ice_get_vlan_tci(xdp_ext->eop_desc); + if (!*vlan_tci) + return -ENODATA; + + return 0; +} + +const struct xdp_metadata_ops ice_xdp_md_ops = { + .xmo_rx_timestamp = ice_xdp_rx_hw_ts, + .xmo_rx_hash = ice_xdp_rx_hash, + .xmo_rx_vlan_tag = ice_xdp_rx_vlan_tag, +}; diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h index b0e56675f9..afcead4bae 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h @@ -97,7 +97,7 @@ ice_build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag) } /** - * ice_get_vlan_tag_from_rx_desc - get VLAN from Rx flex descriptor + * ice_get_vlan_tci - get VLAN TCI from Rx flex descriptor * @rx_desc: Rx 32b flex descriptor with RXDID=2 * * The OS and current PF implementation only support stripping a single VLAN tag @@ -105,7 +105,7 @@ ice_build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag) * one is found return the tag, else return 0 to mean no VLAN tag was found. 
*/ static inline u16 -ice_get_vlan_tag_from_rx_desc(union ice_32b_rx_flex_desc *rx_desc) +ice_get_vlan_tci(const union ice_32b_rx_flex_desc *rx_desc) { u16 stat_err_bits; @@ -161,7 +161,17 @@ void ice_release_rx_desc(struct ice_rx_ring *rx_ring, u16 val); void ice_process_skb_fields(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc, - struct sk_buff *skb, u16 ptype); + struct sk_buff *skb); void -ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag); +ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tci); + +static inline void +ice_xdp_meta_set_desc(struct xdp_buff *xdp, + union ice_32b_rx_flex_desc *eop_desc) +{ + struct ice_xdp_buff *xdp_ext = container_of(xdp, struct ice_xdp_buff, + xdp_buff); + + xdp_ext->eop_desc = eop_desc; +} #endif /* !_ICE_TXRX_LIB_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index a18ca0ff87..a508e917ce 100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h @@ -17,6 +17,7 @@ #include "ice_protocol_type.h" #include "ice_sbq_cmd.h" #include "ice_vlan_mode.h" +#include "ice_fwlog.h" static inline bool ice_is_tc_ena(unsigned long bitmap, u8 tc) { @@ -246,6 +247,7 @@ struct ice_fd_hw_prof { int cnt; u64 entry_h[ICE_MAX_FDIR_VSI_PER_FILTER][ICE_FD_HW_SEG_MAX]; u16 vsi_h[ICE_MAX_FDIR_VSI_PER_FILTER]; + u64 prof_id[ICE_FD_HW_SEG_MAX]; }; /* Common HW capabilities for SW use */ @@ -351,6 +353,7 @@ struct ice_ts_func_info { #define ICE_TS_TMR0_ENA_M BIT(25) #define ICE_TS_TMR1_ENA_M BIT(26) #define ICE_TS_LL_TX_TS_READ_M BIT(28) +#define ICE_TS_LL_TX_TS_INT_READ_M BIT(29) struct ice_ts_dev_info { /* Device specific info */ @@ -364,6 +367,7 @@ struct ice_ts_dev_info { u8 tmr0_ena; u8 tmr1_ena; u8 ts_ll_read; + u8 ts_ll_int_read; }; /* Function specific capabilities */ @@ -377,6 +381,8 @@ struct ice_hw_func_caps { struct ice_ts_func_info ts_func_info; }; +#define ICE_SENSOR_SUPPORT_E810_INT_TEMP_BIT 0 + /* Device wide capabilities */ struct ice_hw_dev_caps { struct ice_hw_common_caps common_cap; @@ -385,6 +391,11 @@ struct ice_hw_dev_caps { u32 num_flow_director_fltr; /* Number of FD filters available */ struct ice_ts_dev_info ts_dev_info; u32 num_funcs; + /* bitmap of supported sensors + * bit 0 - internal temperature sensor + * bit 31:1 - Reserved + */ + u32 supported_sensors; }; /* MAC info */ @@ -730,24 +741,6 @@ struct ice_switch_info { DECLARE_BITMAP(prof_res_bm[ICE_MAX_NUM_PROFILES], ICE_MAX_FV_WORDS); }; -/* FW logging configuration */ -struct ice_fw_log_evnt { - u8 cfg : 4; /* New event enables to configure */ - u8 cur : 4; /* Current/active event enables */ -}; - -struct ice_fw_log_cfg { - u8 cq_en : 1; /* FW logging is enabled via the control queue */ - u8 uart_en : 1; /* FW logging is enabled via UART for all PFs */ - u8 actv_evnts; /* Cumulation of currently enabled log events */ - -#define ICE_FW_LOG_EVNT_INFO (ICE_AQC_FW_LOG_INFO_EN >> ICE_AQC_FW_LOG_EN_S) -#define ICE_FW_LOG_EVNT_INIT (ICE_AQC_FW_LOG_INIT_EN >> ICE_AQC_FW_LOG_EN_S) -#define ICE_FW_LOG_EVNT_FLOW (ICE_AQC_FW_LOG_FLOW_EN >> ICE_AQC_FW_LOG_EN_S) -#define ICE_FW_LOG_EVNT_ERR (ICE_AQC_FW_LOG_ERR_EN >> ICE_AQC_FW_LOG_EN_S) - struct ice_fw_log_evnt evnts[ICE_AQC_FW_LOG_ID_MAX]; -}; - /* Enum defining the different states of the mailbox snapshot in the * PF-VF mailbox overflow detection algorithm. 
The snapshot can be in * states: @@ -827,7 +820,7 @@ struct ice_mbx_data { enum ice_phy_model { ICE_PHY_UNSUP = -1, ICE_PHY_E810 = 1, - ICE_PHY_E822, + ICE_PHY_E82X, }; /* Port hardware description */ @@ -889,7 +882,9 @@ struct ice_hw { u8 fw_patch; /* firmware patch version */ u32 fw_build; /* firmware build number */ - struct ice_fw_log_cfg fw_log; + struct ice_fwlog_cfg fwlog_cfg; + bool fwlog_supported; /* does hardware support FW logging? */ + struct ice_fwlog_ring fwlog_ring; /* Device max aggregate bandwidths corresponding to the GL_PWR_MODE_CTL * register. Used for determining the ITR/INTRL granularity during @@ -910,10 +905,9 @@ struct ice_hw { /* INTRL granularity in 1 us */ u8 intrl_gran; -#define ICE_PHY_PER_NAC_E822 1 #define ICE_MAX_QUAD 2 -#define ICE_QUADS_PER_PHY_E822 2 -#define ICE_PORTS_PER_PHY_E822 8 +#define ICE_QUADS_PER_PHY_E82X 2 +#define ICE_PORTS_PER_PHY_E82X 8 #define ICE_PORTS_PER_QUAD 4 #define ICE_PORTS_PER_PHY_E810 4 #define ICE_NUM_EXTERNAL_PORTS (ICE_MAX_QUAD * ICE_PORTS_PER_QUAD) @@ -1009,7 +1003,6 @@ struct ice_hw_port_stats { u64 error_bytes; /* errbc */ u64 mac_local_faults; /* mlfc */ u64 mac_remote_faults; /* mrfc */ - u64 rx_len_errors; /* rlec */ u64 link_xon_rx; /* lxonrxc */ u64 link_xoff_rx; /* lxoffrxc */ u64 link_xon_tx; /* lxontxc */ @@ -1048,6 +1041,7 @@ enum ice_sw_fwd_act_type { ICE_FWD_TO_Q, ICE_FWD_TO_QGRP, ICE_DROP_PACKET, + ICE_MIRROR_PACKET, ICE_NOP, ICE_INVAL_ACT }; @@ -1078,7 +1072,7 @@ struct ice_aq_get_set_rss_lut_params { #define ICE_OROM_VER_BUILD_SHIFT 8 #define ICE_OROM_VER_BUILD_MASK (0xffff << ICE_OROM_VER_BUILD_SHIFT) #define ICE_OROM_VER_SHIFT 24 -#define ICE_OROM_VER_MASK (0xff << ICE_OROM_VER_SHIFT) +#define ICE_OROM_VER_MASK (0xffU << ICE_OROM_VER_SHIFT) #define ICE_SR_PFA_PTR 0x40 #define ICE_SR_1ST_NVM_BANK_PTR 0x42 #define ICE_SR_NVM_BANK_SIZE 0x43 diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c index 88e3cd09f8..15ade19de5 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c @@ -775,6 +775,7 @@ void ice_reset_all_vfs(struct ice_pf *pf) ice_for_each_vf(pf, bkt, vf) { mutex_lock(&vf->cfg_lock); + ice_eswitch_detach(pf, vf); vf->driver_caps = 0; ice_vc_set_default_allowlist(vf); @@ -790,13 +791,11 @@ void ice_reset_all_vfs(struct ice_pf *pf) ice_vf_rebuild_vsi(vf); ice_vf_post_vsi_rebuild(vf); + ice_eswitch_attach(pf, vf); + mutex_unlock(&vf->cfg_lock); } - if (ice_is_eswitch_mode_switchdev(pf)) - if (ice_eswitch_rebuild(pf)) - dev_warn(dev, "eswitch rebuild failed\n"); - ice_flush(hw); clear_bit(ICE_VF_DIS, pf->state); @@ -864,6 +863,11 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags) return 0; } + if (flags & ICE_VF_RESET_LOCK) + mutex_lock(&vf->cfg_lock); + else + lockdep_assert_held(&vf->cfg_lock); + lag = pf->lag; mutex_lock(&pf->lag_mutex); if (lag && lag->bonded && lag->primary) { @@ -875,11 +879,6 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags) act_prt = ICE_LAG_INVALID_PORT; } - if (flags & ICE_VF_RESET_LOCK) - mutex_lock(&vf->cfg_lock); - else - lockdep_assert_held(&vf->cfg_lock); - if (ice_is_vf_disabled(vf)) { vsi = ice_get_vf_vsi(vf); if (!vsi) { @@ -958,20 +957,20 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags) goto out_unlock; } - ice_eswitch_update_repr(vsi); + ice_eswitch_update_repr(vf->repr_id, vsi); /* if the VF has been reset allow it to come up again */ ice_mbx_clear_malvf(&vf->mbx_info); out_unlock: - if (flags & ICE_VF_RESET_LOCK) - mutex_unlock(&vf->cfg_lock); - if (lag && 
lag->bonded && lag->primary && act_prt != ICE_LAG_INVALID_PORT) ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt); mutex_unlock(&pf->lag_mutex); + if (flags & ICE_VF_RESET_LOCK) + mutex_unlock(&vf->cfg_lock); + return err; } diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.h b/drivers/net/ethernet/intel/ice/ice_vf_lib.h index 6b41e0f3d3..0cc9034065 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.h @@ -129,7 +129,7 @@ struct ice_vf { struct ice_mdd_vf_events mdd_tx_events; DECLARE_BITMAP(opcodes_allowlist, VIRTCHNL_OP_MAX); - struct ice_repr *repr; + unsigned long repr_id; const struct ice_virtchnl_ops *virtchnl_ops; const struct ice_vf_ops *vf_ops; diff --git a/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c b/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c index 80dc4bcdd3..b3e1bdcb80 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c +++ b/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c @@ -26,24 +26,22 @@ static void ice_port_vlan_on(struct ice_vsi *vsi) struct ice_vsi_vlan_ops *vlan_ops; struct ice_pf *pf = vsi->back; - if (ice_is_dvm_ena(&pf->hw)) { - vlan_ops = &vsi->outer_vlan_ops; - - /* setup outer VLAN ops */ - vlan_ops->set_port_vlan = ice_vsi_set_outer_port_vlan; - vlan_ops->clear_port_vlan = ice_vsi_clear_outer_port_vlan; + /* setup inner VLAN ops */ + vlan_ops = &vsi->inner_vlan_ops; - /* setup inner VLAN ops */ - vlan_ops = &vsi->inner_vlan_ops; + if (ice_is_dvm_ena(&pf->hw)) { vlan_ops->add_vlan = noop_vlan_arg; vlan_ops->del_vlan = noop_vlan_arg; vlan_ops->ena_stripping = ice_vsi_ena_inner_stripping; vlan_ops->dis_stripping = ice_vsi_dis_inner_stripping; vlan_ops->ena_insertion = ice_vsi_ena_inner_insertion; vlan_ops->dis_insertion = ice_vsi_dis_inner_insertion; - } else { - vlan_ops = &vsi->inner_vlan_ops; + /* setup outer VLAN ops */ + vlan_ops = &vsi->outer_vlan_ops; + vlan_ops->set_port_vlan = ice_vsi_set_outer_port_vlan; + vlan_ops->clear_port_vlan = ice_vsi_clear_outer_port_vlan; + } else { vlan_ops->set_port_vlan = ice_vsi_set_inner_port_vlan; vlan_ops->clear_port_vlan = ice_vsi_clear_inner_port_vlan; } diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c index d6348f2082..7b550d7d96 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c @@ -499,7 +499,7 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg) vfres->rss_lut_size = ICE_LUT_VSI_SIZE; vfres->max_mtu = ice_vc_get_max_frame_size(vf); - vfres->vsi_res[0].vsi_id = vf->lan_vsi_num; + vfres->vsi_res[0].vsi_id = ICE_VF_VSI_ID; vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV; vfres->vsi_res[0].num_queue_pairs = vsi->num_txq; ether_addr_copy(vfres->vsi_res[0].default_mac_addr, @@ -545,12 +545,7 @@ static void ice_vc_reset_vf_msg(struct ice_vf *vf) */ bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id) { - struct ice_pf *pf = vf->pf; - struct ice_vsi *vsi; - - vsi = ice_find_vsi(pf, vsi_id); - - return (vsi && (vsi->vf == vf)); + return vsi_id == ICE_VF_VSI_ID; } /** @@ -682,9 +677,7 @@ out: * a specific virtchnl RSS cfg * @hw: pointer to the hardware * @rss_cfg: pointer to the virtchnl RSS cfg - * @addl_hdrs: pointer to the protocol header fields (ICE_FLOW_SEG_HDR_*) - * to configure - * @hash_flds: pointer to the hash bit fields (ICE_FLOW_HASH_*) to configure + * @hash_cfg: pointer to the HW hash configuration * * Return true if all the protocol header and hash fields in the RSS cfg could * be 
parsed, else return false @@ -692,13 +685,23 @@ out: * This function parses the virtchnl RSS cfg to be the intended * hash fields and the intended header for RSS configuration */ -static bool -ice_vc_parse_rss_cfg(struct ice_hw *hw, struct virtchnl_rss_cfg *rss_cfg, - u32 *addl_hdrs, u64 *hash_flds) +static bool ice_vc_parse_rss_cfg(struct ice_hw *hw, + struct virtchnl_rss_cfg *rss_cfg, + struct ice_rss_hash_cfg *hash_cfg) { const struct ice_vc_hash_field_match_type *hf_list; const struct ice_vc_hdr_match_type *hdr_list; int i, hf_list_len, hdr_list_len; + u32 *addl_hdrs = &hash_cfg->addl_hdrs; + u64 *hash_flds = &hash_cfg->hash_flds; + + /* set outer layer RSS as default */ + hash_cfg->hdr_type = ICE_RSS_OUTER_HEADERS; + + if (rss_cfg->rss_algorithm == VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC) + hash_cfg->symm = true; + else + hash_cfg->symm = false; hf_list = ice_vc_hash_field_list; hf_list_len = ARRAY_SIZE(ice_vc_hash_field_list); @@ -849,18 +852,24 @@ static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add) kfree(ctx); } else { - u32 addl_hdrs = ICE_FLOW_SEG_HDR_NONE; - u64 hash_flds = ICE_HASH_INVALID; + struct ice_rss_hash_cfg cfg; + + /* Only check for none raw pattern case */ + if (!ice_vc_validate_pattern(vf, &rss_cfg->proto_hdrs)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + cfg.addl_hdrs = ICE_FLOW_SEG_HDR_NONE; + cfg.hash_flds = ICE_HASH_INVALID; + cfg.hdr_type = ICE_RSS_ANY_HEADERS; - if (!ice_vc_parse_rss_cfg(hw, rss_cfg, &addl_hdrs, - &hash_flds)) { + if (!ice_vc_parse_rss_cfg(hw, rss_cfg, &cfg)) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } if (add) { - if (ice_add_rss_cfg(hw, vsi->idx, hash_flds, - addl_hdrs)) { + if (ice_add_rss_cfg(hw, vsi, &cfg)) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; dev_err(dev, "ice_add_rss_cfg failed for vsi = %d, v_ret = %d\n", vsi->vsi_num, v_ret); @@ -868,8 +877,7 @@ static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add) } else { int status; - status = ice_rem_rss_cfg(hw, vsi->idx, hash_flds, - addl_hdrs); + status = ice_rem_rss_cfg(hw, vsi->idx, &cfg); /* We just ignore -ENOENT, because if two configurations * share the same profile remove one of them actually * removes both, since the profile is deleted. 
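ice_vc_parse_rss_cfg() now also records whether the VF requested symmetric Toeplitz hashing (VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC), so that both directions of a flow produce the same hash and land on the same queue. The sketch below only illustrates what "symmetric" buys; the device implements this in hardware, not with this XOR fold:

/* Illustration only: any hash computed over a symmetrized tuple
 * satisfies hash(src, dst) == hash(dst, src); XOR-folding the
 * endpoints is the simplest way to build such a tuple.
 */
static u32 example_symmetric_fold(u32 saddr, u32 daddr, u16 sport, u16 dport)
{
	return (saddr ^ daddr) ^ (u32)(sport ^ dport);
}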
@@ -979,6 +987,51 @@ error_param: NULL, 0); } +/** + * ice_vc_config_rss_hfunc + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * + * Configure the VF's RSS Hash function + */ +static int ice_vc_config_rss_hfunc(struct ice_vf *vf, u8 *msg) +{ + struct virtchnl_rss_hfunc *vrh = (struct virtchnl_rss_hfunc *)msg; + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; + u8 hfunc = ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ; + struct ice_vsi *vsi; + + if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + + if (!ice_vc_isvalid_vsi_id(vf, vrh->vsi_id)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + + if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + + vsi = ice_get_vf_vsi(vf); + if (!vsi) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + + if (vrh->rss_algorithm == VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC) + hfunc = ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ; + + if (ice_set_rss_hfunc(vsi, hfunc)) + v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR; +error_param: + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_HFUNC, v_ret, + NULL, 0); +} + /** * ice_vc_cfg_promiscuous_mode_msg * @vf: pointer to the VF info @@ -2625,7 +2678,7 @@ static int ice_vc_set_rss_hena(struct ice_vf *vf, u8 *msg) } if (vrh->hena) { - status = ice_add_avf_rss_cfg(&pf->hw, vsi->idx, vrh->hena); + status = ice_add_avf_rss_cfg(&pf->hw, vsi, vrh->hena); v_ret = ice_err_to_virt_err(status); } @@ -3037,7 +3090,7 @@ static struct ice_vlan ice_vc_to_vlan(struct virtchnl_vlan *vc_vlan) { struct ice_vlan vlan = { 0 }; - vlan.prio = (vc_vlan->tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; + vlan.prio = FIELD_GET(VLAN_PRIO_MASK, vc_vlan->tci); vlan.vid = vc_vlan->tci & VLAN_VID_MASK; vlan.tpid = vc_vlan->tpid; @@ -3746,6 +3799,7 @@ static const struct ice_virtchnl_ops ice_virtchnl_dflt_ops = { .cfg_irq_map_msg = ice_vc_cfg_irq_map_msg, .config_rss_key = ice_vc_config_rss_key, .config_rss_lut = ice_vc_config_rss_lut, + .config_rss_hfunc = ice_vc_config_rss_hfunc, .get_stats_msg = ice_vc_get_stats_msg, .cfg_promiscuous_mode_msg = ice_vc_cfg_promiscuous_mode_msg, .add_vlan_msg = ice_vc_add_vlan_msg, @@ -3875,6 +3929,7 @@ static const struct ice_virtchnl_ops ice_virtchnl_repr_ops = { .cfg_irq_map_msg = ice_vc_cfg_irq_map_msg, .config_rss_key = ice_vc_config_rss_key, .config_rss_lut = ice_vc_config_rss_lut, + .config_rss_hfunc = ice_vc_config_rss_hfunc, .get_stats_msg = ice_vc_get_stats_msg, .cfg_promiscuous_mode_msg = ice_vc_repr_cfg_promiscuous_mode, .add_vlan_msg = ice_vc_add_vlan_msg, @@ -4057,6 +4112,9 @@ error_handler: case VIRTCHNL_OP_CONFIG_RSS_LUT: err = ops->config_rss_lut(vf, msg); break; + case VIRTCHNL_OP_CONFIG_RSS_HFUNC: + err = ops->config_rss_hfunc(vf, msg); + break; case VIRTCHNL_OP_GET_STATS: err = ops->get_stats_msg(vf, msg); break; diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.h b/drivers/net/ethernet/intel/ice/ice_virtchnl.h index cd747718de..3a41158691 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl.h +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.h @@ -19,6 +19,15 @@ #define ICE_MAX_MACADDR_PER_VF 18 #define ICE_FLEX_DESC_RXDID_MAX_NUM 64 +/* VFs only get a single VSI. For ice hardware, the VF does not need to know + * its VSI index. However, the virtchnl interface requires a VSI number, + * mainly due to legacy hardware. 
+ * + * Since the VF doesn't need this information, report a static value to the VF + * instead of leaking any information about the PF or hardware setup. + */ +#define ICE_VF_VSI_ID 1 + struct ice_virtchnl_ops { int (*get_ver_msg)(struct ice_vf *vf, u8 *msg); int (*get_vf_res_msg)(struct ice_vf *vf, u8 *msg); @@ -32,6 +41,7 @@ struct ice_virtchnl_ops { int (*cfg_irq_map_msg)(struct ice_vf *vf, u8 *msg); int (*config_rss_key)(struct ice_vf *vf, u8 *msg); int (*config_rss_lut)(struct ice_vf *vf, u8 *msg); + int (*config_rss_hfunc)(struct ice_vf *vf, u8 *msg); int (*get_stats_msg)(struct ice_vf *vf, u8 *msg); int (*cfg_promiscuous_mode_msg)(struct ice_vf *vf, u8 *msg); int (*add_vlan_msg)(struct ice_vf *vf, u8 *msg); diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c index 588b77f1a4..d796dbd2a4 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c @@ -66,6 +66,7 @@ static const u32 vlan_v2_allowlist_opcodes[] = { static const u32 rss_pf_allowlist_opcodes[] = { VIRTCHNL_OP_CONFIG_RSS_KEY, VIRTCHNL_OP_CONFIG_RSS_LUT, VIRTCHNL_OP_GET_RSS_HENA_CAPS, VIRTCHNL_OP_SET_RSS_HENA, + VIRTCHNL_OP_CONFIG_RSS_HFUNC, }; /* VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC */ diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c index 24b23b7ef0..f001553e1a 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c @@ -10,19 +10,6 @@ #define to_fltr_conf_from_desc(p) \ container_of(p, struct virtchnl_fdir_fltr_conf, input) -#define ICE_FLOW_PROF_TYPE_S 0 -#define ICE_FLOW_PROF_TYPE_M (0xFFFFFFFFULL << ICE_FLOW_PROF_TYPE_S) -#define ICE_FLOW_PROF_VSI_S 32 -#define ICE_FLOW_PROF_VSI_M (0xFFFFFFFFULL << ICE_FLOW_PROF_VSI_S) - -/* Flow profile ID format: - * [0:31] - flow type, flow + tun_offs - * [32:63] - VSI index - */ -#define ICE_FLOW_PROF_FD(vsi, flow, tun_offs) \ - ((u64)(((((flow) + (tun_offs)) & ICE_FLOW_PROF_TYPE_M)) | \ - (((u64)(vsi) << ICE_FLOW_PROF_VSI_S) & ICE_FLOW_PROF_VSI_M))) - #define GTPU_TEID_OFFSET 4 #define GTPU_EH_QFI_OFFSET 1 #define GTPU_EH_QFI_MASK 0x3F @@ -493,6 +480,7 @@ ice_vc_fdir_rem_prof(struct ice_vf *vf, enum ice_fltr_ptype flow, int tun) return; vf_prof = fdir->fdir_prof[flow]; + prof_id = vf_prof->prof_id[tun]; vf_vsi = ice_get_vf_vsi(vf); if (!vf_vsi) { @@ -503,9 +491,6 @@ ice_vc_fdir_rem_prof(struct ice_vf *vf, enum ice_fltr_ptype flow, int tun) if (!fdir->prof_entry_cnt[flow][tun]) return; - prof_id = ICE_FLOW_PROF_FD(vf_vsi->vsi_num, - flow, tun ? ICE_FLTR_PTYPE_MAX : 0); - for (i = 0; i < fdir->prof_entry_cnt[flow][tun]; i++) if (vf_prof->entry_h[i][tun]) { u16 vsi_num = ice_get_hw_vsi_num(hw, vf_prof->vsi_h[i]); @@ -647,7 +632,6 @@ ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow, struct ice_hw *hw; u64 entry1_h = 0; u64 entry2_h = 0; - u64 prof_id; int ret; pf = vf->pf; @@ -681,18 +665,15 @@ ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow, ice_vc_fdir_rem_prof(vf, flow, tun); } - prof_id = ICE_FLOW_PROF_FD(vf_vsi->vsi_num, flow, - tun ? 
ICE_FLTR_PTYPE_MAX : 0); - - ret = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg, - tun + 1, &prof); + ret = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, seg, + tun + 1, false, &prof); if (ret) { dev_dbg(dev, "Could not add VSI flow 0x%x for VF %d\n", flow, vf->vf_id); goto err_exit; } - ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx, + ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id, vf_vsi->idx, vf_vsi->idx, ICE_FLOW_PRIO_NORMAL, seg, &entry1_h); if (ret) { @@ -701,7 +682,7 @@ ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow, goto err_prof; } - ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx, + ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id, vf_vsi->idx, ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL, seg, &entry2_h); if (ret) { @@ -725,14 +706,16 @@ ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow, vf_prof->cnt++; fdir->prof_entry_cnt[flow][tun]++; + vf_prof->prof_id[tun] = prof->id; + return 0; err_entry_1: ice_rem_prof_id_flow(hw, ICE_BLK_FD, - ice_get_hw_vsi_num(hw, vf_vsi->idx), prof_id); + ice_get_hw_vsi_num(hw, vf_vsi->idx), prof->id); ice_flow_rem_entry(hw, ICE_BLK_FD, entry1_h); err_prof: - ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id); + ice_flow_rem_prof(hw, ICE_BLK_FD, prof->id); err_exit: return ret; } @@ -1480,16 +1463,15 @@ ice_vf_verify_rx_desc(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx, int ret; stat_err = le16_to_cpu(ctx->rx_desc.wb.status_error0); - if (((stat_err & ICE_FXD_FLTR_WB_QW1_DD_M) >> - ICE_FXD_FLTR_WB_QW1_DD_S) != ICE_FXD_FLTR_WB_QW1_DD_YES) { + if (FIELD_GET(ICE_FXD_FLTR_WB_QW1_DD_M, stat_err) != + ICE_FXD_FLTR_WB_QW1_DD_YES) { *status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE; dev_err(dev, "VF %d: Desc Done not set\n", vf->vf_id); ret = -EINVAL; goto err_exit; } - prog_id = (stat_err & ICE_FXD_FLTR_WB_QW1_PROG_ID_M) >> - ICE_FXD_FLTR_WB_QW1_PROG_ID_S; + prog_id = FIELD_GET(ICE_FXD_FLTR_WB_QW1_PROG_ID_M, stat_err); if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_ADD && ctx->v_opcode != VIRTCHNL_OP_ADD_FDIR_FILTER) { dev_err(dev, "VF %d: Desc show add, but ctx not", @@ -1508,8 +1490,7 @@ ice_vf_verify_rx_desc(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx, goto err_exit; } - error = (stat_err & ICE_FXD_FLTR_WB_QW1_FAIL_M) >> - ICE_FXD_FLTR_WB_QW1_FAIL_S; + error = FIELD_GET(ICE_FXD_FLTR_WB_QW1_FAIL_M, stat_err); if (error == ICE_FXD_FLTR_WB_QW1_FAIL_YES) { if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_ADD) { dev_err(dev, "VF %d, Failed to add FDIR rule due to no space in the table", @@ -1524,8 +1505,7 @@ ice_vf_verify_rx_desc(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx, goto err_exit; } - error = (stat_err & ICE_FXD_FLTR_WB_QW1_FAIL_PROF_M) >> - ICE_FXD_FLTR_WB_QW1_FAIL_PROF_S; + error = FIELD_GET(ICE_FXD_FLTR_WB_QW1_FAIL_PROF_M, stat_err); if (error == ICE_FXD_FLTR_WB_QW1_FAIL_PROF_YES) { dev_err(dev, "VF %d: Profile matching error", vf->vf_id); *status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE; diff --git a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c index 8307902115..2e9ad27cb9 100644 --- a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c @@ -487,10 +487,11 @@ int ice_vsi_ena_outer_stripping(struct ice_vsi *vsi, u16 tpid) ctxt->info.outer_vlan_flags = vsi->info.outer_vlan_flags & ~(ICE_AQ_VSI_OUTER_VLAN_EMODE_M | ICE_AQ_VSI_OUTER_TAG_TYPE_M); ctxt->info.outer_vlan_flags |= - ((ICE_AQ_VSI_OUTER_VLAN_EMODE_SHOW_BOTH << - ICE_AQ_VSI_OUTER_VLAN_EMODE_S) | - ((tag_type << 
ICE_AQ_VSI_OUTER_TAG_TYPE_S) & - ICE_AQ_VSI_OUTER_TAG_TYPE_M)); + /* we want EMODE_SHOW_BOTH, but that value is zero, so the line + * above clears it well enough that we don't need to try to set + * zero here, so just do the tag type + */ + FIELD_PREP(ICE_AQ_VSI_OUTER_TAG_TYPE_M, tag_type); err = ice_update_vsi(hw, vsi->idx, ctxt, NULL); if (err) @@ -595,11 +596,9 @@ int ice_vsi_ena_outer_insertion(struct ice_vsi *vsi, u16 tpid) ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M | ICE_AQ_VSI_OUTER_TAG_TYPE_M); ctxt->info.outer_vlan_flags |= - ((ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL << - ICE_AQ_VSI_OUTER_VLAN_TX_MODE_S) & - ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M) | - ((tag_type << ICE_AQ_VSI_OUTER_TAG_TYPE_S) & - ICE_AQ_VSI_OUTER_TAG_TYPE_M); + FIELD_PREP(ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M, + ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL) | + FIELD_PREP(ICE_AQ_VSI_OUTER_TAG_TYPE_M, tag_type); err = ice_update_vsi(hw, vsi->idx, ctxt, NULL); if (err) @@ -648,9 +647,8 @@ int ice_vsi_dis_outer_insertion(struct ice_vsi *vsi) ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M); ctxt->info.outer_vlan_flags |= ICE_AQ_VSI_OUTER_VLAN_BLOCK_TX_DESC | - ((ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL << - ICE_AQ_VSI_OUTER_VLAN_TX_MODE_S) & - ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M); + FIELD_PREP(ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M, + ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL); err = ice_update_vsi(hw, vsi->idx, ctxt, NULL); if (err) @@ -708,8 +706,7 @@ __ice_vsi_set_outer_port_vlan(struct ice_vsi *vsi, u16 vlan_info, u16 tpid) ctxt->info.outer_vlan_flags = (ICE_AQ_VSI_OUTER_VLAN_EMODE_SHOW << ICE_AQ_VSI_OUTER_VLAN_EMODE_S) | - ((tag_type << ICE_AQ_VSI_OUTER_TAG_TYPE_S) & - ICE_AQ_VSI_OUTER_TAG_TYPE_M) | + FIELD_PREP(ICE_AQ_VSI_OUTER_TAG_TYPE_M, tag_type) | ICE_AQ_VSI_OUTER_VLAN_BLOCK_TX_DESC | (ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ACCEPTUNTAGGED << ICE_AQ_VSI_OUTER_VLAN_TX_MODE_S) | diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c index 0fd5551b10..2eecd0f39a 100644 --- a/drivers/net/ethernet/intel/ice/ice_xsk.c +++ b/drivers/net/ethernet/intel/ice/ice_xsk.c @@ -459,6 +459,11 @@ static u16 ice_fill_rx_descs(struct xsk_buff_pool *pool, struct xdp_buff **xdp, rx_desc->read.pkt_addr = cpu_to_le64(dma); rx_desc->wb.status_error0 = 0; + /* Put private info that changes on a per-packet basis + * into xdp_buff_xsk->cb. + */ + ice_xdp_meta_set_desc(*xdp, rx_desc); + rx_desc++; xdp++; } @@ -865,8 +870,7 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget) struct xdp_buff *xdp; struct sk_buff *skb; u16 stat_err_bits; - u16 vlan_tag = 0; - u16 rx_ptype; + u16 vlan_tci; rx_desc = ICE_RX_DESC(rx_ring, ntc); @@ -943,13 +947,10 @@ construct_skb: total_rx_bytes += skb->len; total_rx_packets++; - vlan_tag = ice_get_vlan_tag_from_rx_desc(rx_desc); - - rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) & - ICE_RX_FLEX_DESC_PTYPE_M; + vlan_tci = ice_get_vlan_tci(rx_desc); - ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype); - ice_receive_skb(rx_ring, skb, vlan_tag); + ice_process_skb_fields(rx_ring, rx_desc, skb); + ice_receive_skb(rx_ring, skb, vlan_tci); } rx_ring->next_to_clean = ntc; -- cgit v1.2.3
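Most of the mechanical churn in this patch replaces open-coded shift-and-mask arithmetic with the FIELD_PREP()/FIELD_GET() helpers from <linux/bitfield.h>, which derive the shift from the mask at compile time and type-check the value. A minimal sketch against a hypothetical register layout (EXAMPLE_QINDEX_M is invented for illustration):

#include <linux/bitfield.h>
#include <linux/bits.h>

/* hypothetical 32-bit register: bits 15:8 carry a queue index */
#define EXAMPLE_QINDEX_M	GENMASK(15, 8)

static u32 example_pack(u32 qindex)
{
	/* replaces the old ((qindex << 8) & EXAMPLE_QINDEX_M) form */
	return FIELD_PREP(EXAMPLE_QINDEX_M, qindex);
}

static u32 example_unpack(u32 reg)
{
	/* replaces the old ((reg & EXAMPLE_QINDEX_M) >> 8) form */
	return FIELD_GET(EXAMPLE_QINDEX_M, reg);
}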
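The recipe-to-profile AQ commands now carry the association as a single u64 (recipe_assoc) instead of memcpy()ing through a u8 array, with callers converting explicitly between that word and a bitmap, as the ice_get_recp_to_prof_map() and ice_add_adv_recipe() hunks show. A sketch of the round trip (EXAMPLE_NUM_RECIPES stands in for ICE_MAX_NUM_RECIPES):

#include <linux/bitmap.h>

#define EXAMPLE_NUM_RECIPES	64

static u64 example_round_trip(u64 recp_assoc)
{
	DECLARE_BITMAP(r_bitmap, EXAMPLE_NUM_RECIPES);
	u64 out;

	/* unpack the word returned by the 0x0293 query ... */
	bitmap_from_arr64(r_bitmap, &recp_assoc, EXAMPLE_NUM_RECIPES);
	__set_bit(3, r_bitmap);		/* associate recipe 3 */
	/* ... and repack it for the 0x0291 set command */
	bitmap_to_arr64(&out, r_bitmap, EXAMPLE_NUM_RECIPES);
	return out;
}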
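VF representors are no longer reached through a struct ice_repr pointer in each VF; the VF keeps only repr_id and the eswitch keeps an xarray, which is what the rewritten ice_repr_get_by_vsi() looks up. A hedged sketch of the alloc/lookup pairing this implies (presumably what ice_repr_add_vf()/ice_eswitch_attach() do; the names below are invented):

#include <linux/xarray.h>

struct example_repr {
	u32 id;
};

/* xa must have been initialized with xa_init_flags(xa, XA_FLAGS_ALLOC) */
static int example_repr_track(struct xarray *xa, struct example_repr *repr)
{
	/* store at the first free index and remember it for later lookup */
	return xa_alloc(xa, &repr->id, repr, xa_limit_32b, GFP_KERNEL);
}

static struct example_repr *example_repr_find(struct xarray *xa, u32 id)
{
	return xa_load(xa, id);
}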
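struct ice_xdp_buff wraps the generic xdp_buff so that the EOP descriptor and packet context travel with the frame into the hint handlers, while the static_assert pins the inner buffer at offset 0 so memory from xsk_buff_pool (which only knows xdp_buff) can be reinterpreted safely. The pattern in isolation, with stand-in types:

#include <linux/build_bug.h>
#include <linux/container_of.h>
#include <linux/stddef.h>

struct inner_buff { int dummy; };	/* stand-in for xdp_buff */

struct outer_buff {
	struct inner_buff buff;		/* must stay the first member */
	const void *eop_desc;		/* driver-private extra state */
};

static_assert(offsetof(struct outer_buff, buff) == 0);

/* code handed only the inner buffer can still reach the wrapper */
static const void *outer_desc(struct inner_buff *b)
{
	return container_of(b, struct outer_buff, buff)->eop_desc;
}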
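On the consumer side, the xdp_metadata_ops registered here surface to BPF programs as metadata kfuncs. A minimal XDP program reading the new hints, assuming a 6.8-era vmlinux.h (the vlan_tag kfunc is the one this series wires up for ice):

// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

extern int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, __u32 *hash,
				    enum xdp_rss_hash_type *rss_type) __ksym;
extern int bpf_xdp_metadata_rx_vlan_tag(const struct xdp_md *ctx,
					__be16 *vlan_proto,
					__u16 *vlan_tci) __ksym;

SEC("xdp")
int read_rx_hints(struct xdp_md *ctx)
{
	enum xdp_rss_hash_type rss_type;
	__be16 vlan_proto;
	__u16 vlan_tci;
	__u32 hash;

	if (!bpf_xdp_metadata_rx_hash(ctx, &hash, &rss_type))
		bpf_printk("hash 0x%x type %d", hash, rss_type);
	if (!bpf_xdp_metadata_rx_vlan_tag(ctx, &vlan_proto, &vlan_tci))
		bpf_printk("vlan tci 0x%x", vlan_tci);
	return XDP_PASS;
}

char LICENSE[] SEC("license") = "GPL";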
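The vlan_tag -> vlan_tci renames are not cosmetic: the descriptor field is the full 16-bit Tag Control Information (PCP, DEI, VID), not just a VLAN ID, and ice_vc_to_vlan() now splits it with FIELD_GET. A self-contained sketch of the decode using the standard masks from <linux/if_vlan.h>:

#include <linux/bitfield.h>
#include <linux/if_vlan.h>

struct example_vlan {
	u16 vid;
	u8 prio;
	bool dei;
};

static struct example_vlan example_decode_tci(u16 tci)
{
	return (struct example_vlan) {
		.vid  = tci & VLAN_VID_MASK,		/* bits 11:0 */
		.prio = FIELD_GET(VLAN_PRIO_MASK, tci),	/* bits 15:13 */
		.dei  = !!(tci & VLAN_CFI_MASK),	/* bit 12 */
	};
}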