author      Daniel Baumann <daniel.baumann@progress-linux.org>    2024-04-07 18:49:45 +0000
committer   Daniel Baumann <daniel.baumann@progress-linux.org>    2024-04-07 18:49:45 +0000
commit      2c3c1048746a4622d8c89a29670120dc8fab93c4 (patch)
tree        848558de17fb3008cdf4d861b01ac7781903ce39 /drivers/net/ethernet/hisilicon/hns3/hns3pf
parent      Initial commit. (diff)
download    linux-2c3c1048746a4622d8c89a29670120dc8fab93c4.tar.xz
            linux-2c3c1048746a4622d8c89a29670120dc8fab93c4.zip
Adding upstream version 6.1.76.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/net/ethernet/hisilicon/hns3/hns3pf')
19 files changed, 27279 insertions, 0 deletions
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h new file mode 100644 index 000000000..43cada51d --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h @@ -0,0 +1,881 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +// Copyright (c) 2016-2017 Hisilicon Limited. + +#ifndef __HCLGE_CMD_H +#define __HCLGE_CMD_H +#include <linux/types.h> +#include <linux/io.h> +#include <linux/etherdevice.h> +#include "hnae3.h" +#include "hclge_comm_cmd.h" + +struct hclge_dev; + +#define HCLGE_CMDQ_RX_INVLD_B 0 +#define HCLGE_CMDQ_RX_OUTVLD_B 1 + +struct hclge_misc_vector { + u8 __iomem *addr; + int vector_irq; + char name[HNAE3_INT_NAME_LEN]; +}; + +#define hclge_cmd_setup_basic_desc(desc, opcode, is_read) \ + hclge_comm_cmd_setup_basic_desc(desc, opcode, is_read) + +#define HCLGE_TQP_REG_OFFSET 0x80000 +#define HCLGE_TQP_REG_SIZE 0x200 + +#define HCLGE_TQP_MAX_SIZE_DEV_V2 1024 +#define HCLGE_TQP_EXT_REG_OFFSET 0x100 + +#define HCLGE_RCB_INIT_QUERY_TIMEOUT 10 +#define HCLGE_RCB_INIT_FLAG_EN_B 0 +#define HCLGE_RCB_INIT_FLAG_FINI_B 8 +struct hclge_config_rcb_init_cmd { + __le16 rcb_init_flag; + u8 rsv[22]; +}; + +struct hclge_tqp_map_cmd { + __le16 tqp_id; /* Absolute tqp id for in this pf */ + u8 tqp_vf; /* VF id */ +#define HCLGE_TQP_MAP_TYPE_PF 0 +#define HCLGE_TQP_MAP_TYPE_VF 1 +#define HCLGE_TQP_MAP_TYPE_B 0 +#define HCLGE_TQP_MAP_EN_B 1 + u8 tqp_flag; /* Indicate it's pf or vf tqp */ + __le16 tqp_vid; /* Virtual id in this pf/vf */ + u8 rsv[18]; +}; + +#define HCLGE_VECTOR_ELEMENTS_PER_CMD 10 + +enum hclge_int_type { + HCLGE_INT_TX, + HCLGE_INT_RX, + HCLGE_INT_EVENT, +}; + +struct hclge_ctrl_vector_chain_cmd { +#define HCLGE_VECTOR_ID_L_S 0 +#define HCLGE_VECTOR_ID_L_M GENMASK(7, 0) + u8 int_vector_id_l; + u8 int_cause_num; +#define HCLGE_INT_TYPE_S 0 +#define HCLGE_INT_TYPE_M GENMASK(1, 0) +#define HCLGE_TQP_ID_S 2 +#define HCLGE_TQP_ID_M GENMASK(12, 2) +#define HCLGE_INT_GL_IDX_S 13 +#define HCLGE_INT_GL_IDX_M GENMASK(14, 13) + __le16 tqp_type_and_id[HCLGE_VECTOR_ELEMENTS_PER_CMD]; + u8 vfid; +#define HCLGE_VECTOR_ID_H_S 8 +#define HCLGE_VECTOR_ID_H_M GENMASK(15, 8) + u8 int_vector_id_h; +}; + +#define HCLGE_MAX_TC_NUM 8 +#define HCLGE_TC0_PRI_BUF_EN_B 15 /* Bit 15 indicate enable or not */ +#define HCLGE_BUF_UNIT_S 7 /* Buf size is united by 128 bytes */ +struct hclge_tx_buff_alloc_cmd { + __le16 tx_pkt_buff[HCLGE_MAX_TC_NUM]; + u8 tx_buff_rsv[8]; +}; + +struct hclge_rx_priv_buff_cmd { + __le16 buf_num[HCLGE_MAX_TC_NUM]; + __le16 shared_buf; + u8 rsv[6]; +}; + +#define HCLGE_RX_PRIV_EN_B 15 +#define HCLGE_TC_NUM_ONE_DESC 4 +struct hclge_priv_wl { + __le16 high; + __le16 low; +}; + +struct hclge_rx_priv_wl_buf { + struct hclge_priv_wl tc_wl[HCLGE_TC_NUM_ONE_DESC]; +}; + +struct hclge_rx_com_thrd { + struct hclge_priv_wl com_thrd[HCLGE_TC_NUM_ONE_DESC]; +}; + +struct hclge_rx_com_wl { + struct hclge_priv_wl com_wl; +}; + +struct hclge_waterline { + u32 low; + u32 high; +}; + +struct hclge_tc_thrd { + u32 low; + u32 high; +}; + +struct hclge_priv_buf { + struct hclge_waterline wl; /* Waterline for low and high */ + u32 buf_size; /* TC private buffer size */ + u32 tx_buf_size; + u32 enable; /* Enable TC private buffer or not */ +}; + +struct hclge_shared_buf { + struct hclge_waterline self; + struct hclge_tc_thrd tc_thrd[HCLGE_MAX_TC_NUM]; + u32 buf_size; +}; + +struct hclge_pkt_buf_alloc { + struct hclge_priv_buf priv_buf[HCLGE_MAX_TC_NUM]; + struct hclge_shared_buf s_buf; +}; + +#define HCLGE_RX_COM_WL_EN_B 15 
+struct hclge_rx_com_wl_buf_cmd { + __le16 high_wl; + __le16 low_wl; + u8 rsv[20]; +}; + +#define HCLGE_RX_PKT_EN_B 15 +struct hclge_rx_pkt_buf_cmd { + __le16 high_pkt; + __le16 low_pkt; + u8 rsv[20]; +}; + +#define HCLGE_PF_STATE_DONE_B 0 +#define HCLGE_PF_STATE_MAIN_B 1 +#define HCLGE_PF_STATE_BOND_B 2 +#define HCLGE_PF_STATE_MAC_N_B 6 +#define HCLGE_PF_MAC_NUM_MASK 0x3 +#define HCLGE_PF_STATE_MAIN BIT(HCLGE_PF_STATE_MAIN_B) +#define HCLGE_PF_STATE_DONE BIT(HCLGE_PF_STATE_DONE_B) +#define HCLGE_VF_RST_STATUS_CMD 4 + +struct hclge_func_status_cmd { + __le32 vf_rst_state[HCLGE_VF_RST_STATUS_CMD]; + u8 pf_state; + u8 mac_id; + u8 rsv1; + u8 pf_cnt_in_mac; + u8 pf_num; + u8 vf_num; + u8 rsv[2]; +}; + +struct hclge_pf_res_cmd { + __le16 tqp_num; + __le16 buf_size; + __le16 msixcap_localid_ba_nic; + __le16 msixcap_localid_number_nic; + __le16 pf_intr_vector_number_roce; + __le16 pf_own_fun_number; + __le16 tx_buf_size; + __le16 dv_buf_size; + __le16 ext_tqp_num; + u8 rsv[6]; +}; + +#define HCLGE_CFG_OFFSET_S 0 +#define HCLGE_CFG_OFFSET_M GENMASK(19, 0) +#define HCLGE_CFG_RD_LEN_S 24 +#define HCLGE_CFG_RD_LEN_M GENMASK(27, 24) +#define HCLGE_CFG_RD_LEN_BYTES 16 +#define HCLGE_CFG_RD_LEN_UNIT 4 + +#define HCLGE_CFG_TC_NUM_S 8 +#define HCLGE_CFG_TC_NUM_M GENMASK(15, 8) +#define HCLGE_CFG_TQP_DESC_N_S 16 +#define HCLGE_CFG_TQP_DESC_N_M GENMASK(31, 16) +#define HCLGE_CFG_PHY_ADDR_S 0 +#define HCLGE_CFG_PHY_ADDR_M GENMASK(7, 0) +#define HCLGE_CFG_MEDIA_TP_S 8 +#define HCLGE_CFG_MEDIA_TP_M GENMASK(15, 8) +#define HCLGE_CFG_RX_BUF_LEN_S 16 +#define HCLGE_CFG_RX_BUF_LEN_M GENMASK(31, 16) +#define HCLGE_CFG_MAC_ADDR_H_S 0 +#define HCLGE_CFG_MAC_ADDR_H_M GENMASK(15, 0) +#define HCLGE_CFG_DEFAULT_SPEED_S 16 +#define HCLGE_CFG_DEFAULT_SPEED_M GENMASK(23, 16) +#define HCLGE_CFG_RSS_SIZE_S 24 +#define HCLGE_CFG_RSS_SIZE_M GENMASK(31, 24) +#define HCLGE_CFG_SPEED_ABILITY_S 0 +#define HCLGE_CFG_SPEED_ABILITY_M GENMASK(7, 0) +#define HCLGE_CFG_SPEED_ABILITY_EXT_S 10 +#define HCLGE_CFG_SPEED_ABILITY_EXT_M GENMASK(15, 10) +#define HCLGE_CFG_VLAN_FLTR_CAP_S 8 +#define HCLGE_CFG_VLAN_FLTR_CAP_M GENMASK(9, 8) +#define HCLGE_CFG_UMV_TBL_SPACE_S 16 +#define HCLGE_CFG_UMV_TBL_SPACE_M GENMASK(31, 16) +#define HCLGE_CFG_PF_RSS_SIZE_S 0 +#define HCLGE_CFG_PF_RSS_SIZE_M GENMASK(3, 0) +#define HCLGE_CFG_TX_SPARE_BUF_SIZE_S 4 +#define HCLGE_CFG_TX_SPARE_BUF_SIZE_M GENMASK(15, 4) + +#define HCLGE_CFG_CMD_CNT 4 + +struct hclge_cfg_param_cmd { + __le32 offset; + __le32 rsv; + __le32 param[HCLGE_CFG_CMD_CNT]; +}; + +#define HCLGE_MAC_MODE 0x0 +#define HCLGE_DESC_NUM 0x40 + +#define HCLGE_ALLOC_VALID_B 0 +struct hclge_vf_num_cmd { + u8 alloc_valid; + u8 rsv[23]; +}; + +#define HCLGE_RSS_DEFAULT_OUTPORT_B 4 + +#define HCLGE_RSS_CFG_TBL_SIZE_H 4 +#define HCLGE_RSS_CFG_TBL_BW_L 8U + +#define HCLGE_RSS_TC_OFFSET_S 0 +#define HCLGE_RSS_TC_OFFSET_M GENMASK(10, 0) +#define HCLGE_RSS_TC_SIZE_MSB_B 11 +#define HCLGE_RSS_TC_SIZE_S 12 +#define HCLGE_RSS_TC_SIZE_M GENMASK(14, 12) +#define HCLGE_RSS_TC_SIZE_MSB_OFFSET 3 +#define HCLGE_RSS_TC_VALID_B 15 + +#define HCLGE_LINK_STATUS_UP_B 0 +#define HCLGE_LINK_STATUS_UP_M BIT(HCLGE_LINK_STATUS_UP_B) +struct hclge_link_status_cmd { + u8 status; + u8 rsv[23]; +}; + +/* for DEVICE_VERSION_V1/2, reference to promisc cmd byte8 */ +#define HCLGE_PROMISC_EN_UC 1 +#define HCLGE_PROMISC_EN_MC 2 +#define HCLGE_PROMISC_EN_BC 3 +#define HCLGE_PROMISC_TX_EN 4 +#define HCLGE_PROMISC_RX_EN 5 + +/* for DEVICE_VERSION_V3, reference to promisc cmd byte10 */ +#define HCLGE_PROMISC_UC_RX_EN 2 +#define 
HCLGE_PROMISC_MC_RX_EN 3 +#define HCLGE_PROMISC_BC_RX_EN 4 +#define HCLGE_PROMISC_UC_TX_EN 5 +#define HCLGE_PROMISC_MC_TX_EN 6 +#define HCLGE_PROMISC_BC_TX_EN 7 + +struct hclge_promisc_cfg_cmd { + u8 promisc; + u8 vf_id; + u8 extend_promisc; + u8 rsv0[21]; +}; + +enum hclge_promisc_type { + HCLGE_UNICAST = 1, + HCLGE_MULTICAST = 2, + HCLGE_BROADCAST = 3, +}; + +#define HCLGE_MAC_TX_EN_B 6 +#define HCLGE_MAC_RX_EN_B 7 +#define HCLGE_MAC_PAD_TX_B 11 +#define HCLGE_MAC_PAD_RX_B 12 +#define HCLGE_MAC_1588_TX_B 13 +#define HCLGE_MAC_1588_RX_B 14 +#define HCLGE_MAC_APP_LP_B 15 +#define HCLGE_MAC_LINE_LP_B 16 +#define HCLGE_MAC_FCS_TX_B 17 +#define HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B 18 +#define HCLGE_MAC_RX_FCS_STRIP_B 19 +#define HCLGE_MAC_RX_FCS_B 20 +#define HCLGE_MAC_TX_UNDER_MIN_ERR_B 21 +#define HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B 22 + +struct hclge_config_mac_mode_cmd { + __le32 txrx_pad_fcs_loop_en; + u8 rsv[20]; +}; + +struct hclge_pf_rst_sync_cmd { +#define HCLGE_PF_RST_ALL_VF_RDY_B 0 + u8 all_vf_ready; + u8 rsv[23]; +}; + +#define HCLGE_CFG_SPEED_S 0 +#define HCLGE_CFG_SPEED_M GENMASK(5, 0) + +#define HCLGE_CFG_DUPLEX_B 7 +#define HCLGE_CFG_DUPLEX_M BIT(HCLGE_CFG_DUPLEX_B) + +struct hclge_config_mac_speed_dup_cmd { + u8 speed_dup; + +#define HCLGE_CFG_MAC_SPEED_CHANGE_EN_B 0 + u8 mac_change_fec_en; + u8 rsv[4]; + u8 lane_num; + u8 rsv1[17]; +}; + +#define HCLGE_TQP_ENABLE_B 0 + +#define HCLGE_MAC_CFG_AN_EN_B 0 +#define HCLGE_MAC_CFG_AN_INT_EN_B 1 +#define HCLGE_MAC_CFG_AN_INT_MSK_B 2 +#define HCLGE_MAC_CFG_AN_INT_CLR_B 3 +#define HCLGE_MAC_CFG_AN_RST_B 4 + +#define HCLGE_MAC_CFG_AN_EN BIT(HCLGE_MAC_CFG_AN_EN_B) + +struct hclge_config_auto_neg_cmd { + __le32 cfg_an_cmd_flag; + u8 rsv[20]; +}; + +struct hclge_sfp_info_cmd { + __le32 speed; + u8 query_type; /* 0: sfp speed, 1: active speed */ + u8 active_fec; + u8 autoneg; /* autoneg state */ + u8 autoneg_ability; /* whether support autoneg */ + __le32 speed_ability; /* speed ability for current media */ + __le32 module_type; + u8 fec_ability; + u8 lane_num; + u8 rsv[6]; +}; + +#define HCLGE_MAC_CFG_FEC_AUTO_EN_B 0 +#define HCLGE_MAC_CFG_FEC_MODE_S 1 +#define HCLGE_MAC_CFG_FEC_MODE_M GENMASK(3, 1) +#define HCLGE_MAC_CFG_FEC_SET_DEF_B 0 +#define HCLGE_MAC_CFG_FEC_CLR_DEF_B 1 + +#define HCLGE_MAC_FEC_OFF 0 +#define HCLGE_MAC_FEC_BASER 1 +#define HCLGE_MAC_FEC_RS 2 +#define HCLGE_MAC_FEC_LLRS 3 +struct hclge_config_fec_cmd { + u8 fec_mode; + u8 default_config; + u8 rsv[22]; +}; + +#define HCLGE_FEC_STATS_CMD_NUM 4 + +struct hclge_query_fec_stats_cmd { + /* fec rs mode total stats */ + __le32 rs_fec_corr_blocks; + __le32 rs_fec_uncorr_blocks; + __le32 rs_fec_error_blocks; + /* fec base-r mode per lanes stats */ + u8 base_r_lane_num; + u8 rsv[3]; + __le32 base_r_fec_corr_blocks; + __le32 base_r_fec_uncorr_blocks; +}; + +#define HCLGE_MAC_UPLINK_PORT 0x100 + +struct hclge_config_max_frm_size_cmd { + __le16 max_frm_size; + u8 min_frm_size; + u8 rsv[21]; +}; + +enum hclge_mac_vlan_tbl_opcode { + HCLGE_MAC_VLAN_ADD, /* Add new or modify mac_vlan */ + HCLGE_MAC_VLAN_UPDATE, /* Modify other fields of this table */ + HCLGE_MAC_VLAN_REMOVE, /* Remove a entry through mac_vlan key */ + HCLGE_MAC_VLAN_LKUP, /* Lookup a entry through mac_vlan key */ +}; + +enum hclge_mac_vlan_add_resp_code { + HCLGE_ADD_UC_OVERFLOW = 2, /* ADD failed for UC overflow */ + HCLGE_ADD_MC_OVERFLOW, /* ADD failed for MC overflow */ +}; + +#define HCLGE_MAC_VLAN_BIT0_EN_B 0 +#define HCLGE_MAC_VLAN_BIT1_EN_B 1 +#define HCLGE_MAC_EPORT_SW_EN_B 12 +#define HCLGE_MAC_EPORT_TYPE_B 11 
+#define HCLGE_MAC_EPORT_VFID_S 3 +#define HCLGE_MAC_EPORT_VFID_M GENMASK(10, 3) +#define HCLGE_MAC_EPORT_PFID_S 0 +#define HCLGE_MAC_EPORT_PFID_M GENMASK(2, 0) +struct hclge_mac_vlan_tbl_entry_cmd { + u8 flags; + u8 resp_code; + __le16 vlan_tag; + __le32 mac_addr_hi32; + __le16 mac_addr_lo16; + __le16 rsv1; + u8 entry_type; + u8 mc_mac_en; + __le16 egress_port; + __le16 egress_queue; + u8 rsv2[6]; +}; + +#define HCLGE_UMV_SPC_ALC_B 0 +struct hclge_umv_spc_alc_cmd { + u8 allocate; + u8 rsv1[3]; + __le32 space_size; + u8 rsv2[16]; +}; + +#define HCLGE_MAC_MGR_MASK_VLAN_B BIT(0) +#define HCLGE_MAC_MGR_MASK_MAC_B BIT(1) +#define HCLGE_MAC_MGR_MASK_ETHERTYPE_B BIT(2) + +struct hclge_mac_mgr_tbl_entry_cmd { + u8 flags; + u8 resp_code; + __le16 vlan_tag; + u8 mac_addr[ETH_ALEN]; + __le16 rsv1; + __le16 ethter_type; + __le16 egress_port; + __le16 egress_queue; + u8 sw_port_id_aware; + u8 rsv2; + u8 i_port_bitmap; + u8 i_port_direction; + u8 rsv3[2]; +}; + +struct hclge_vlan_filter_ctrl_cmd { + u8 vlan_type; + u8 vlan_fe; + u8 rsv1[2]; + u8 vf_id; + u8 rsv2[19]; +}; + +#define HCLGE_VLAN_ID_OFFSET_STEP 160 +#define HCLGE_VLAN_BYTE_SIZE 8 +#define HCLGE_VLAN_OFFSET_BITMAP \ + (HCLGE_VLAN_ID_OFFSET_STEP / HCLGE_VLAN_BYTE_SIZE) + +struct hclge_vlan_filter_pf_cfg_cmd { + u8 vlan_offset; + u8 vlan_cfg; + u8 rsv[2]; + u8 vlan_offset_bitmap[HCLGE_VLAN_OFFSET_BITMAP]; +}; + +#define HCLGE_MAX_VF_BYTES 16 + +struct hclge_vlan_filter_vf_cfg_cmd { + __le16 vlan_id; + u8 resp_code; + u8 rsv; + u8 vlan_cfg; + u8 rsv1[3]; + u8 vf_bitmap[HCLGE_MAX_VF_BYTES]; +}; + +#define HCLGE_INGRESS_BYPASS_B 0 +struct hclge_port_vlan_filter_bypass_cmd { + u8 bypass_state; + u8 rsv1[3]; + u8 vf_id; + u8 rsv2[19]; +}; + +#define HCLGE_SWITCH_ANTI_SPOOF_B 0U +#define HCLGE_SWITCH_ALW_LPBK_B 1U +#define HCLGE_SWITCH_ALW_LCL_LPBK_B 2U +#define HCLGE_SWITCH_ALW_DST_OVRD_B 3U +#define HCLGE_SWITCH_NO_MASK 0x0 +#define HCLGE_SWITCH_ANTI_SPOOF_MASK 0xFE +#define HCLGE_SWITCH_ALW_LPBK_MASK 0xFD +#define HCLGE_SWITCH_ALW_LCL_LPBK_MASK 0xFB +#define HCLGE_SWITCH_LW_DST_OVRD_MASK 0xF7 + +struct hclge_mac_vlan_switch_cmd { + u8 roce_sel; + u8 rsv1[3]; + __le32 func_id; + u8 switch_param; + u8 rsv2[3]; + u8 param_mask; + u8 rsv3[11]; +}; + +enum hclge_mac_vlan_cfg_sel { + HCLGE_MAC_VLAN_NIC_SEL = 0, + HCLGE_MAC_VLAN_ROCE_SEL, +}; + +#define HCLGE_ACCEPT_TAG1_B 0 +#define HCLGE_ACCEPT_UNTAG1_B 1 +#define HCLGE_PORT_INS_TAG1_EN_B 2 +#define HCLGE_PORT_INS_TAG2_EN_B 3 +#define HCLGE_CFG_NIC_ROCE_SEL_B 4 +#define HCLGE_ACCEPT_TAG2_B 5 +#define HCLGE_ACCEPT_UNTAG2_B 6 +#define HCLGE_TAG_SHIFT_MODE_EN_B 7 +#define HCLGE_VF_NUM_PER_BYTE 8 + +struct hclge_vport_vtag_tx_cfg_cmd { + u8 vport_vlan_cfg; + u8 vf_offset; + u8 rsv1[2]; + __le16 def_vlan_tag1; + __le16 def_vlan_tag2; + u8 vf_bitmap[HCLGE_VF_NUM_PER_BYTE]; + u8 rsv2[8]; +}; + +#define HCLGE_REM_TAG1_EN_B 0 +#define HCLGE_REM_TAG2_EN_B 1 +#define HCLGE_SHOW_TAG1_EN_B 2 +#define HCLGE_SHOW_TAG2_EN_B 3 +#define HCLGE_DISCARD_TAG1_EN_B 5 +#define HCLGE_DISCARD_TAG2_EN_B 6 +struct hclge_vport_vtag_rx_cfg_cmd { + u8 vport_vlan_cfg; + u8 vf_offset; + u8 rsv1[6]; + u8 vf_bitmap[HCLGE_VF_NUM_PER_BYTE]; + u8 rsv2[8]; +}; + +struct hclge_tx_vlan_type_cfg_cmd { + __le16 ot_vlan_type; + __le16 in_vlan_type; + u8 rsv[20]; +}; + +struct hclge_rx_vlan_type_cfg_cmd { + __le16 ot_fst_vlan_type; + __le16 ot_sec_vlan_type; + __le16 in_fst_vlan_type; + __le16 in_sec_vlan_type; + u8 rsv[16]; +}; + +struct hclge_cfg_com_tqp_queue_cmd { + __le16 tqp_id; + __le16 stream_id; + u8 enable; + u8 rsv[19]; +}; + +struct 
hclge_cfg_tx_queue_pointer_cmd { + __le16 tqp_id; + __le16 tx_tail; + __le16 tx_head; + __le16 fbd_num; + __le16 ring_offset; + u8 rsv[14]; +}; + +#pragma pack(1) +struct hclge_mac_ethertype_idx_rd_cmd { + u8 flags; + u8 resp_code; + __le16 vlan_tag; + u8 mac_addr[ETH_ALEN]; + __le16 index; + __le16 ethter_type; + __le16 egress_port; + __le16 egress_queue; + __le16 rev0; + u8 i_port_bitmap; + u8 i_port_direction; + u8 rev1[2]; +}; + +#pragma pack() + +#define HCLGE_TSO_MSS_MIN_S 0 +#define HCLGE_TSO_MSS_MIN_M GENMASK(13, 0) + +#define HCLGE_TSO_MSS_MAX_S 16 +#define HCLGE_TSO_MSS_MAX_M GENMASK(29, 16) + +struct hclge_cfg_tso_status_cmd { + __le16 tso_mss_min; + __le16 tso_mss_max; + u8 rsv[20]; +}; + +#define HCLGE_GRO_EN_B 0 +struct hclge_cfg_gro_status_cmd { + u8 gro_en; + u8 rsv[23]; +}; + +#define HCLGE_TSO_MSS_MIN 256 +#define HCLGE_TSO_MSS_MAX 9668 + +#define HCLGE_TQP_RESET_B 0 +struct hclge_reset_tqp_queue_cmd { + __le16 tqp_id; + u8 reset_req; + u8 ready_to_reset; + u8 rsv[20]; +}; + +#define HCLGE_CFG_RESET_MAC_B 3 +#define HCLGE_CFG_RESET_FUNC_B 7 +#define HCLGE_CFG_RESET_RCB_B 1 +struct hclge_reset_cmd { + u8 mac_func_reset; + u8 fun_reset_vfid; + u8 fun_reset_rcb; + u8 rsv; + __le16 fun_reset_rcb_vqid_start; + __le16 fun_reset_rcb_vqid_num; + u8 fun_reset_rcb_return_status; + u8 rsv1[15]; +}; + +#define HCLGE_PF_RESET_DONE_BIT BIT(0) + +struct hclge_pf_rst_done_cmd { + u8 pf_rst_done; + u8 rsv[23]; +}; + +#define HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B BIT(0) +#define HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B BIT(2) +#define HCLGE_CMD_GE_PHY_INNER_LOOP_B BIT(3) +#define HCLGE_CMD_COMMON_LB_DONE_B BIT(0) +#define HCLGE_CMD_COMMON_LB_SUCCESS_B BIT(1) +struct hclge_common_lb_cmd { + u8 mask; + u8 enable; + u8 result; + u8 rsv[21]; +}; + +#define HCLGE_DEFAULT_TX_BUF 0x4000 /* 16k bytes */ +#define HCLGE_TOTAL_PKT_BUF 0x108000 /* 1.03125M bytes */ +#define HCLGE_DEFAULT_DV 0xA000 /* 40k byte */ +#define HCLGE_DEFAULT_NON_DCB_DV 0x7800 /* 30K byte */ +#define HCLGE_NON_DCB_ADDITIONAL_BUF 0x1400 /* 5120 byte */ + +#define HCLGE_LED_LOCATE_STATE_S 0 +#define HCLGE_LED_LOCATE_STATE_M GENMASK(1, 0) + +struct hclge_set_led_state_cmd { + u8 rsv1[3]; + u8 locate_led_config; + u8 rsv2[20]; +}; + +struct hclge_get_fd_mode_cmd { + u8 mode; + u8 enable; + u8 rsv[22]; +}; + +struct hclge_get_fd_allocation_cmd { + __le32 stage1_entry_num; + __le32 stage2_entry_num; + __le16 stage1_counter_num; + __le16 stage2_counter_num; + u8 rsv[12]; +}; + +struct hclge_set_fd_key_config_cmd { + u8 stage; + u8 key_select; + u8 inner_sipv6_word_en; + u8 inner_dipv6_word_en; + u8 outer_sipv6_word_en; + u8 outer_dipv6_word_en; + u8 rsv1[2]; + __le32 tuple_mask; + __le32 meta_data_mask; + u8 rsv2[8]; +}; + +#define HCLGE_FD_EPORT_SW_EN_B 0 +struct hclge_fd_tcam_config_1_cmd { + u8 stage; + u8 xy_sel; + u8 port_info; + u8 rsv1[1]; + __le32 index; + u8 entry_vld; + u8 rsv2[7]; + u8 tcam_data[8]; +}; + +struct hclge_fd_tcam_config_2_cmd { + u8 tcam_data[24]; +}; + +struct hclge_fd_tcam_config_3_cmd { + u8 tcam_data[20]; + u8 rsv[4]; +}; + +#define HCLGE_FD_AD_DROP_B 0 +#define HCLGE_FD_AD_DIRECT_QID_B 1 +#define HCLGE_FD_AD_QID_S 2 +#define HCLGE_FD_AD_QID_M GENMASK(11, 2) +#define HCLGE_FD_AD_USE_COUNTER_B 12 +#define HCLGE_FD_AD_COUNTER_NUM_S 13 +#define HCLGE_FD_AD_COUNTER_NUM_M GENMASK(20, 13) +#define HCLGE_FD_AD_NXT_STEP_B 20 +#define HCLGE_FD_AD_NXT_KEY_S 21 +#define HCLGE_FD_AD_NXT_KEY_M GENMASK(25, 21) +#define HCLGE_FD_AD_WR_RULE_ID_B 0 +#define HCLGE_FD_AD_RULE_ID_S 1 +#define HCLGE_FD_AD_RULE_ID_M GENMASK(12, 
1) +#define HCLGE_FD_AD_TC_OVRD_B 16 +#define HCLGE_FD_AD_TC_SIZE_S 17 +#define HCLGE_FD_AD_TC_SIZE_M GENMASK(20, 17) + +struct hclge_fd_ad_config_cmd { + u8 stage; + u8 rsv1[3]; + __le32 index; + __le64 ad_data; + u8 rsv2[8]; +}; + +struct hclge_fd_ad_cnt_read_cmd { + u8 rsv0[4]; + __le16 index; + u8 rsv1[2]; + __le64 cnt; + u8 rsv2[8]; +}; + +#define HCLGE_FD_USER_DEF_OFT_S 0 +#define HCLGE_FD_USER_DEF_OFT_M GENMASK(14, 0) +#define HCLGE_FD_USER_DEF_EN_B 15 +struct hclge_fd_user_def_cfg_cmd { + __le16 ol2_cfg; + __le16 l2_cfg; + __le16 ol3_cfg; + __le16 l3_cfg; + __le16 ol4_cfg; + __le16 l4_cfg; + u8 rsv[12]; +}; + +struct hclge_get_imp_bd_cmd { + __le32 bd_num; + u8 rsv[20]; +}; + +struct hclge_query_ppu_pf_other_int_dfx_cmd { + __le16 over_8bd_no_fe_qid; + __le16 over_8bd_no_fe_vf_id; + __le16 tso_mss_cmp_min_err_qid; + __le16 tso_mss_cmp_min_err_vf_id; + __le16 tso_mss_cmp_max_err_qid; + __le16 tso_mss_cmp_max_err_vf_id; + __le16 tx_rd_fbd_poison_qid; + __le16 tx_rd_fbd_poison_vf_id; + __le16 rx_rd_fbd_poison_qid; + __le16 rx_rd_fbd_poison_vf_id; + u8 rsv[4]; +}; + +#define HCLGE_SFP_INFO_CMD_NUM 6 +#define HCLGE_SFP_INFO_BD0_LEN 20 +#define HCLGE_SFP_INFO_BDX_LEN 24 +#define HCLGE_SFP_INFO_MAX_LEN \ + (HCLGE_SFP_INFO_BD0_LEN + \ + (HCLGE_SFP_INFO_CMD_NUM - 1) * HCLGE_SFP_INFO_BDX_LEN) + +struct hclge_sfp_info_bd0_cmd { + __le16 offset; + __le16 read_len; + u8 data[HCLGE_SFP_INFO_BD0_LEN]; +}; + +#define HCLGE_QUERY_DEV_SPECS_BD_NUM 4 + +struct hclge_dev_specs_0_cmd { + __le32 rsv0; + __le32 mac_entry_num; + __le32 mng_entry_num; + __le16 rss_ind_tbl_size; + __le16 rss_key_size; + __le16 int_ql_max; + u8 max_non_tso_bd_num; + u8 rsv1; + __le32 max_tm_rate; +}; + +#define HCLGE_DEF_MAX_INT_GL 0x1FE0U + +struct hclge_dev_specs_1_cmd { + __le16 max_frm_size; + __le16 max_qset_num; + __le16 max_int_gl; + u8 rsv0[2]; + __le16 umv_size; + __le16 mc_mac_size; + u8 rsv1[12]; +}; + +/* mac speed type defined in firmware command */ +enum HCLGE_FIRMWARE_MAC_SPEED { + HCLGE_FW_MAC_SPEED_1G, + HCLGE_FW_MAC_SPEED_10G, + HCLGE_FW_MAC_SPEED_25G, + HCLGE_FW_MAC_SPEED_40G, + HCLGE_FW_MAC_SPEED_50G, + HCLGE_FW_MAC_SPEED_100G, + HCLGE_FW_MAC_SPEED_10M, + HCLGE_FW_MAC_SPEED_100M, + HCLGE_FW_MAC_SPEED_200G, +}; + +#define HCLGE_PHY_LINK_SETTING_BD_NUM 2 + +struct hclge_phy_link_ksetting_0_cmd { + __le32 speed; + u8 duplex; + u8 autoneg; + u8 eth_tp_mdix; + u8 eth_tp_mdix_ctrl; + u8 port; + u8 transceiver; + u8 phy_address; + u8 rsv; + __le32 supported; + __le32 advertising; + __le32 lp_advertising; +}; + +struct hclge_phy_link_ksetting_1_cmd { + u8 master_slave_cfg; + u8 master_slave_state; + u8 rsv[22]; +}; + +struct hclge_phy_reg_cmd { + __le16 reg_addr; + u8 rsv0[2]; + __le16 reg_val; + u8 rsv1[18]; +}; + +struct hclge_hw; +int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num); +enum hclge_comm_cmd_status hclge_cmd_mdio_write(struct hclge_hw *hw, + struct hclge_desc *desc); +enum hclge_comm_cmd_status hclge_cmd_mdio_read(struct hclge_hw *hw, + struct hclge_desc *desc); +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c new file mode 100644 index 000000000..2740f0d70 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c @@ -0,0 +1,656 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. 
+ +#include "hclge_main.h" +#include "hclge_dcb.h" +#include "hclge_tm.h" +#include "hnae3.h" + +#define BW_PERCENT 100 + +static int hclge_ieee_ets_to_tm_info(struct hclge_dev *hdev, + struct ieee_ets *ets) +{ + u8 i; + + for (i = 0; i < HNAE3_MAX_TC; i++) { + switch (ets->tc_tsa[i]) { + case IEEE_8021QAZ_TSA_STRICT: + hdev->tm_info.tc_info[i].tc_sch_mode = + HCLGE_SCH_MODE_SP; + hdev->tm_info.pg_info[0].tc_dwrr[i] = 0; + break; + case IEEE_8021QAZ_TSA_ETS: + hdev->tm_info.tc_info[i].tc_sch_mode = + HCLGE_SCH_MODE_DWRR; + hdev->tm_info.pg_info[0].tc_dwrr[i] = + ets->tc_tx_bw[i]; + break; + default: + /* Hardware only supports SP (strict priority) + * or ETS (enhanced transmission selection) + * algorithms, if we receive some other value + * from dcbnl, then throw an error. + */ + return -EINVAL; + } + } + + hclge_tm_prio_tc_info_update(hdev, ets->prio_tc); + + return 0; +} + +static void hclge_tm_info_to_ieee_ets(struct hclge_dev *hdev, + struct ieee_ets *ets) +{ + u32 i; + + memset(ets, 0, sizeof(*ets)); + ets->willing = 1; + ets->ets_cap = hdev->tc_max; + + for (i = 0; i < HNAE3_MAX_TC; i++) { + ets->prio_tc[i] = hdev->tm_info.prio_tc[i]; + if (i < hdev->tm_info.num_tc) + ets->tc_tx_bw[i] = hdev->tm_info.pg_info[0].tc_dwrr[i]; + else + ets->tc_tx_bw[i] = 0; + + if (hdev->tm_info.tc_info[i].tc_sch_mode == + HCLGE_SCH_MODE_SP) + ets->tc_tsa[i] = IEEE_8021QAZ_TSA_STRICT; + else + ets->tc_tsa[i] = IEEE_8021QAZ_TSA_ETS; + } +} + +/* IEEE std */ +static int hclge_ieee_getets(struct hnae3_handle *h, struct ieee_ets *ets) +{ + struct hclge_vport *vport = hclge_get_vport(h); + struct hclge_dev *hdev = vport->back; + + hclge_tm_info_to_ieee_ets(hdev, ets); + + return 0; +} + +static int hclge_dcb_common_validate(struct hclge_dev *hdev, u8 num_tc, + u8 *prio_tc) +{ + int i; + + if (num_tc > hdev->tc_max) { + dev_err(&hdev->pdev->dev, + "tc num checking failed, %u > tc_max(%u)\n", + num_tc, hdev->tc_max); + return -EINVAL; + } + + for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) { + if (prio_tc[i] >= num_tc) { + dev_err(&hdev->pdev->dev, + "prio_tc[%d] checking failed, %u >= num_tc(%u)\n", + i, prio_tc[i], num_tc); + return -EINVAL; + } + } + + if (num_tc > hdev->vport[0].alloc_tqps) { + dev_err(&hdev->pdev->dev, + "allocated tqp checking failed, %u > tqp(%u)\n", + num_tc, hdev->vport[0].alloc_tqps); + return -EINVAL; + } + + return 0; +} + +static u8 hclge_ets_tc_changed(struct hclge_dev *hdev, struct ieee_ets *ets, + bool *changed) +{ + u8 max_tc_id = 0; + u8 i; + + for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) { + if (ets->prio_tc[i] != hdev->tm_info.prio_tc[i]) + *changed = true; + + if (ets->prio_tc[i] > max_tc_id) + max_tc_id = ets->prio_tc[i]; + } + + /* return max tc number, max tc id need to plus 1 */ + return max_tc_id + 1; +} + +static int hclge_ets_sch_mode_validate(struct hclge_dev *hdev, + struct ieee_ets *ets, bool *changed, + u8 tc_num) +{ + bool has_ets_tc = false; + u32 total_ets_bw = 0; + u8 i; + + for (i = 0; i < HNAE3_MAX_TC; i++) { + switch (ets->tc_tsa[i]) { + case IEEE_8021QAZ_TSA_STRICT: + if (hdev->tm_info.tc_info[i].tc_sch_mode != + HCLGE_SCH_MODE_SP) + *changed = true; + break; + case IEEE_8021QAZ_TSA_ETS: + if (i >= tc_num) { + dev_err(&hdev->pdev->dev, + "tc%u is disabled, cannot set ets bw\n", + i); + return -EINVAL; + } + + /* The hardware will switch to sp mode if bandwidth is + * 0, so limit ets bandwidth must be greater than 0. 
+ */ + if (!ets->tc_tx_bw[i]) { + dev_err(&hdev->pdev->dev, + "tc%u ets bw cannot be 0\n", i); + return -EINVAL; + } + + if (hdev->tm_info.tc_info[i].tc_sch_mode != + HCLGE_SCH_MODE_DWRR) + *changed = true; + + total_ets_bw += ets->tc_tx_bw[i]; + has_ets_tc = true; + break; + default: + return -EINVAL; + } + } + + if (has_ets_tc && total_ets_bw != BW_PERCENT) + return -EINVAL; + + return 0; +} + +static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets, + u8 *tc, bool *changed) +{ + u8 tc_num; + int ret; + + tc_num = hclge_ets_tc_changed(hdev, ets, changed); + + ret = hclge_dcb_common_validate(hdev, tc_num, ets->prio_tc); + if (ret) + return ret; + + ret = hclge_ets_sch_mode_validate(hdev, ets, changed, tc_num); + if (ret) + return ret; + + *tc = tc_num; + if (*tc != hdev->tm_info.num_tc) + *changed = true; + + return 0; +} + +static int hclge_map_update(struct hclge_dev *hdev) +{ + int ret; + + ret = hclge_tm_schd_setup_hw(hdev); + if (ret) + return ret; + + ret = hclge_pause_setup_hw(hdev, false); + if (ret) + return ret; + + ret = hclge_buffer_alloc(hdev); + if (ret) + return ret; + + hclge_comm_rss_indir_init_cfg(hdev->ae_dev, &hdev->rss_cfg); + + return hclge_rss_init_hw(hdev); +} + +static int hclge_notify_down_uinit(struct hclge_dev *hdev) +{ + int ret; + + ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); + if (ret) + return ret; + + return hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT); +} + +static int hclge_notify_init_up(struct hclge_dev *hdev) +{ + int ret; + + ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT); + if (ret) + return ret; + + return hclge_notify_client(hdev, HNAE3_UP_CLIENT); +} + +static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets) +{ + struct hclge_vport *vport = hclge_get_vport(h); + struct net_device *netdev = h->kinfo.netdev; + struct hclge_dev *hdev = vport->back; + bool map_changed = false; + u8 num_tc = 0; + int ret; + + if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) || + h->kinfo.tc_info.mqprio_active) + return -EINVAL; + + ret = hclge_ets_validate(hdev, ets, &num_tc, &map_changed); + if (ret) + return ret; + + if (map_changed) { + netif_dbg(h, drv, netdev, "set ets\n"); + + ret = hclge_notify_down_uinit(hdev); + if (ret) + return ret; + } + + hclge_tm_schd_info_update(hdev, num_tc); + h->kinfo.tc_info.dcb_ets_active = num_tc > 1; + + ret = hclge_ieee_ets_to_tm_info(hdev, ets); + if (ret) + goto err_out; + + if (map_changed) { + ret = hclge_map_update(hdev); + if (ret) + goto err_out; + + return hclge_notify_init_up(hdev); + } + + return hclge_tm_dwrr_cfg(hdev); + +err_out: + if (!map_changed) + return ret; + + hclge_notify_init_up(hdev); + + return ret; +} + +static int hclge_ieee_getpfc(struct hnae3_handle *h, struct ieee_pfc *pfc) +{ + struct hclge_vport *vport = hclge_get_vport(h); + struct hclge_dev *hdev = vport->back; + int ret; + + memset(pfc, 0, sizeof(*pfc)); + pfc->pfc_cap = hdev->pfc_max; + pfc->pfc_en = hdev->tm_info.pfc_en; + + ret = hclge_mac_update_stats(hdev); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to update MAC stats, ret = %d.\n", ret); + return ret; + } + + hclge_pfc_tx_stats_get(hdev, pfc->requests); + hclge_pfc_rx_stats_get(hdev, pfc->indications); + + return 0; +} + +static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc) +{ + struct hclge_vport *vport = hclge_get_vport(h); + struct net_device *netdev = h->kinfo.netdev; + struct hclge_dev *hdev = vport->back; + u8 i, j, pfc_map, *prio_tc; + int ret; + + if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) + return 
-EINVAL; + + if (pfc->pfc_en == hdev->tm_info.pfc_en) + return 0; + + prio_tc = hdev->tm_info.prio_tc; + pfc_map = 0; + + for (i = 0; i < hdev->tm_info.num_tc; i++) { + for (j = 0; j < HNAE3_MAX_USER_PRIO; j++) { + if ((prio_tc[j] == i) && (pfc->pfc_en & BIT(j))) { + pfc_map |= BIT(i); + break; + } + } + } + + hdev->tm_info.hw_pfc_map = pfc_map; + hdev->tm_info.pfc_en = pfc->pfc_en; + + netif_dbg(h, drv, netdev, + "set pfc: pfc_en=%x, pfc_map=%x, num_tc=%u\n", + pfc->pfc_en, pfc_map, hdev->tm_info.num_tc); + + hclge_tm_pfc_info_update(hdev); + + ret = hclge_pause_setup_hw(hdev, false); + if (ret) + return ret; + + ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); + if (ret) + return ret; + + ret = hclge_buffer_alloc(hdev); + if (ret) { + hclge_notify_client(hdev, HNAE3_UP_CLIENT); + return ret; + } + + return hclge_notify_client(hdev, HNAE3_UP_CLIENT); +} + +static int hclge_ieee_setapp(struct hnae3_handle *h, struct dcb_app *app) +{ + struct hclge_vport *vport = hclge_get_vport(h); + struct net_device *netdev = h->kinfo.netdev; + struct hclge_dev *hdev = vport->back; + struct dcb_app old_app; + int ret; + + if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP || + app->protocol >= HNAE3_MAX_DSCP || + app->priority >= HNAE3_MAX_USER_PRIO) + return -EINVAL; + + dev_info(&hdev->pdev->dev, "setapp dscp=%u priority=%u\n", + app->protocol, app->priority); + + if (app->priority == h->kinfo.dscp_prio[app->protocol]) + return 0; + + ret = dcb_ieee_setapp(netdev, app); + if (ret) + return ret; + + old_app.selector = IEEE_8021QAZ_APP_SEL_DSCP; + old_app.protocol = app->protocol; + old_app.priority = h->kinfo.dscp_prio[app->protocol]; + + h->kinfo.dscp_prio[app->protocol] = app->priority; + ret = hclge_dscp_to_tc_map(hdev); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to set dscp to tc map, ret = %d\n", ret); + h->kinfo.dscp_prio[app->protocol] = old_app.priority; + (void)dcb_ieee_delapp(netdev, app); + return ret; + } + + vport->nic.kinfo.tc_map_mode = HNAE3_TC_MAP_MODE_DSCP; + if (old_app.priority == HNAE3_PRIO_ID_INVALID) + h->kinfo.dscp_app_cnt++; + else + ret = dcb_ieee_delapp(netdev, &old_app); + + return ret; +} + +static int hclge_ieee_delapp(struct hnae3_handle *h, struct dcb_app *app) +{ + struct hclge_vport *vport = hclge_get_vport(h); + struct net_device *netdev = h->kinfo.netdev; + struct hclge_dev *hdev = vport->back; + int ret; + + if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP || + app->protocol >= HNAE3_MAX_DSCP || + app->priority >= HNAE3_MAX_USER_PRIO || + app->priority != h->kinfo.dscp_prio[app->protocol]) + return -EINVAL; + + dev_info(&hdev->pdev->dev, "delapp dscp=%u priority=%u\n", + app->protocol, app->priority); + + ret = dcb_ieee_delapp(netdev, app); + if (ret) + return ret; + + h->kinfo.dscp_prio[app->protocol] = HNAE3_PRIO_ID_INVALID; + ret = hclge_dscp_to_tc_map(hdev); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to del dscp to tc map, ret = %d\n", ret); + h->kinfo.dscp_prio[app->protocol] = app->priority; + (void)dcb_ieee_setapp(netdev, app); + return ret; + } + + if (h->kinfo.dscp_app_cnt) + h->kinfo.dscp_app_cnt--; + + if (!h->kinfo.dscp_app_cnt) { + vport->nic.kinfo.tc_map_mode = HNAE3_TC_MAP_MODE_PRIO; + ret = hclge_up_to_tc_map(hdev); + } + + return ret; +} + +/* DCBX configuration */ +static u8 hclge_getdcbx(struct hnae3_handle *h) +{ + struct hclge_vport *vport = hclge_get_vport(h); + struct hclge_dev *hdev = vport->back; + + if (h->kinfo.tc_info.mqprio_active) + return 0; + + return hdev->dcbx_cap; +} + +static u8 hclge_setdcbx(struct hnae3_handle *h, u8 
mode) +{ + struct hclge_vport *vport = hclge_get_vport(h); + struct net_device *netdev = h->kinfo.netdev; + struct hclge_dev *hdev = vport->back; + + netif_dbg(h, drv, netdev, "set dcbx: mode=%u\n", mode); + + /* No support for LLD_MANAGED modes or CEE */ + if ((mode & DCB_CAP_DCBX_LLD_MANAGED) || + (mode & DCB_CAP_DCBX_VER_CEE) || + !(mode & DCB_CAP_DCBX_HOST)) + return 1; + + hdev->dcbx_cap = mode; + + return 0; +} + +static int hclge_mqprio_qopt_check(struct hclge_dev *hdev, + struct tc_mqprio_qopt_offload *mqprio_qopt) +{ + u16 queue_sum = 0; + int ret; + int i; + + if (!mqprio_qopt->qopt.num_tc) { + mqprio_qopt->qopt.num_tc = 1; + return 0; + } + + ret = hclge_dcb_common_validate(hdev, mqprio_qopt->qopt.num_tc, + mqprio_qopt->qopt.prio_tc_map); + if (ret) + return ret; + + for (i = 0; i < mqprio_qopt->qopt.num_tc; i++) { + if (!is_power_of_2(mqprio_qopt->qopt.count[i])) { + dev_err(&hdev->pdev->dev, + "qopt queue count must be power of 2\n"); + return -EINVAL; + } + + if (mqprio_qopt->qopt.count[i] > hdev->pf_rss_size_max) { + dev_err(&hdev->pdev->dev, + "qopt queue count should be no more than %u\n", + hdev->pf_rss_size_max); + return -EINVAL; + } + + if (mqprio_qopt->qopt.offset[i] != queue_sum) { + dev_err(&hdev->pdev->dev, + "qopt queue offset must start from 0, and being continuous\n"); + return -EINVAL; + } + + if (mqprio_qopt->min_rate[i] || mqprio_qopt->max_rate[i]) { + dev_err(&hdev->pdev->dev, + "qopt tx_rate is not supported\n"); + return -EOPNOTSUPP; + } + + queue_sum = mqprio_qopt->qopt.offset[i]; + queue_sum += mqprio_qopt->qopt.count[i]; + } + if (hdev->vport[0].alloc_tqps < queue_sum) { + dev_err(&hdev->pdev->dev, + "qopt queue count sum should be less than %u\n", + hdev->vport[0].alloc_tqps); + return -EINVAL; + } + + return 0; +} + +static void hclge_sync_mqprio_qopt(struct hnae3_tc_info *tc_info, + struct tc_mqprio_qopt_offload *mqprio_qopt) +{ + memset(tc_info, 0, sizeof(*tc_info)); + tc_info->num_tc = mqprio_qopt->qopt.num_tc; + memcpy(tc_info->prio_tc, mqprio_qopt->qopt.prio_tc_map, + sizeof_field(struct hnae3_tc_info, prio_tc)); + memcpy(tc_info->tqp_count, mqprio_qopt->qopt.count, + sizeof_field(struct hnae3_tc_info, tqp_count)); + memcpy(tc_info->tqp_offset, mqprio_qopt->qopt.offset, + sizeof_field(struct hnae3_tc_info, tqp_offset)); +} + +static int hclge_config_tc(struct hclge_dev *hdev, + struct hnae3_tc_info *tc_info) +{ + int i; + + hclge_tm_schd_info_update(hdev, tc_info->num_tc); + for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) + hdev->tm_info.prio_tc[i] = tc_info->prio_tc[i]; + + return hclge_map_update(hdev); +} + +/* Set up TC for hardware offloaded mqprio in channel mode */ +static int hclge_setup_tc(struct hnae3_handle *h, + struct tc_mqprio_qopt_offload *mqprio_qopt) +{ + struct hclge_vport *vport = hclge_get_vport(h); + struct hnae3_knic_private_info *kinfo; + struct hclge_dev *hdev = vport->back; + struct hnae3_tc_info old_tc_info; + u8 tc = mqprio_qopt->qopt.num_tc; + int ret; + + /* if client unregistered, it's not allowed to change + * mqprio configuration, which may cause uninit ring + * fail. 
+ */ + if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state)) + return -EBUSY; + + kinfo = &vport->nic.kinfo; + if (kinfo->tc_info.dcb_ets_active) + return -EINVAL; + + ret = hclge_mqprio_qopt_check(hdev, mqprio_qopt); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to check mqprio qopt params, ret = %d\n", ret); + return ret; + } + + ret = hclge_notify_down_uinit(hdev); + if (ret) + return ret; + + memcpy(&old_tc_info, &kinfo->tc_info, sizeof(old_tc_info)); + hclge_sync_mqprio_qopt(&kinfo->tc_info, mqprio_qopt); + kinfo->tc_info.mqprio_active = tc > 0; + + ret = hclge_config_tc(hdev, &kinfo->tc_info); + if (ret) + goto err_out; + + return hclge_notify_init_up(hdev); + +err_out: + if (!tc) { + dev_warn(&hdev->pdev->dev, + "failed to destroy mqprio, will active after reset, ret = %d\n", + ret); + } else { + /* roll-back */ + memcpy(&kinfo->tc_info, &old_tc_info, sizeof(old_tc_info)); + if (hclge_config_tc(hdev, &kinfo->tc_info)) + dev_err(&hdev->pdev->dev, + "failed to roll back tc configuration\n"); + } + hclge_notify_init_up(hdev); + + return ret; +} + +static const struct hnae3_dcb_ops hns3_dcb_ops = { + .ieee_getets = hclge_ieee_getets, + .ieee_setets = hclge_ieee_setets, + .ieee_getpfc = hclge_ieee_getpfc, + .ieee_setpfc = hclge_ieee_setpfc, + .ieee_setapp = hclge_ieee_setapp, + .ieee_delapp = hclge_ieee_delapp, + .getdcbx = hclge_getdcbx, + .setdcbx = hclge_setdcbx, + .setup_tc = hclge_setup_tc, +}; + +void hclge_dcb_ops_set(struct hclge_dev *hdev) +{ + struct hclge_vport *vport = hdev->vport; + struct hnae3_knic_private_info *kinfo; + + /* Hdev does not support DCB or vport is + * not a pf, then dcb_ops is not set. + */ + if (!hnae3_dev_dcb_supported(hdev) || + vport->vport_id != 0) + return; + + kinfo = &vport->nic.kinfo; + kinfo->dcb_ops = &hns3_dcb_ops; + hdev->dcbx_cap = DCB_CAP_DCBX_VER_IEEE | DCB_CAP_DCBX_HOST; +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.h new file mode 100644 index 000000000..b04702e65 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +// Copyright (c) 2016-2017 Hisilicon Limited. + +#ifndef __HCLGE_DCB_H__ +#define __HCLGE_DCB_H__ + +#include "hclge_main.h" + +#ifdef CONFIG_HNS3_DCB +void hclge_dcb_ops_set(struct hclge_dev *hdev); +#else +static inline void hclge_dcb_ops_set(struct hclge_dev *hdev) {} +#endif + +#endif /* __HCLGE_DCB_H__ */ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c new file mode 100644 index 000000000..a1c59f4aa --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c @@ -0,0 +1,2587 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) 2018-2019 Hisilicon Limited. 
*/ + +#include <linux/device.h> + +#include "hclge_debugfs.h" +#include "hclge_err.h" +#include "hclge_main.h" +#include "hclge_tm.h" +#include "hnae3.h" + +static const char * const state_str[] = { "off", "on" }; +static const char * const hclge_mac_state_str[] = { + "TO_ADD", "TO_DEL", "ACTIVE" +}; + +static const char * const tc_map_mode_str[] = { "PRIO", "DSCP" }; + +static const struct hclge_dbg_reg_type_info hclge_dbg_reg_info[] = { + { .cmd = HNAE3_DBG_CMD_REG_BIOS_COMMON, + .dfx_msg = &hclge_dbg_bios_common_reg[0], + .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_bios_common_reg), + .offset = HCLGE_DBG_DFX_BIOS_OFFSET, + .cmd = HCLGE_OPC_DFX_BIOS_COMMON_REG } }, + { .cmd = HNAE3_DBG_CMD_REG_SSU, + .dfx_msg = &hclge_dbg_ssu_reg_0[0], + .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_0), + .offset = HCLGE_DBG_DFX_SSU_0_OFFSET, + .cmd = HCLGE_OPC_DFX_SSU_REG_0 } }, + { .cmd = HNAE3_DBG_CMD_REG_SSU, + .dfx_msg = &hclge_dbg_ssu_reg_1[0], + .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_1), + .offset = HCLGE_DBG_DFX_SSU_1_OFFSET, + .cmd = HCLGE_OPC_DFX_SSU_REG_1 } }, + { .cmd = HNAE3_DBG_CMD_REG_SSU, + .dfx_msg = &hclge_dbg_ssu_reg_2[0], + .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_2), + .offset = HCLGE_DBG_DFX_SSU_2_OFFSET, + .cmd = HCLGE_OPC_DFX_SSU_REG_2 } }, + { .cmd = HNAE3_DBG_CMD_REG_IGU_EGU, + .dfx_msg = &hclge_dbg_igu_egu_reg[0], + .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_igu_egu_reg), + .offset = HCLGE_DBG_DFX_IGU_OFFSET, + .cmd = HCLGE_OPC_DFX_IGU_EGU_REG } }, + { .cmd = HNAE3_DBG_CMD_REG_RPU, + .dfx_msg = &hclge_dbg_rpu_reg_0[0], + .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rpu_reg_0), + .offset = HCLGE_DBG_DFX_RPU_0_OFFSET, + .cmd = HCLGE_OPC_DFX_RPU_REG_0 } }, + { .cmd = HNAE3_DBG_CMD_REG_RPU, + .dfx_msg = &hclge_dbg_rpu_reg_1[0], + .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rpu_reg_1), + .offset = HCLGE_DBG_DFX_RPU_1_OFFSET, + .cmd = HCLGE_OPC_DFX_RPU_REG_1 } }, + { .cmd = HNAE3_DBG_CMD_REG_NCSI, + .dfx_msg = &hclge_dbg_ncsi_reg[0], + .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ncsi_reg), + .offset = HCLGE_DBG_DFX_NCSI_OFFSET, + .cmd = HCLGE_OPC_DFX_NCSI_REG } }, + { .cmd = HNAE3_DBG_CMD_REG_RTC, + .dfx_msg = &hclge_dbg_rtc_reg[0], + .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rtc_reg), + .offset = HCLGE_DBG_DFX_RTC_OFFSET, + .cmd = HCLGE_OPC_DFX_RTC_REG } }, + { .cmd = HNAE3_DBG_CMD_REG_PPP, + .dfx_msg = &hclge_dbg_ppp_reg[0], + .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ppp_reg), + .offset = HCLGE_DBG_DFX_PPP_OFFSET, + .cmd = HCLGE_OPC_DFX_PPP_REG } }, + { .cmd = HNAE3_DBG_CMD_REG_RCB, + .dfx_msg = &hclge_dbg_rcb_reg[0], + .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rcb_reg), + .offset = HCLGE_DBG_DFX_RCB_OFFSET, + .cmd = HCLGE_OPC_DFX_RCB_REG } }, + { .cmd = HNAE3_DBG_CMD_REG_TQP, + .dfx_msg = &hclge_dbg_tqp_reg[0], + .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_tqp_reg), + .offset = HCLGE_DBG_DFX_TQP_OFFSET, + .cmd = HCLGE_OPC_DFX_TQP_REG } }, +}; + +/* make sure: len(name) + interval >= maxlen(item data) + 2, + * for example, name = "pkt_num"(len: 7), the prototype of item data is u32, + * and print as "%u"(maxlen: 10), so the interval should be at least 5. 
+ */ +static void hclge_dbg_fill_content(char *content, u16 len, + const struct hclge_dbg_item *items, + const char **result, u16 size) +{ +#define HCLGE_DBG_LINE_END_LEN 2 + char *pos = content; + u16 item_len; + u16 i; + + if (!len) { + return; + } else if (len <= HCLGE_DBG_LINE_END_LEN) { + *pos++ = '\0'; + return; + } + + memset(content, ' ', len); + len -= HCLGE_DBG_LINE_END_LEN; + + for (i = 0; i < size; i++) { + item_len = strlen(items[i].name) + items[i].interval; + if (len < item_len) + break; + + if (result) { + if (item_len < strlen(result[i])) + break; + memcpy(pos, result[i], strlen(result[i])); + } else { + memcpy(pos, items[i].name, strlen(items[i].name)); + } + pos += item_len; + len -= item_len; + } + *pos++ = '\n'; + *pos++ = '\0'; +} + +static char *hclge_dbg_get_func_id_str(char *buf, u8 id) +{ + if (id) + sprintf(buf, "vf%u", id - 1U); + else + sprintf(buf, "pf"); + + return buf; +} + +static int hclge_dbg_get_dfx_bd_num(struct hclge_dev *hdev, int offset, + u32 *bd_num) +{ + struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT]; + int entries_per_desc; + int index; + int ret; + + ret = hclge_query_bd_num_cmd_send(hdev, desc); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get dfx bd_num, offset = %d, ret = %d\n", + offset, ret); + return ret; + } + + entries_per_desc = ARRAY_SIZE(desc[0].data); + index = offset % entries_per_desc; + + *bd_num = le32_to_cpu(desc[offset / entries_per_desc].data[index]); + if (!(*bd_num)) { + dev_err(&hdev->pdev->dev, "The value of dfx bd_num is 0!\n"); + return -EINVAL; + } + + return 0; +} + +static int hclge_dbg_cmd_send(struct hclge_dev *hdev, + struct hclge_desc *desc_src, + int index, int bd_num, + enum hclge_opcode_type cmd) +{ + struct hclge_desc *desc = desc_src; + int ret, i; + + hclge_cmd_setup_basic_desc(desc, cmd, true); + desc->data[0] = cpu_to_le32(index); + + for (i = 1; i < bd_num; i++) { + desc->flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); + desc++; + hclge_cmd_setup_basic_desc(desc, cmd, true); + } + + ret = hclge_cmd_send(&hdev->hw, desc_src, bd_num); + if (ret) + dev_err(&hdev->pdev->dev, + "cmd(0x%x) send fail, ret = %d\n", cmd, ret); + return ret; +} + +static int +hclge_dbg_dump_reg_tqp(struct hclge_dev *hdev, + const struct hclge_dbg_reg_type_info *reg_info, + char *buf, int len, int *pos) +{ + const struct hclge_dbg_dfx_message *dfx_message = reg_info->dfx_msg; + const struct hclge_dbg_reg_common_msg *reg_msg = ®_info->reg_msg; + struct hclge_desc *desc_src; + u32 index, entry, i, cnt; + int bd_num, min_num, ret; + struct hclge_desc *desc; + + ret = hclge_dbg_get_dfx_bd_num(hdev, reg_msg->offset, &bd_num); + if (ret) + return ret; + + desc_src = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL); + if (!desc_src) + return -ENOMEM; + + min_num = min_t(int, bd_num * HCLGE_DESC_DATA_LEN, reg_msg->msg_num); + + for (i = 0, cnt = 0; i < min_num; i++, dfx_message++) + *pos += scnprintf(buf + *pos, len - *pos, "item%u = %s\n", + cnt++, dfx_message->message); + + for (i = 0; i < cnt; i++) + *pos += scnprintf(buf + *pos, len - *pos, "item%u\t", i); + + *pos += scnprintf(buf + *pos, len - *pos, "\n"); + + for (index = 0; index < hdev->vport[0].alloc_tqps; index++) { + dfx_message = reg_info->dfx_msg; + desc = desc_src; + ret = hclge_dbg_cmd_send(hdev, desc, index, bd_num, + reg_msg->cmd); + if (ret) + break; + + for (i = 0; i < min_num; i++, dfx_message++) { + entry = i % HCLGE_DESC_DATA_LEN; + if (i > 0 && !entry) + desc++; + + *pos += scnprintf(buf + *pos, len - *pos, "%#x\t", + 
le32_to_cpu(desc->data[entry])); + } + *pos += scnprintf(buf + *pos, len - *pos, "\n"); + } + + kfree(desc_src); + return ret; +} + +static int +hclge_dbg_dump_reg_common(struct hclge_dev *hdev, + const struct hclge_dbg_reg_type_info *reg_info, + char *buf, int len, int *pos) +{ + const struct hclge_dbg_reg_common_msg *reg_msg = ®_info->reg_msg; + const struct hclge_dbg_dfx_message *dfx_message = reg_info->dfx_msg; + struct hclge_desc *desc_src; + int bd_num, min_num, ret; + struct hclge_desc *desc; + u32 entry, i; + + ret = hclge_dbg_get_dfx_bd_num(hdev, reg_msg->offset, &bd_num); + if (ret) + return ret; + + desc_src = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL); + if (!desc_src) + return -ENOMEM; + + desc = desc_src; + + ret = hclge_dbg_cmd_send(hdev, desc, 0, bd_num, reg_msg->cmd); + if (ret) { + kfree(desc); + return ret; + } + + min_num = min_t(int, bd_num * HCLGE_DESC_DATA_LEN, reg_msg->msg_num); + + for (i = 0; i < min_num; i++, dfx_message++) { + entry = i % HCLGE_DESC_DATA_LEN; + if (i > 0 && !entry) + desc++; + if (!dfx_message->flag) + continue; + + *pos += scnprintf(buf + *pos, len - *pos, "%s: %#x\n", + dfx_message->message, + le32_to_cpu(desc->data[entry])); + } + + kfree(desc_src); + return 0; +} + +static const struct hclge_dbg_status_dfx_info hclge_dbg_mac_en_status[] = { + {HCLGE_MAC_TX_EN_B, "mac_trans_en"}, + {HCLGE_MAC_RX_EN_B, "mac_rcv_en"}, + {HCLGE_MAC_PAD_TX_B, "pad_trans_en"}, + {HCLGE_MAC_PAD_RX_B, "pad_rcv_en"}, + {HCLGE_MAC_1588_TX_B, "1588_trans_en"}, + {HCLGE_MAC_1588_RX_B, "1588_rcv_en"}, + {HCLGE_MAC_APP_LP_B, "mac_app_loop_en"}, + {HCLGE_MAC_LINE_LP_B, "mac_line_loop_en"}, + {HCLGE_MAC_FCS_TX_B, "mac_fcs_tx_en"}, + {HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, "mac_rx_oversize_truncate_en"}, + {HCLGE_MAC_RX_FCS_STRIP_B, "mac_rx_fcs_strip_en"}, + {HCLGE_MAC_RX_FCS_B, "mac_rx_fcs_en"}, + {HCLGE_MAC_TX_UNDER_MIN_ERR_B, "mac_tx_under_min_err_en"}, + {HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, "mac_tx_oversize_truncate_en"} +}; + +static int hclge_dbg_dump_mac_enable_status(struct hclge_dev *hdev, char *buf, + int len, int *pos) +{ + struct hclge_config_mac_mode_cmd *req; + struct hclge_desc desc; + u32 loop_en, i, offset; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to dump mac enable status, ret = %d\n", ret); + return ret; + } + + req = (struct hclge_config_mac_mode_cmd *)desc.data; + loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en); + + for (i = 0; i < ARRAY_SIZE(hclge_dbg_mac_en_status); i++) { + offset = hclge_dbg_mac_en_status[i].offset; + *pos += scnprintf(buf + *pos, len - *pos, "%s: %#x\n", + hclge_dbg_mac_en_status[i].message, + hnae3_get_bit(loop_en, offset)); + } + + return 0; +} + +static int hclge_dbg_dump_mac_frame_size(struct hclge_dev *hdev, char *buf, + int len, int *pos) +{ + struct hclge_config_max_frm_size_cmd *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, true); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to dump mac frame size, ret = %d\n", ret); + return ret; + } + + req = (struct hclge_config_max_frm_size_cmd *)desc.data; + + *pos += scnprintf(buf + *pos, len - *pos, "max_frame_size: %u\n", + le16_to_cpu(req->max_frm_size)); + *pos += scnprintf(buf + *pos, len - *pos, "min_frame_size: %u\n", + req->min_frm_size); + + return 0; +} + +static int hclge_dbg_dump_mac_speed_duplex(struct hclge_dev 
*hdev, char *buf, + int len, int *pos) +{ +#define HCLGE_MAC_SPEED_SHIFT 0 +#define HCLGE_MAC_SPEED_MASK GENMASK(5, 0) +#define HCLGE_MAC_DUPLEX_SHIFT 7 + + struct hclge_config_mac_speed_dup_cmd *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, true); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to dump mac speed duplex, ret = %d\n", ret); + return ret; + } + + req = (struct hclge_config_mac_speed_dup_cmd *)desc.data; + + *pos += scnprintf(buf + *pos, len - *pos, "speed: %#lx\n", + hnae3_get_field(req->speed_dup, HCLGE_MAC_SPEED_MASK, + HCLGE_MAC_SPEED_SHIFT)); + *pos += scnprintf(buf + *pos, len - *pos, "duplex: %#x\n", + hnae3_get_bit(req->speed_dup, + HCLGE_MAC_DUPLEX_SHIFT)); + return 0; +} + +static int hclge_dbg_dump_mac(struct hclge_dev *hdev, char *buf, int len) +{ + int pos = 0; + int ret; + + ret = hclge_dbg_dump_mac_enable_status(hdev, buf, len, &pos); + if (ret) + return ret; + + ret = hclge_dbg_dump_mac_frame_size(hdev, buf, len, &pos); + if (ret) + return ret; + + return hclge_dbg_dump_mac_speed_duplex(hdev, buf, len, &pos); +} + +static int hclge_dbg_dump_dcb_qset(struct hclge_dev *hdev, char *buf, int len, + int *pos) +{ + struct hclge_dbg_bitmap_cmd req; + struct hclge_desc desc; + u16 qset_id, qset_num; + int ret; + + ret = hclge_tm_get_qset_num(hdev, &qset_num); + if (ret) + return ret; + + *pos += scnprintf(buf + *pos, len - *pos, + "qset_id roce_qset_mask nic_qset_mask qset_shaping_pass qset_bp_status\n"); + for (qset_id = 0; qset_id < qset_num; qset_id++) { + ret = hclge_dbg_cmd_send(hdev, &desc, qset_id, 1, + HCLGE_OPC_QSET_DFX_STS); + if (ret) + return ret; + + req.bitmap = (u8)le32_to_cpu(desc.data[1]); + + *pos += scnprintf(buf + *pos, len - *pos, + "%04u %#x %#x %#x %#x\n", + qset_id, req.bit0, req.bit1, req.bit2, + req.bit3); + } + + return 0; +} + +static int hclge_dbg_dump_dcb_pri(struct hclge_dev *hdev, char *buf, int len, + int *pos) +{ + struct hclge_dbg_bitmap_cmd req; + struct hclge_desc desc; + u8 pri_id, pri_num; + int ret; + + ret = hclge_tm_get_pri_num(hdev, &pri_num); + if (ret) + return ret; + + *pos += scnprintf(buf + *pos, len - *pos, + "pri_id pri_mask pri_cshaping_pass pri_pshaping_pass\n"); + for (pri_id = 0; pri_id < pri_num; pri_id++) { + ret = hclge_dbg_cmd_send(hdev, &desc, pri_id, 1, + HCLGE_OPC_PRI_DFX_STS); + if (ret) + return ret; + + req.bitmap = (u8)le32_to_cpu(desc.data[1]); + + *pos += scnprintf(buf + *pos, len - *pos, + "%03u %#x %#x %#x\n", + pri_id, req.bit0, req.bit1, req.bit2); + } + + return 0; +} + +static int hclge_dbg_dump_dcb_pg(struct hclge_dev *hdev, char *buf, int len, + int *pos) +{ + struct hclge_dbg_bitmap_cmd req; + struct hclge_desc desc; + u8 pg_id; + int ret; + + *pos += scnprintf(buf + *pos, len - *pos, + "pg_id pg_mask pg_cshaping_pass pg_pshaping_pass\n"); + for (pg_id = 0; pg_id < hdev->tm_info.num_pg; pg_id++) { + ret = hclge_dbg_cmd_send(hdev, &desc, pg_id, 1, + HCLGE_OPC_PG_DFX_STS); + if (ret) + return ret; + + req.bitmap = (u8)le32_to_cpu(desc.data[1]); + + *pos += scnprintf(buf + *pos, len - *pos, + "%03u %#x %#x %#x\n", + pg_id, req.bit0, req.bit1, req.bit2); + } + + return 0; +} + +static int hclge_dbg_dump_dcb_queue(struct hclge_dev *hdev, char *buf, int len, + int *pos) +{ + struct hclge_desc desc; + u16 nq_id; + int ret; + + *pos += scnprintf(buf + *pos, len - *pos, + "nq_id sch_nic_queue_cnt sch_roce_queue_cnt\n"); + for (nq_id = 0; nq_id < hdev->num_tqps; nq_id++) { + ret = 
hclge_dbg_cmd_send(hdev, &desc, nq_id, 1, + HCLGE_OPC_SCH_NQ_CNT); + if (ret) + return ret; + + *pos += scnprintf(buf + *pos, len - *pos, "%04u %#x", + nq_id, le32_to_cpu(desc.data[1])); + + ret = hclge_dbg_cmd_send(hdev, &desc, nq_id, 1, + HCLGE_OPC_SCH_RQ_CNT); + if (ret) + return ret; + + *pos += scnprintf(buf + *pos, len - *pos, + " %#x\n", + le32_to_cpu(desc.data[1])); + } + + return 0; +} + +static int hclge_dbg_dump_dcb_port(struct hclge_dev *hdev, char *buf, int len, + int *pos) +{ + struct hclge_dbg_bitmap_cmd req; + struct hclge_desc desc; + u8 port_id = 0; + int ret; + + ret = hclge_dbg_cmd_send(hdev, &desc, port_id, 1, + HCLGE_OPC_PORT_DFX_STS); + if (ret) + return ret; + + req.bitmap = (u8)le32_to_cpu(desc.data[1]); + + *pos += scnprintf(buf + *pos, len - *pos, "port_mask: %#x\n", + req.bit0); + *pos += scnprintf(buf + *pos, len - *pos, "port_shaping_pass: %#x\n", + req.bit1); + + return 0; +} + +static int hclge_dbg_dump_dcb_tm(struct hclge_dev *hdev, char *buf, int len, + int *pos) +{ + struct hclge_desc desc[2]; + u8 port_id = 0; + int ret; + + ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1, + HCLGE_OPC_TM_INTERNAL_CNT); + if (ret) + return ret; + + *pos += scnprintf(buf + *pos, len - *pos, "SCH_NIC_NUM: %#x\n", + le32_to_cpu(desc[0].data[1])); + *pos += scnprintf(buf + *pos, len - *pos, "SCH_ROCE_NUM: %#x\n", + le32_to_cpu(desc[0].data[2])); + + ret = hclge_dbg_cmd_send(hdev, desc, port_id, 2, + HCLGE_OPC_TM_INTERNAL_STS); + if (ret) + return ret; + + *pos += scnprintf(buf + *pos, len - *pos, "pri_bp: %#x\n", + le32_to_cpu(desc[0].data[1])); + *pos += scnprintf(buf + *pos, len - *pos, "fifo_dfx_info: %#x\n", + le32_to_cpu(desc[0].data[2])); + *pos += scnprintf(buf + *pos, len - *pos, + "sch_roce_fifo_afull_gap: %#x\n", + le32_to_cpu(desc[0].data[3])); + *pos += scnprintf(buf + *pos, len - *pos, + "tx_private_waterline: %#x\n", + le32_to_cpu(desc[0].data[4])); + *pos += scnprintf(buf + *pos, len - *pos, "tm_bypass_en: %#x\n", + le32_to_cpu(desc[0].data[5])); + *pos += scnprintf(buf + *pos, len - *pos, "SSU_TM_BYPASS_EN: %#x\n", + le32_to_cpu(desc[1].data[0])); + *pos += scnprintf(buf + *pos, len - *pos, "SSU_RESERVE_CFG: %#x\n", + le32_to_cpu(desc[1].data[1])); + + if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) + return 0; + + ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1, + HCLGE_OPC_TM_INTERNAL_STS_1); + if (ret) + return ret; + + *pos += scnprintf(buf + *pos, len - *pos, "TC_MAP_SEL: %#x\n", + le32_to_cpu(desc[0].data[1])); + *pos += scnprintf(buf + *pos, len - *pos, "IGU_PFC_PRI_EN: %#x\n", + le32_to_cpu(desc[0].data[2])); + *pos += scnprintf(buf + *pos, len - *pos, "MAC_PFC_PRI_EN: %#x\n", + le32_to_cpu(desc[0].data[3])); + *pos += scnprintf(buf + *pos, len - *pos, "IGU_PRI_MAP_TC_CFG: %#x\n", + le32_to_cpu(desc[0].data[4])); + *pos += scnprintf(buf + *pos, len - *pos, + "IGU_TX_PRI_MAP_TC_CFG: %#x\n", + le32_to_cpu(desc[0].data[5])); + + return 0; +} + +static int hclge_dbg_dump_dcb(struct hclge_dev *hdev, char *buf, int len) +{ + int pos = 0; + int ret; + + ret = hclge_dbg_dump_dcb_qset(hdev, buf, len, &pos); + if (ret) + return ret; + + ret = hclge_dbg_dump_dcb_pri(hdev, buf, len, &pos); + if (ret) + return ret; + + ret = hclge_dbg_dump_dcb_pg(hdev, buf, len, &pos); + if (ret) + return ret; + + ret = hclge_dbg_dump_dcb_queue(hdev, buf, len, &pos); + if (ret) + return ret; + + ret = hclge_dbg_dump_dcb_port(hdev, buf, len, &pos); + if (ret) + return ret; + + return hclge_dbg_dump_dcb_tm(hdev, buf, len, &pos); +} + +static int hclge_dbg_dump_reg_cmd(struct 
hclge_dev *hdev, + enum hnae3_dbg_cmd cmd, char *buf, int len) +{ + const struct hclge_dbg_reg_type_info *reg_info; + int pos = 0, ret = 0; + int i; + + for (i = 0; i < ARRAY_SIZE(hclge_dbg_reg_info); i++) { + reg_info = &hclge_dbg_reg_info[i]; + if (cmd == reg_info->cmd) { + if (cmd == HNAE3_DBG_CMD_REG_TQP) + return hclge_dbg_dump_reg_tqp(hdev, reg_info, + buf, len, &pos); + + ret = hclge_dbg_dump_reg_common(hdev, reg_info, buf, + len, &pos); + if (ret) + break; + } + } + + return ret; +} + +static int hclge_dbg_dump_tc(struct hclge_dev *hdev, char *buf, int len) +{ + struct hclge_ets_tc_weight_cmd *ets_weight; + struct hclge_desc desc; + char *sch_mode_str; + int pos = 0; + int ret; + u8 i; + + if (!hnae3_dev_dcb_supported(hdev)) { + dev_err(&hdev->pdev->dev, + "Only DCB-supported dev supports tc\n"); + return -EOPNOTSUPP; + } + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, "failed to get tc weight, ret = %d\n", + ret); + return ret; + } + + ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data; + + pos += scnprintf(buf + pos, len - pos, "enabled tc number: %u\n", + hdev->tm_info.num_tc); + pos += scnprintf(buf + pos, len - pos, "weight_offset: %u\n", + ets_weight->weight_offset); + + pos += scnprintf(buf + pos, len - pos, "TC MODE WEIGHT\n"); + for (i = 0; i < HNAE3_MAX_TC; i++) { + sch_mode_str = ets_weight->tc_weight[i] ? "dwrr" : "sp"; + pos += scnprintf(buf + pos, len - pos, "%u %4s %3u\n", + i, sch_mode_str, ets_weight->tc_weight[i]); + } + + return 0; +} + +static const struct hclge_dbg_item tm_pg_items[] = { + { "ID", 2 }, + { "PRI_MAP", 2 }, + { "MODE", 2 }, + { "DWRR", 2 }, + { "C_IR_B", 2 }, + { "C_IR_U", 2 }, + { "C_IR_S", 2 }, + { "C_BS_B", 2 }, + { "C_BS_S", 2 }, + { "C_FLAG", 2 }, + { "C_RATE(Mbps)", 2 }, + { "P_IR_B", 2 }, + { "P_IR_U", 2 }, + { "P_IR_S", 2 }, + { "P_BS_B", 2 }, + { "P_BS_S", 2 }, + { "P_FLAG", 2 }, + { "P_RATE(Mbps)", 0 } +}; + +static void hclge_dbg_fill_shaper_content(struct hclge_tm_shaper_para *para, + char **result, u8 *index) +{ + sprintf(result[(*index)++], "%3u", para->ir_b); + sprintf(result[(*index)++], "%3u", para->ir_u); + sprintf(result[(*index)++], "%3u", para->ir_s); + sprintf(result[(*index)++], "%3u", para->bs_b); + sprintf(result[(*index)++], "%3u", para->bs_s); + sprintf(result[(*index)++], "%3u", para->flag); + sprintf(result[(*index)++], "%6u", para->rate); +} + +static int __hclge_dbg_dump_tm_pg(struct hclge_dev *hdev, char *data_str, + char *buf, int len) +{ + struct hclge_tm_shaper_para c_shaper_para, p_shaper_para; + char *result[ARRAY_SIZE(tm_pg_items)], *sch_mode_str; + u8 pg_id, sch_mode, weight, pri_bit_map, i, j; + char content[HCLGE_DBG_TM_INFO_LEN]; + int pos = 0; + int ret; + + for (i = 0; i < ARRAY_SIZE(tm_pg_items); i++) { + result[i] = data_str; + data_str += HCLGE_DBG_DATA_STR_LEN; + } + + hclge_dbg_fill_content(content, sizeof(content), tm_pg_items, + NULL, ARRAY_SIZE(tm_pg_items)); + pos += scnprintf(buf + pos, len - pos, "%s", content); + + for (pg_id = 0; pg_id < hdev->tm_info.num_pg; pg_id++) { + ret = hclge_tm_get_pg_to_pri_map(hdev, pg_id, &pri_bit_map); + if (ret) + return ret; + + ret = hclge_tm_get_pg_sch_mode(hdev, pg_id, &sch_mode); + if (ret) + return ret; + + ret = hclge_tm_get_pg_weight(hdev, pg_id, &weight); + if (ret) + return ret; + + ret = hclge_tm_get_pg_shaper(hdev, pg_id, + HCLGE_OPC_TM_PG_C_SHAPPING, + &c_shaper_para); + if (ret) + return ret; + + ret = hclge_tm_get_pg_shaper(hdev, 
pg_id, + HCLGE_OPC_TM_PG_P_SHAPPING, + &p_shaper_para); + if (ret) + return ret; + + sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" : + "sp"; + + j = 0; + sprintf(result[j++], "%02u", pg_id); + sprintf(result[j++], "0x%02x", pri_bit_map); + sprintf(result[j++], "%4s", sch_mode_str); + sprintf(result[j++], "%3u", weight); + hclge_dbg_fill_shaper_content(&c_shaper_para, result, &j); + hclge_dbg_fill_shaper_content(&p_shaper_para, result, &j); + + hclge_dbg_fill_content(content, sizeof(content), tm_pg_items, + (const char **)result, + ARRAY_SIZE(tm_pg_items)); + pos += scnprintf(buf + pos, len - pos, "%s", content); + } + + return 0; +} + +static int hclge_dbg_dump_tm_pg(struct hclge_dev *hdev, char *buf, int len) +{ + char *data_str; + int ret; + + data_str = kcalloc(ARRAY_SIZE(tm_pg_items), + HCLGE_DBG_DATA_STR_LEN, GFP_KERNEL); + if (!data_str) + return -ENOMEM; + + ret = __hclge_dbg_dump_tm_pg(hdev, data_str, buf, len); + + kfree(data_str); + + return ret; +} + +static int hclge_dbg_dump_tm_port(struct hclge_dev *hdev, char *buf, int len) +{ + struct hclge_tm_shaper_para shaper_para; + int pos = 0; + int ret; + + ret = hclge_tm_get_port_shaper(hdev, &shaper_para); + if (ret) + return ret; + + pos += scnprintf(buf + pos, len - pos, + "IR_B IR_U IR_S BS_B BS_S FLAG RATE(Mbps)\n"); + pos += scnprintf(buf + pos, len - pos, + "%3u %3u %3u %3u %3u %1u %6u\n", + shaper_para.ir_b, shaper_para.ir_u, shaper_para.ir_s, + shaper_para.bs_b, shaper_para.bs_s, shaper_para.flag, + shaper_para.rate); + + return 0; +} + +static int hclge_dbg_dump_tm_bp_qset_map(struct hclge_dev *hdev, u8 tc_id, + char *buf, int len) +{ + u32 qset_mapping[HCLGE_BP_EXT_GRP_NUM]; + struct hclge_bp_to_qs_map_cmd *map; + struct hclge_desc desc; + int pos = 0; + u8 group_id; + u8 grp_num; + u16 i = 0; + int ret; + + grp_num = hdev->num_tqps <= HCLGE_TQP_MAX_SIZE_DEV_V2 ? 
+ HCLGE_BP_GRP_NUM : HCLGE_BP_EXT_GRP_NUM; + map = (struct hclge_bp_to_qs_map_cmd *)desc.data; + for (group_id = 0; group_id < grp_num; group_id++) { + hclge_cmd_setup_basic_desc(&desc, + HCLGE_OPC_TM_BP_TO_QSET_MAPPING, + true); + map->tc_id = tc_id; + map->qs_group_id = group_id; + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get bp to qset map, ret = %d\n", + ret); + return ret; + } + + qset_mapping[group_id] = le32_to_cpu(map->qs_bit_map); + } + + pos += scnprintf(buf + pos, len - pos, "INDEX | TM BP QSET MAPPING:\n"); + for (group_id = 0; group_id < grp_num / 8; group_id++) { + pos += scnprintf(buf + pos, len - pos, + "%04d | %08x:%08x:%08x:%08x:%08x:%08x:%08x:%08x\n", + group_id * 256, qset_mapping[i + 7], + qset_mapping[i + 6], qset_mapping[i + 5], + qset_mapping[i + 4], qset_mapping[i + 3], + qset_mapping[i + 2], qset_mapping[i + 1], + qset_mapping[i]); + i += 8; + } + + return pos; +} + +static int hclge_dbg_dump_tm_map(struct hclge_dev *hdev, char *buf, int len) +{ + u16 queue_id; + u16 qset_id; + u8 link_vld; + int pos = 0; + u8 pri_id; + u8 tc_id; + int ret; + + for (queue_id = 0; queue_id < hdev->num_tqps; queue_id++) { + ret = hclge_tm_get_q_to_qs_map(hdev, queue_id, &qset_id); + if (ret) + return ret; + + ret = hclge_tm_get_qset_map_pri(hdev, qset_id, &pri_id, + &link_vld); + if (ret) + return ret; + + ret = hclge_tm_get_q_to_tc(hdev, queue_id, &tc_id); + if (ret) + return ret; + + pos += scnprintf(buf + pos, len - pos, + "QUEUE_ID QSET_ID PRI_ID TC_ID\n"); + pos += scnprintf(buf + pos, len - pos, + "%04u %4u %3u %2u\n", + queue_id, qset_id, pri_id, tc_id); + + if (!hnae3_dev_dcb_supported(hdev)) + continue; + + ret = hclge_dbg_dump_tm_bp_qset_map(hdev, tc_id, buf + pos, + len - pos); + if (ret < 0) + return ret; + pos += ret; + + pos += scnprintf(buf + pos, len - pos, "\n"); + } + + return 0; +} + +static int hclge_dbg_dump_tm_nodes(struct hclge_dev *hdev, char *buf, int len) +{ + struct hclge_tm_nodes_cmd *nodes; + struct hclge_desc desc; + int pos = 0; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NODES, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to dump tm nodes, ret = %d\n", ret); + return ret; + } + + nodes = (struct hclge_tm_nodes_cmd *)desc.data; + + pos += scnprintf(buf + pos, len - pos, " BASE_ID MAX_NUM\n"); + pos += scnprintf(buf + pos, len - pos, "PG %4u %4u\n", + nodes->pg_base_id, nodes->pg_num); + pos += scnprintf(buf + pos, len - pos, "PRI %4u %4u\n", + nodes->pri_base_id, nodes->pri_num); + pos += scnprintf(buf + pos, len - pos, "QSET %4u %4u\n", + le16_to_cpu(nodes->qset_base_id), + le16_to_cpu(nodes->qset_num)); + pos += scnprintf(buf + pos, len - pos, "QUEUE %4u %4u\n", + le16_to_cpu(nodes->queue_base_id), + le16_to_cpu(nodes->queue_num)); + + return 0; +} + +static const struct hclge_dbg_item tm_pri_items[] = { + { "ID", 4 }, + { "MODE", 2 }, + { "DWRR", 2 }, + { "C_IR_B", 2 }, + { "C_IR_U", 2 }, + { "C_IR_S", 2 }, + { "C_BS_B", 2 }, + { "C_BS_S", 2 }, + { "C_FLAG", 2 }, + { "C_RATE(Mbps)", 2 }, + { "P_IR_B", 2 }, + { "P_IR_U", 2 }, + { "P_IR_S", 2 }, + { "P_BS_B", 2 }, + { "P_BS_S", 2 }, + { "P_FLAG", 2 }, + { "P_RATE(Mbps)", 0 } +}; + +static int hclge_dbg_dump_tm_pri(struct hclge_dev *hdev, char *buf, int len) +{ + char data_str[ARRAY_SIZE(tm_pri_items)][HCLGE_DBG_DATA_STR_LEN]; + struct hclge_tm_shaper_para c_shaper_para, p_shaper_para; + char *result[ARRAY_SIZE(tm_pri_items)], *sch_mode_str; + char 
content[HCLGE_DBG_TM_INFO_LEN]; + u8 pri_num, sch_mode, weight, i, j; + int pos, ret; + + ret = hclge_tm_get_pri_num(hdev, &pri_num); + if (ret) + return ret; + + for (i = 0; i < ARRAY_SIZE(tm_pri_items); i++) + result[i] = &data_str[i][0]; + + hclge_dbg_fill_content(content, sizeof(content), tm_pri_items, + NULL, ARRAY_SIZE(tm_pri_items)); + pos = scnprintf(buf, len, "%s", content); + + for (i = 0; i < pri_num; i++) { + ret = hclge_tm_get_pri_sch_mode(hdev, i, &sch_mode); + if (ret) + return ret; + + ret = hclge_tm_get_pri_weight(hdev, i, &weight); + if (ret) + return ret; + + ret = hclge_tm_get_pri_shaper(hdev, i, + HCLGE_OPC_TM_PRI_C_SHAPPING, + &c_shaper_para); + if (ret) + return ret; + + ret = hclge_tm_get_pri_shaper(hdev, i, + HCLGE_OPC_TM_PRI_P_SHAPPING, + &p_shaper_para); + if (ret) + return ret; + + sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" : + "sp"; + + j = 0; + sprintf(result[j++], "%04u", i); + sprintf(result[j++], "%4s", sch_mode_str); + sprintf(result[j++], "%3u", weight); + hclge_dbg_fill_shaper_content(&c_shaper_para, result, &j); + hclge_dbg_fill_shaper_content(&p_shaper_para, result, &j); + hclge_dbg_fill_content(content, sizeof(content), tm_pri_items, + (const char **)result, + ARRAY_SIZE(tm_pri_items)); + pos += scnprintf(buf + pos, len - pos, "%s", content); + } + + return 0; +} + +static const struct hclge_dbg_item tm_qset_items[] = { + { "ID", 4 }, + { "MAP_PRI", 2 }, + { "LINK_VLD", 2 }, + { "MODE", 2 }, + { "DWRR", 2 }, + { "IR_B", 2 }, + { "IR_U", 2 }, + { "IR_S", 2 }, + { "BS_B", 2 }, + { "BS_S", 2 }, + { "FLAG", 2 }, + { "RATE(Mbps)", 0 } +}; + +static int hclge_dbg_dump_tm_qset(struct hclge_dev *hdev, char *buf, int len) +{ + char data_str[ARRAY_SIZE(tm_qset_items)][HCLGE_DBG_DATA_STR_LEN]; + char *result[ARRAY_SIZE(tm_qset_items)], *sch_mode_str; + u8 priority, link_vld, sch_mode, weight; + struct hclge_tm_shaper_para shaper_para; + char content[HCLGE_DBG_TM_INFO_LEN]; + u16 qset_num, i; + int ret, pos; + u8 j; + + ret = hclge_tm_get_qset_num(hdev, &qset_num); + if (ret) + return ret; + + for (i = 0; i < ARRAY_SIZE(tm_qset_items); i++) + result[i] = &data_str[i][0]; + + hclge_dbg_fill_content(content, sizeof(content), tm_qset_items, + NULL, ARRAY_SIZE(tm_qset_items)); + pos = scnprintf(buf, len, "%s", content); + + for (i = 0; i < qset_num; i++) { + ret = hclge_tm_get_qset_map_pri(hdev, i, &priority, &link_vld); + if (ret) + return ret; + + ret = hclge_tm_get_qset_sch_mode(hdev, i, &sch_mode); + if (ret) + return ret; + + ret = hclge_tm_get_qset_weight(hdev, i, &weight); + if (ret) + return ret; + + ret = hclge_tm_get_qset_shaper(hdev, i, &shaper_para); + if (ret) + return ret; + + sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? 
"dwrr" : + "sp"; + + j = 0; + sprintf(result[j++], "%04u", i); + sprintf(result[j++], "%4u", priority); + sprintf(result[j++], "%4u", link_vld); + sprintf(result[j++], "%4s", sch_mode_str); + sprintf(result[j++], "%3u", weight); + hclge_dbg_fill_shaper_content(&shaper_para, result, &j); + + hclge_dbg_fill_content(content, sizeof(content), tm_qset_items, + (const char **)result, + ARRAY_SIZE(tm_qset_items)); + pos += scnprintf(buf + pos, len - pos, "%s", content); + } + + return 0; +} + +static int hclge_dbg_dump_qos_pause_cfg(struct hclge_dev *hdev, char *buf, + int len) +{ + struct hclge_cfg_pause_param_cmd *pause_param; + struct hclge_desc desc; + int pos = 0; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to dump qos pause, ret = %d\n", ret); + return ret; + } + + pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data; + + pos += scnprintf(buf + pos, len - pos, "pause_trans_gap: 0x%x\n", + pause_param->pause_trans_gap); + pos += scnprintf(buf + pos, len - pos, "pause_trans_time: 0x%x\n", + le16_to_cpu(pause_param->pause_trans_time)); + return 0; +} + +#define HCLGE_DBG_TC_MASK 0x0F + +static int hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev, char *buf, + int len) +{ +#define HCLGE_DBG_TC_BIT_WIDTH 4 + + struct hclge_qos_pri_map_cmd *pri_map; + struct hclge_desc desc; + int pos = 0; + u8 *pri_tc; + u8 tc, i; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to dump qos pri map, ret = %d\n", ret); + return ret; + } + + pri_map = (struct hclge_qos_pri_map_cmd *)desc.data; + + pos += scnprintf(buf + pos, len - pos, "vlan_to_pri: 0x%x\n", + pri_map->vlan_pri); + pos += scnprintf(buf + pos, len - pos, "PRI TC\n"); + + pri_tc = (u8 *)pri_map; + for (i = 0; i < HNAE3_MAX_TC; i++) { + tc = pri_tc[i >> 1] >> ((i & 1) * HCLGE_DBG_TC_BIT_WIDTH); + tc &= HCLGE_DBG_TC_MASK; + pos += scnprintf(buf + pos, len - pos, "%u %u\n", i, tc); + } + + return 0; +} + +static int hclge_dbg_dump_qos_dscp_map(struct hclge_dev *hdev, char *buf, + int len) +{ + struct hnae3_knic_private_info *kinfo = &hdev->vport[0].nic.kinfo; + struct hclge_desc desc[HCLGE_DSCP_MAP_TC_BD_NUM]; + u8 *req0 = (u8 *)desc[0].data; + u8 *req1 = (u8 *)desc[1].data; + u8 dscp_tc[HNAE3_MAX_DSCP]; + int pos, ret; + u8 i, j; + + pos = scnprintf(buf, len, "tc map mode: %s\n", + tc_map_mode_str[kinfo->tc_map_mode]); + + if (kinfo->tc_map_mode != HNAE3_TC_MAP_MODE_DSCP) + return 0; + + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QOS_MAP, true); + desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_QOS_MAP, true); + ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_DSCP_MAP_TC_BD_NUM); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to dump qos dscp map, ret = %d\n", ret); + return ret; + } + + pos += scnprintf(buf + pos, len - pos, "\nDSCP PRIO TC\n"); + + /* The low 32 dscp setting use bd0, high 32 dscp setting use bd1 */ + for (i = 0; i < HNAE3_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM; i++) { + j = i + HNAE3_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM; + /* Each dscp setting has 4 bits, so each byte saves two dscp + * setting + */ + dscp_tc[i] = req0[i >> 1] >> HCLGE_DSCP_TC_SHIFT(i); + dscp_tc[j] = req1[i >> 1] >> HCLGE_DSCP_TC_SHIFT(i); + dscp_tc[i] &= HCLGE_DBG_TC_MASK; + dscp_tc[j] &= HCLGE_DBG_TC_MASK; + } + + for (i = 0; i < 
HNAE3_MAX_DSCP; i++) { + if (kinfo->dscp_prio[i] == HNAE3_PRIO_ID_INVALID) + continue; + + pos += scnprintf(buf + pos, len - pos, " %2u %u %u\n", + i, kinfo->dscp_prio[i], dscp_tc[i]); + } + + return 0; +} + +static int hclge_dbg_dump_tx_buf_cfg(struct hclge_dev *hdev, char *buf, int len) +{ + struct hclge_tx_buff_alloc_cmd *tx_buf_cmd; + struct hclge_desc desc; + int pos = 0; + int i, ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to dump tx buf, ret = %d\n", ret); + return ret; + } + + tx_buf_cmd = (struct hclge_tx_buff_alloc_cmd *)desc.data; + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) + pos += scnprintf(buf + pos, len - pos, + "tx_packet_buf_tc_%d: 0x%x\n", i, + le16_to_cpu(tx_buf_cmd->tx_pkt_buff[i])); + + return pos; +} + +static int hclge_dbg_dump_rx_priv_buf_cfg(struct hclge_dev *hdev, char *buf, + int len) +{ + struct hclge_rx_priv_buff_cmd *rx_buf_cmd; + struct hclge_desc desc; + int pos = 0; + int i, ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to dump rx priv buf, ret = %d\n", ret); + return ret; + } + + pos += scnprintf(buf + pos, len - pos, "\n"); + + rx_buf_cmd = (struct hclge_rx_priv_buff_cmd *)desc.data; + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) + pos += scnprintf(buf + pos, len - pos, + "rx_packet_buf_tc_%d: 0x%x\n", i, + le16_to_cpu(rx_buf_cmd->buf_num[i])); + + pos += scnprintf(buf + pos, len - pos, "rx_share_buf: 0x%x\n", + le16_to_cpu(rx_buf_cmd->shared_buf)); + + return pos; +} + +static int hclge_dbg_dump_rx_common_wl_cfg(struct hclge_dev *hdev, char *buf, + int len) +{ + struct hclge_rx_com_wl *rx_com_wl; + struct hclge_desc desc; + int pos = 0; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to dump rx common wl, ret = %d\n", ret); + return ret; + } + + rx_com_wl = (struct hclge_rx_com_wl *)desc.data; + pos += scnprintf(buf + pos, len - pos, "\n"); + pos += scnprintf(buf + pos, len - pos, + "rx_com_wl: high: 0x%x, low: 0x%x\n", + le16_to_cpu(rx_com_wl->com_wl.high), + le16_to_cpu(rx_com_wl->com_wl.low)); + + return pos; +} + +static int hclge_dbg_dump_rx_global_pkt_cnt(struct hclge_dev *hdev, char *buf, + int len) +{ + struct hclge_rx_com_wl *rx_packet_cnt; + struct hclge_desc desc; + int pos = 0; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_GBL_PKT_CNT, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to dump rx global pkt cnt, ret = %d\n", ret); + return ret; + } + + rx_packet_cnt = (struct hclge_rx_com_wl *)desc.data; + pos += scnprintf(buf + pos, len - pos, + "rx_global_packet_cnt: high: 0x%x, low: 0x%x\n", + le16_to_cpu(rx_packet_cnt->com_wl.high), + le16_to_cpu(rx_packet_cnt->com_wl.low)); + + return pos; +} + +static int hclge_dbg_dump_rx_priv_wl_buf_cfg(struct hclge_dev *hdev, char *buf, + int len) +{ + struct hclge_rx_priv_wl_buf *rx_priv_wl; + struct hclge_desc desc[2]; + int pos = 0; + int i, ret; + + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_RX_PRIV_WL_ALLOC, true); + desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_RX_PRIV_WL_ALLOC, true); + ret = hclge_cmd_send(&hdev->hw, desc, 2); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to dump rx priv 
wl buf, ret = %d\n", ret); + return ret; + } + + rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[0].data; + for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++) + pos += scnprintf(buf + pos, len - pos, + "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", i, + le16_to_cpu(rx_priv_wl->tc_wl[i].high), + le16_to_cpu(rx_priv_wl->tc_wl[i].low)); + + rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[1].data; + for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++) + pos += scnprintf(buf + pos, len - pos, + "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", + i + HCLGE_TC_NUM_ONE_DESC, + le16_to_cpu(rx_priv_wl->tc_wl[i].high), + le16_to_cpu(rx_priv_wl->tc_wl[i].low)); + + return pos; +} + +static int hclge_dbg_dump_rx_common_threshold_cfg(struct hclge_dev *hdev, + char *buf, int len) +{ + struct hclge_rx_com_thrd *rx_com_thrd; + struct hclge_desc desc[2]; + int pos = 0; + int i, ret; + + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_RX_COM_THRD_ALLOC, true); + desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_RX_COM_THRD_ALLOC, true); + ret = hclge_cmd_send(&hdev->hw, desc, 2); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to dump rx common threshold, ret = %d\n", ret); + return ret; + } + + pos += scnprintf(buf + pos, len - pos, "\n"); + rx_com_thrd = (struct hclge_rx_com_thrd *)desc[0].data; + for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++) + pos += scnprintf(buf + pos, len - pos, + "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", i, + le16_to_cpu(rx_com_thrd->com_thrd[i].high), + le16_to_cpu(rx_com_thrd->com_thrd[i].low)); + + rx_com_thrd = (struct hclge_rx_com_thrd *)desc[1].data; + for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++) + pos += scnprintf(buf + pos, len - pos, + "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", + i + HCLGE_TC_NUM_ONE_DESC, + le16_to_cpu(rx_com_thrd->com_thrd[i].high), + le16_to_cpu(rx_com_thrd->com_thrd[i].low)); + + return pos; +} + +static int hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev, char *buf, + int len) +{ + int pos = 0; + int ret; + + ret = hclge_dbg_dump_tx_buf_cfg(hdev, buf + pos, len - pos); + if (ret < 0) + return ret; + pos += ret; + + ret = hclge_dbg_dump_rx_priv_buf_cfg(hdev, buf + pos, len - pos); + if (ret < 0) + return ret; + pos += ret; + + ret = hclge_dbg_dump_rx_common_wl_cfg(hdev, buf + pos, len - pos); + if (ret < 0) + return ret; + pos += ret; + + ret = hclge_dbg_dump_rx_global_pkt_cnt(hdev, buf + pos, len - pos); + if (ret < 0) + return ret; + pos += ret; + + pos += scnprintf(buf + pos, len - pos, "\n"); + if (!hnae3_dev_dcb_supported(hdev)) + return 0; + + ret = hclge_dbg_dump_rx_priv_wl_buf_cfg(hdev, buf + pos, len - pos); + if (ret < 0) + return ret; + pos += ret; + + ret = hclge_dbg_dump_rx_common_threshold_cfg(hdev, buf + pos, + len - pos); + if (ret < 0) + return ret; + + return 0; +} + +static int hclge_dbg_dump_mng_table(struct hclge_dev *hdev, char *buf, int len) +{ + struct hclge_mac_ethertype_idx_rd_cmd *req0; + struct hclge_desc desc; + u32 msg_egress_port; + int pos = 0; + int ret, i; + + pos += scnprintf(buf + pos, len - pos, + "entry mac_addr mask ether "); + pos += scnprintf(buf + pos, len - pos, + "mask vlan mask i_map i_dir e_type "); + pos += scnprintf(buf + pos, len - pos, "pf_id vf_id q_id drop\n"); + + for (i = 0; i < HCLGE_DBG_MNG_TBL_MAX; i++) { + hclge_cmd_setup_basic_desc(&desc, HCLGE_MAC_ETHERTYPE_IDX_RD, + true); + req0 = (struct hclge_mac_ethertype_idx_rd_cmd *)&desc.data; + req0->index = cpu_to_le16(i); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + 
"failed to dump manage table, ret = %d\n", ret); + return ret; + } + + if (!req0->resp_code) + continue; + + pos += scnprintf(buf + pos, len - pos, "%02u %pM ", + le16_to_cpu(req0->index), req0->mac_addr); + + pos += scnprintf(buf + pos, len - pos, + "%x %04x %x %04x ", + !!(req0->flags & HCLGE_DBG_MNG_MAC_MASK_B), + le16_to_cpu(req0->ethter_type), + !!(req0->flags & HCLGE_DBG_MNG_ETHER_MASK_B), + le16_to_cpu(req0->vlan_tag) & + HCLGE_DBG_MNG_VLAN_TAG); + + pos += scnprintf(buf + pos, len - pos, + "%x %02x %02x ", + !!(req0->flags & HCLGE_DBG_MNG_VLAN_MASK_B), + req0->i_port_bitmap, req0->i_port_direction); + + msg_egress_port = le16_to_cpu(req0->egress_port); + pos += scnprintf(buf + pos, len - pos, + "%x %x %02x %04x %x\n", + !!(msg_egress_port & HCLGE_DBG_MNG_E_TYPE_B), + msg_egress_port & HCLGE_DBG_MNG_PF_ID, + (msg_egress_port >> 3) & HCLGE_DBG_MNG_VF_ID, + le16_to_cpu(req0->egress_queue), + !!(msg_egress_port & HCLGE_DBG_MNG_DROP_B)); + } + + return 0; +} + +#define HCLGE_DBG_TCAM_BUF_SIZE 256 + +static int hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, bool sel_x, + char *tcam_buf, + struct hclge_dbg_tcam_msg tcam_msg) +{ + struct hclge_fd_tcam_config_1_cmd *req1; + struct hclge_fd_tcam_config_2_cmd *req2; + struct hclge_fd_tcam_config_3_cmd *req3; + struct hclge_desc desc[3]; + int pos = 0; + int ret, i; + __le32 *req; + + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, true); + desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, true); + desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, true); + + req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data; + req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data; + req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data; + + req1->stage = tcam_msg.stage; + req1->xy_sel = sel_x ? 1 : 0; + req1->index = cpu_to_le32(tcam_msg.loc); + + ret = hclge_cmd_send(&hdev->hw, desc, 3); + if (ret) + return ret; + + pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos, + "read result tcam key %s(%u):\n", sel_x ? 
"x" : "y", + tcam_msg.loc); + + /* tcam_data0 ~ tcam_data1 */ + req = (__le32 *)req1->tcam_data; + for (i = 0; i < 2; i++) + pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos, + "%08x\n", le32_to_cpu(*req++)); + + /* tcam_data2 ~ tcam_data7 */ + req = (__le32 *)req2->tcam_data; + for (i = 0; i < 6; i++) + pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos, + "%08x\n", le32_to_cpu(*req++)); + + /* tcam_data8 ~ tcam_data12 */ + req = (__le32 *)req3->tcam_data; + for (i = 0; i < 5; i++) + pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos, + "%08x\n", le32_to_cpu(*req++)); + + return ret; +} + +static int hclge_dbg_get_rules_location(struct hclge_dev *hdev, u16 *rule_locs) +{ + struct hclge_fd_rule *rule; + struct hlist_node *node; + int cnt = 0; + + spin_lock_bh(&hdev->fd_rule_lock); + hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { + rule_locs[cnt] = rule->location; + cnt++; + } + spin_unlock_bh(&hdev->fd_rule_lock); + + if (cnt != hdev->hclge_fd_rule_num || cnt == 0) + return -EINVAL; + + return cnt; +} + +static int hclge_dbg_dump_fd_tcam(struct hclge_dev *hdev, char *buf, int len) +{ + u32 rule_num = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]; + struct hclge_dbg_tcam_msg tcam_msg; + int i, ret, rule_cnt; + u16 *rule_locs; + char *tcam_buf; + int pos = 0; + + if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) { + dev_err(&hdev->pdev->dev, + "Only FD-supported dev supports dump fd tcam\n"); + return -EOPNOTSUPP; + } + + if (!hdev->hclge_fd_rule_num || !rule_num) + return 0; + + rule_locs = kcalloc(rule_num, sizeof(u16), GFP_KERNEL); + if (!rule_locs) + return -ENOMEM; + + tcam_buf = kzalloc(HCLGE_DBG_TCAM_BUF_SIZE, GFP_KERNEL); + if (!tcam_buf) { + kfree(rule_locs); + return -ENOMEM; + } + + rule_cnt = hclge_dbg_get_rules_location(hdev, rule_locs); + if (rule_cnt < 0) { + ret = rule_cnt; + dev_err(&hdev->pdev->dev, + "failed to get rule number, ret = %d\n", ret); + goto out; + } + + ret = 0; + for (i = 0; i < rule_cnt; i++) { + tcam_msg.stage = HCLGE_FD_STAGE_1; + tcam_msg.loc = rule_locs[i]; + + ret = hclge_dbg_fd_tcam_read(hdev, true, tcam_buf, tcam_msg); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get fd tcam key x, ret = %d\n", ret); + goto out; + } + + pos += scnprintf(buf + pos, len - pos, "%s", tcam_buf); + + ret = hclge_dbg_fd_tcam_read(hdev, false, tcam_buf, tcam_msg); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get fd tcam key y, ret = %d\n", ret); + goto out; + } + + pos += scnprintf(buf + pos, len - pos, "%s", tcam_buf); + } + +out: + kfree(tcam_buf); + kfree(rule_locs); + return ret; +} + +static int hclge_dbg_dump_fd_counter(struct hclge_dev *hdev, char *buf, int len) +{ + u8 func_num = pci_num_vf(hdev->pdev) + 1; /* pf and enabled vf num */ + struct hclge_fd_ad_cnt_read_cmd *req; + char str_id[HCLGE_DBG_ID_LEN]; + struct hclge_desc desc; + int pos = 0; + int ret; + u64 cnt; + u8 i; + + if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) + return -EOPNOTSUPP; + + pos += scnprintf(buf + pos, len - pos, + "func_id\thit_times\n"); + + for (i = 0; i < func_num; i++) { + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_CNT_OP, true); + req = (struct hclge_fd_ad_cnt_read_cmd *)desc.data; + req->index = cpu_to_le16(i); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, "failed to get fd counter, ret = %d\n", + ret); + return ret; + } + cnt = le64_to_cpu(req->cnt); + hclge_dbg_get_func_id_str(str_id, i); + pos += scnprintf(buf + pos, len - pos, + "%s\t%llu\n", str_id, cnt); + } + + 
return 0; +} + +static const struct hclge_dbg_status_dfx_info hclge_dbg_rst_info[] = { + {HCLGE_MISC_VECTOR_REG_BASE, "vector0 interrupt enable status"}, + {HCLGE_MISC_RESET_STS_REG, "reset interrupt source"}, + {HCLGE_MISC_VECTOR_INT_STS, "reset interrupt status"}, + {HCLGE_RAS_PF_OTHER_INT_STS_REG, "RAS interrupt status"}, + {HCLGE_GLOBAL_RESET_REG, "hardware reset status"}, + {HCLGE_NIC_CSQ_DEPTH_REG, "handshake status"}, + {HCLGE_FUN_RST_ING, "function reset status"} +}; + +int hclge_dbg_dump_rst_info(struct hclge_dev *hdev, char *buf, int len) +{ + u32 i, offset; + int pos = 0; + + pos += scnprintf(buf + pos, len - pos, "PF reset count: %u\n", + hdev->rst_stats.pf_rst_cnt); + pos += scnprintf(buf + pos, len - pos, "FLR reset count: %u\n", + hdev->rst_stats.flr_rst_cnt); + pos += scnprintf(buf + pos, len - pos, "GLOBAL reset count: %u\n", + hdev->rst_stats.global_rst_cnt); + pos += scnprintf(buf + pos, len - pos, "IMP reset count: %u\n", + hdev->rst_stats.imp_rst_cnt); + pos += scnprintf(buf + pos, len - pos, "reset done count: %u\n", + hdev->rst_stats.reset_done_cnt); + pos += scnprintf(buf + pos, len - pos, "HW reset done count: %u\n", + hdev->rst_stats.hw_reset_done_cnt); + pos += scnprintf(buf + pos, len - pos, "reset count: %u\n", + hdev->rst_stats.reset_cnt); + pos += scnprintf(buf + pos, len - pos, "reset fail count: %u\n", + hdev->rst_stats.reset_fail_cnt); + + for (i = 0; i < ARRAY_SIZE(hclge_dbg_rst_info); i++) { + offset = hclge_dbg_rst_info[i].offset; + pos += scnprintf(buf + pos, len - pos, "%s: 0x%x\n", + hclge_dbg_rst_info[i].message, + hclge_read_dev(&hdev->hw, offset)); + } + + pos += scnprintf(buf + pos, len - pos, "hdev state: 0x%lx\n", + hdev->state); + + return 0; +} + +static int hclge_dbg_dump_serv_info(struct hclge_dev *hdev, char *buf, int len) +{ + unsigned long rem_nsec; + int pos = 0; + u64 lc; + + lc = local_clock(); + rem_nsec = do_div(lc, HCLGE_BILLION_NANO_SECONDS); + + pos += scnprintf(buf + pos, len - pos, "local_clock: [%5lu.%06lu]\n", + (unsigned long)lc, rem_nsec / 1000); + pos += scnprintf(buf + pos, len - pos, "delta: %u(ms)\n", + jiffies_to_msecs(jiffies - hdev->last_serv_processed)); + pos += scnprintf(buf + pos, len - pos, + "last_service_task_processed: %lu(jiffies)\n", + hdev->last_serv_processed); + pos += scnprintf(buf + pos, len - pos, "last_service_task_cnt: %lu\n", + hdev->serv_processed_cnt); + + return 0; +} + +static int hclge_dbg_dump_interrupt(struct hclge_dev *hdev, char *buf, int len) +{ + int pos = 0; + + pos += scnprintf(buf + pos, len - pos, "num_nic_msi: %u\n", + hdev->num_nic_msi); + pos += scnprintf(buf + pos, len - pos, "num_roce_msi: %u\n", + hdev->num_roce_msi); + pos += scnprintf(buf + pos, len - pos, "num_msi_used: %u\n", + hdev->num_msi_used); + pos += scnprintf(buf + pos, len - pos, "num_msi_left: %u\n", + hdev->num_msi_left); + + return 0; +} + +static void hclge_dbg_imp_info_data_print(struct hclge_desc *desc_src, + char *buf, int len, u32 bd_num) +{ +#define HCLGE_DBG_IMP_INFO_PRINT_OFFSET 0x2 + + struct hclge_desc *desc_index = desc_src; + u32 offset = 0; + int pos = 0; + u32 i, j; + + pos += scnprintf(buf + pos, len - pos, "offset | data\n"); + + for (i = 0; i < bd_num; i++) { + j = 0; + while (j < HCLGE_DESC_DATA_LEN - 1) { + pos += scnprintf(buf + pos, len - pos, "0x%04x | ", + offset); + pos += scnprintf(buf + pos, len - pos, "0x%08x ", + le32_to_cpu(desc_index->data[j++])); + pos += scnprintf(buf + pos, len - pos, "0x%08x\n", + le32_to_cpu(desc_index->data[j++])); + offset += sizeof(u32) * 
HCLGE_DBG_IMP_INFO_PRINT_OFFSET; + } + desc_index++; + } +} + +static int +hclge_dbg_get_imp_stats_info(struct hclge_dev *hdev, char *buf, int len) +{ + struct hclge_get_imp_bd_cmd *req; + struct hclge_desc *desc_src; + struct hclge_desc desc; + u32 bd_num; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_IMP_STATS_BD, true); + + req = (struct hclge_get_imp_bd_cmd *)desc.data; + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get imp statistics bd number, ret = %d\n", + ret); + return ret; + } + + bd_num = le32_to_cpu(req->bd_num); + if (!bd_num) { + dev_err(&hdev->pdev->dev, "imp statistics bd number is 0!\n"); + return -EINVAL; + } + + desc_src = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL); + if (!desc_src) + return -ENOMEM; + + ret = hclge_dbg_cmd_send(hdev, desc_src, 0, bd_num, + HCLGE_OPC_IMP_STATS_INFO); + if (ret) { + kfree(desc_src); + dev_err(&hdev->pdev->dev, + "failed to get imp statistics, ret = %d\n", ret); + return ret; + } + + hclge_dbg_imp_info_data_print(desc_src, buf, len, bd_num); + + kfree(desc_src); + + return 0; +} + +#define HCLGE_CMD_NCL_CONFIG_BD_NUM 5 +#define HCLGE_MAX_NCL_CONFIG_LENGTH 16384 + +static void hclge_ncl_config_data_print(struct hclge_desc *desc, int *index, + char *buf, int len, int *pos) +{ +#define HCLGE_CMD_DATA_NUM 6 + + int offset = HCLGE_MAX_NCL_CONFIG_LENGTH - *index; + int i, j; + + for (i = 0; i < HCLGE_CMD_NCL_CONFIG_BD_NUM; i++) { + for (j = 0; j < HCLGE_CMD_DATA_NUM; j++) { + if (i == 0 && j == 0) + continue; + + *pos += scnprintf(buf + *pos, len - *pos, + "0x%04x | 0x%08x\n", offset, + le32_to_cpu(desc[i].data[j])); + + offset += sizeof(u32); + *index -= sizeof(u32); + + if (*index <= 0) + return; + } + } +} + +static int +hclge_dbg_dump_ncl_config(struct hclge_dev *hdev, char *buf, int len) +{ +#define HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD (20 + 24 * 4) + + struct hclge_desc desc[HCLGE_CMD_NCL_CONFIG_BD_NUM]; + int bd_num = HCLGE_CMD_NCL_CONFIG_BD_NUM; + int index = HCLGE_MAX_NCL_CONFIG_LENGTH; + int pos = 0; + u32 data0; + int ret; + + pos += scnprintf(buf + pos, len - pos, "offset | data\n"); + + while (index > 0) { + data0 = HCLGE_MAX_NCL_CONFIG_LENGTH - index; + if (index >= HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD) + data0 |= HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD << 16; + else + data0 |= (u32)index << 16; + ret = hclge_dbg_cmd_send(hdev, desc, data0, bd_num, + HCLGE_OPC_QUERY_NCL_CONFIG); + if (ret) + return ret; + + hclge_ncl_config_data_print(desc, &index, buf, len, &pos); + } + + return 0; +} + +static int hclge_dbg_dump_loopback(struct hclge_dev *hdev, char *buf, int len) +{ + struct phy_device *phydev = hdev->hw.mac.phydev; + struct hclge_config_mac_mode_cmd *req_app; + struct hclge_common_lb_cmd *req_common; + struct hclge_desc desc; + u8 loopback_en; + int pos = 0; + int ret; + + req_app = (struct hclge_config_mac_mode_cmd *)desc.data; + req_common = (struct hclge_common_lb_cmd *)desc.data; + + pos += scnprintf(buf + pos, len - pos, "mac id: %u\n", + hdev->hw.mac.mac_id); + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to dump app loopback status, ret = %d\n", ret); + return ret; + } + + loopback_en = hnae3_get_bit(le32_to_cpu(req_app->txrx_pad_fcs_loop_en), + HCLGE_MAC_APP_LP_B); + pos += scnprintf(buf + pos, len - pos, "app loopback: %s\n", + state_str[loopback_en]); + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, true); + ret 
= hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to dump common loopback status, ret = %d\n", + ret); + return ret; + } + + loopback_en = req_common->enable & HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B; + pos += scnprintf(buf + pos, len - pos, "serdes serial loopback: %s\n", + state_str[loopback_en]); + + loopback_en = req_common->enable & + HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B ? 1 : 0; + pos += scnprintf(buf + pos, len - pos, "serdes parallel loopback: %s\n", + state_str[loopback_en]); + + if (phydev) { + loopback_en = phydev->loopback_enabled; + pos += scnprintf(buf + pos, len - pos, "phy loopback: %s\n", + state_str[loopback_en]); + } else if (hnae3_dev_phy_imp_supported(hdev)) { + loopback_en = req_common->enable & + HCLGE_CMD_GE_PHY_INNER_LOOP_B; + pos += scnprintf(buf + pos, len - pos, "phy loopback: %s\n", + state_str[loopback_en]); + } + + return 0; +} + +/* hclge_dbg_dump_mac_tnl_status: print message about mac tnl interrupt + * @hdev: pointer to struct hclge_dev + */ +static int +hclge_dbg_dump_mac_tnl_status(struct hclge_dev *hdev, char *buf, int len) +{ + struct hclge_mac_tnl_stats stats; + unsigned long rem_nsec; + int pos = 0; + + pos += scnprintf(buf + pos, len - pos, + "Recently generated mac tnl interruption:\n"); + + while (kfifo_get(&hdev->mac_tnl_log, &stats)) { + rem_nsec = do_div(stats.time, HCLGE_BILLION_NANO_SECONDS); + + pos += scnprintf(buf + pos, len - pos, + "[%07lu.%03lu] status = 0x%x\n", + (unsigned long)stats.time, rem_nsec / 1000, + stats.status); + } + + return 0; +} + + +static const struct hclge_dbg_item mac_list_items[] = { + { "FUNC_ID", 2 }, + { "MAC_ADDR", 12 }, + { "STATE", 2 }, +}; + +static void hclge_dbg_dump_mac_list(struct hclge_dev *hdev, char *buf, int len, + bool is_unicast) +{ + char data_str[ARRAY_SIZE(mac_list_items)][HCLGE_DBG_DATA_STR_LEN]; + char content[HCLGE_DBG_INFO_LEN], str_id[HCLGE_DBG_ID_LEN]; + char *result[ARRAY_SIZE(mac_list_items)]; + struct hclge_mac_node *mac_node, *tmp; + struct hclge_vport *vport; + struct list_head *list; + u32 func_id; + int pos = 0; + int i; + + for (i = 0; i < ARRAY_SIZE(mac_list_items); i++) + result[i] = &data_str[i][0]; + + pos += scnprintf(buf + pos, len - pos, "%s MAC_LIST:\n", + is_unicast ? "UC" : "MC"); + hclge_dbg_fill_content(content, sizeof(content), mac_list_items, + NULL, ARRAY_SIZE(mac_list_items)); + pos += scnprintf(buf + pos, len - pos, "%s", content); + + for (func_id = 0; func_id < hdev->num_alloc_vport; func_id++) { + vport = &hdev->vport[func_id]; + list = is_unicast ? 
&vport->uc_mac_list : &vport->mc_mac_list; + spin_lock_bh(&vport->mac_list_lock); + list_for_each_entry_safe(mac_node, tmp, list, node) { + i = 0; + result[i++] = hclge_dbg_get_func_id_str(str_id, + func_id); + sprintf(result[i++], "%pM", mac_node->mac_addr); + sprintf(result[i++], "%5s", + hclge_mac_state_str[mac_node->state]); + hclge_dbg_fill_content(content, sizeof(content), + mac_list_items, + (const char **)result, + ARRAY_SIZE(mac_list_items)); + pos += scnprintf(buf + pos, len - pos, "%s", content); + } + spin_unlock_bh(&vport->mac_list_lock); + } +} + +static int hclge_dbg_dump_umv_info(struct hclge_dev *hdev, char *buf, int len) +{ + u8 func_num = pci_num_vf(hdev->pdev) + 1; + struct hclge_vport *vport; + int pos = 0; + u8 i; + + pos += scnprintf(buf, len, "num_alloc_vport : %u\n", + hdev->num_alloc_vport); + pos += scnprintf(buf + pos, len - pos, "max_umv_size : %u\n", + hdev->max_umv_size); + pos += scnprintf(buf + pos, len - pos, "wanted_umv_size : %u\n", + hdev->wanted_umv_size); + pos += scnprintf(buf + pos, len - pos, "priv_umv_size : %u\n", + hdev->priv_umv_size); + + mutex_lock(&hdev->vport_lock); + pos += scnprintf(buf + pos, len - pos, "share_umv_size : %u\n", + hdev->share_umv_size); + for (i = 0; i < func_num; i++) { + vport = &hdev->vport[i]; + pos += scnprintf(buf + pos, len - pos, + "vport(%u) used_umv_num : %u\n", + i, vport->used_umv_num); + } + mutex_unlock(&hdev->vport_lock); + + pos += scnprintf(buf + pos, len - pos, "used_mc_mac_num : %u\n", + hdev->used_mc_mac_num); + + return 0; +} + +static int hclge_get_vlan_rx_offload_cfg(struct hclge_dev *hdev, u8 vf_id, + struct hclge_dbg_vlan_cfg *vlan_cfg) +{ + struct hclge_vport_vtag_rx_cfg_cmd *req; + struct hclge_desc desc; + u16 bmap_index; + u8 rx_cfg; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, true); + + req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data; + req->vf_offset = vf_id / HCLGE_VF_NUM_PER_CMD; + bmap_index = vf_id % HCLGE_VF_NUM_PER_CMD / HCLGE_VF_NUM_PER_BYTE; + req->vf_bitmap[bmap_index] = 1U << (vf_id % HCLGE_VF_NUM_PER_BYTE); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get vport%u rxvlan cfg, ret = %d\n", + vf_id, ret); + return ret; + } + + rx_cfg = req->vport_vlan_cfg; + vlan_cfg->strip_tag1 = hnae3_get_bit(rx_cfg, HCLGE_REM_TAG1_EN_B); + vlan_cfg->strip_tag2 = hnae3_get_bit(rx_cfg, HCLGE_REM_TAG2_EN_B); + vlan_cfg->drop_tag1 = hnae3_get_bit(rx_cfg, HCLGE_DISCARD_TAG1_EN_B); + vlan_cfg->drop_tag2 = hnae3_get_bit(rx_cfg, HCLGE_DISCARD_TAG2_EN_B); + vlan_cfg->pri_only1 = hnae3_get_bit(rx_cfg, HCLGE_SHOW_TAG1_EN_B); + vlan_cfg->pri_only2 = hnae3_get_bit(rx_cfg, HCLGE_SHOW_TAG2_EN_B); + + return 0; +} + +static int hclge_get_vlan_tx_offload_cfg(struct hclge_dev *hdev, u8 vf_id, + struct hclge_dbg_vlan_cfg *vlan_cfg) +{ + struct hclge_vport_vtag_tx_cfg_cmd *req; + struct hclge_desc desc; + u16 bmap_index; + u8 tx_cfg; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, true); + req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data; + req->vf_offset = vf_id / HCLGE_VF_NUM_PER_CMD; + bmap_index = vf_id % HCLGE_VF_NUM_PER_CMD / HCLGE_VF_NUM_PER_BYTE; + req->vf_bitmap[bmap_index] = 1U << (vf_id % HCLGE_VF_NUM_PER_BYTE); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get vport%u txvlan cfg, ret = %d\n", + vf_id, ret); + return ret; + } + + tx_cfg = req->vport_vlan_cfg; + vlan_cfg->pvid = le16_to_cpu(req->def_vlan_tag1); + + 
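For reference, the decode that follows extracts each TX VLAN offload flag from a single packed configuration byte, one bit per flag, via hnae3_get_bit(). A minimal sketch of that pattern, with a hypothetical helper name and made-up bit positions (the real positions come from the HCLGE_ACCEPT_*_B / HCLGE_PORT_INS_TAG*_EN_B / HCLGE_TAG_SHIFT_MODE_EN_B definitions), purely for illustration:

	/* Not driver code: a simplified stand-in for hnae3_get_bit(). */
	static inline u8 example_get_bit(u8 value, unsigned int bit)
	{
		return (value >> bit) & 0x1U;
	}

	/*
	 * With a packed byte tx_cfg and hypothetical bit numbers:
	 *   accept_tag1 = example_get_bit(tx_cfg, 0);
	 *   insert_tag1 = example_get_bit(tx_cfg, 4);
	 */
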
vlan_cfg->accept_tag1 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_TAG1_B); + vlan_cfg->accept_tag2 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_TAG2_B); + vlan_cfg->accept_untag1 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_UNTAG1_B); + vlan_cfg->accept_untag2 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_UNTAG2_B); + vlan_cfg->insert_tag1 = hnae3_get_bit(tx_cfg, HCLGE_PORT_INS_TAG1_EN_B); + vlan_cfg->insert_tag2 = hnae3_get_bit(tx_cfg, HCLGE_PORT_INS_TAG2_EN_B); + vlan_cfg->shift_tag = hnae3_get_bit(tx_cfg, HCLGE_TAG_SHIFT_MODE_EN_B); + + return 0; +} + +static int hclge_get_vlan_filter_config_cmd(struct hclge_dev *hdev, + u8 vlan_type, u8 vf_id, + struct hclge_desc *desc) +{ + struct hclge_vlan_filter_ctrl_cmd *req; + int ret; + + hclge_cmd_setup_basic_desc(desc, HCLGE_OPC_VLAN_FILTER_CTRL, true); + req = (struct hclge_vlan_filter_ctrl_cmd *)desc->data; + req->vlan_type = vlan_type; + req->vf_id = vf_id; + + ret = hclge_cmd_send(&hdev->hw, desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "failed to get vport%u vlan filter config, ret = %d.\n", + vf_id, ret); + + return ret; +} + +static int hclge_get_vlan_filter_state(struct hclge_dev *hdev, u8 vlan_type, + u8 vf_id, u8 *vlan_fe) +{ + struct hclge_vlan_filter_ctrl_cmd *req; + struct hclge_desc desc; + int ret; + + ret = hclge_get_vlan_filter_config_cmd(hdev, vlan_type, vf_id, &desc); + if (ret) + return ret; + + req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data; + *vlan_fe = req->vlan_fe; + + return 0; +} + +static int hclge_get_port_vlan_filter_bypass_state(struct hclge_dev *hdev, + u8 vf_id, u8 *bypass_en) +{ + struct hclge_port_vlan_filter_bypass_cmd *req; + struct hclge_desc desc; + int ret; + + if (!test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, hdev->ae_dev->caps)) + return 0; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PORT_VLAN_BYPASS, true); + req = (struct hclge_port_vlan_filter_bypass_cmd *)desc.data; + req->vf_id = vf_id; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get vport%u port vlan filter bypass state, ret = %d.\n", + vf_id, ret); + return ret; + } + + *bypass_en = hnae3_get_bit(req->bypass_state, HCLGE_INGRESS_BYPASS_B); + + return 0; +} + +static const struct hclge_dbg_item vlan_filter_items[] = { + { "FUNC_ID", 2 }, + { "I_VF_VLAN_FILTER", 2 }, + { "E_VF_VLAN_FILTER", 2 }, + { "PORT_VLAN_FILTER_BYPASS", 0 } +}; + +static const struct hclge_dbg_item vlan_offload_items[] = { + { "FUNC_ID", 2 }, + { "PVID", 4 }, + { "ACCEPT_TAG1", 2 }, + { "ACCEPT_TAG2", 2 }, + { "ACCEPT_UNTAG1", 2 }, + { "ACCEPT_UNTAG2", 2 }, + { "INSERT_TAG1", 2 }, + { "INSERT_TAG2", 2 }, + { "SHIFT_TAG", 2 }, + { "STRIP_TAG1", 2 }, + { "STRIP_TAG2", 2 }, + { "DROP_TAG1", 2 }, + { "DROP_TAG2", 2 }, + { "PRI_ONLY_TAG1", 2 }, + { "PRI_ONLY_TAG2", 0 } +}; + +static int hclge_dbg_dump_vlan_filter_config(struct hclge_dev *hdev, char *buf, + int len, int *pos) +{ + char content[HCLGE_DBG_VLAN_FLTR_INFO_LEN], str_id[HCLGE_DBG_ID_LEN]; + const char *result[ARRAY_SIZE(vlan_filter_items)]; + u8 i, j, vlan_fe, bypass, ingress, egress; + u8 func_num = pci_num_vf(hdev->pdev) + 1; /* pf and enabled vf num */ + int ret; + + ret = hclge_get_vlan_filter_state(hdev, HCLGE_FILTER_TYPE_PORT, 0, + &vlan_fe); + if (ret) + return ret; + ingress = vlan_fe & HCLGE_FILTER_FE_NIC_INGRESS_B; + egress = vlan_fe & HCLGE_FILTER_FE_NIC_EGRESS_B ? 
1 : 0; + + *pos += scnprintf(buf, len, "I_PORT_VLAN_FILTER: %s\n", + state_str[ingress]); + *pos += scnprintf(buf + *pos, len - *pos, "E_PORT_VLAN_FILTER: %s\n", + state_str[egress]); + + hclge_dbg_fill_content(content, sizeof(content), vlan_filter_items, + NULL, ARRAY_SIZE(vlan_filter_items)); + *pos += scnprintf(buf + *pos, len - *pos, "%s", content); + + for (i = 0; i < func_num; i++) { + ret = hclge_get_vlan_filter_state(hdev, HCLGE_FILTER_TYPE_VF, i, + &vlan_fe); + if (ret) + return ret; + + ingress = vlan_fe & HCLGE_FILTER_FE_NIC_INGRESS_B; + egress = vlan_fe & HCLGE_FILTER_FE_NIC_EGRESS_B ? 1 : 0; + ret = hclge_get_port_vlan_filter_bypass_state(hdev, i, &bypass); + if (ret) + return ret; + j = 0; + result[j++] = hclge_dbg_get_func_id_str(str_id, i); + result[j++] = state_str[ingress]; + result[j++] = state_str[egress]; + result[j++] = + test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, + hdev->ae_dev->caps) ? state_str[bypass] : "NA"; + hclge_dbg_fill_content(content, sizeof(content), + vlan_filter_items, result, + ARRAY_SIZE(vlan_filter_items)); + *pos += scnprintf(buf + *pos, len - *pos, "%s", content); + } + *pos += scnprintf(buf + *pos, len - *pos, "\n"); + + return 0; +} + +static int hclge_dbg_dump_vlan_offload_config(struct hclge_dev *hdev, char *buf, + int len, int *pos) +{ + char str_id[HCLGE_DBG_ID_LEN], str_pvid[HCLGE_DBG_ID_LEN]; + const char *result[ARRAY_SIZE(vlan_offload_items)]; + char content[HCLGE_DBG_VLAN_OFFLOAD_INFO_LEN]; + u8 func_num = pci_num_vf(hdev->pdev) + 1; /* pf and enabled vf num */ + struct hclge_dbg_vlan_cfg vlan_cfg; + int ret; + u8 i, j; + + hclge_dbg_fill_content(content, sizeof(content), vlan_offload_items, + NULL, ARRAY_SIZE(vlan_offload_items)); + *pos += scnprintf(buf + *pos, len - *pos, "%s", content); + + for (i = 0; i < func_num; i++) { + ret = hclge_get_vlan_tx_offload_cfg(hdev, i, &vlan_cfg); + if (ret) + return ret; + + ret = hclge_get_vlan_rx_offload_cfg(hdev, i, &vlan_cfg); + if (ret) + return ret; + + sprintf(str_pvid, "%u", vlan_cfg.pvid); + j = 0; + result[j++] = hclge_dbg_get_func_id_str(str_id, i); + result[j++] = str_pvid; + result[j++] = state_str[vlan_cfg.accept_tag1]; + result[j++] = state_str[vlan_cfg.accept_tag2]; + result[j++] = state_str[vlan_cfg.accept_untag1]; + result[j++] = state_str[vlan_cfg.accept_untag2]; + result[j++] = state_str[vlan_cfg.insert_tag1]; + result[j++] = state_str[vlan_cfg.insert_tag2]; + result[j++] = state_str[vlan_cfg.shift_tag]; + result[j++] = state_str[vlan_cfg.strip_tag1]; + result[j++] = state_str[vlan_cfg.strip_tag2]; + result[j++] = state_str[vlan_cfg.drop_tag1]; + result[j++] = state_str[vlan_cfg.drop_tag2]; + result[j++] = state_str[vlan_cfg.pri_only1]; + result[j++] = state_str[vlan_cfg.pri_only2]; + + hclge_dbg_fill_content(content, sizeof(content), + vlan_offload_items, result, + ARRAY_SIZE(vlan_offload_items)); + *pos += scnprintf(buf + *pos, len - *pos, "%s", content); + } + + return 0; +} + +static int hclge_dbg_dump_vlan_config(struct hclge_dev *hdev, char *buf, + int len) +{ + int pos = 0; + int ret; + + ret = hclge_dbg_dump_vlan_filter_config(hdev, buf, len, &pos); + if (ret) + return ret; + + return hclge_dbg_dump_vlan_offload_config(hdev, buf, len, &pos); +} + +static int hclge_dbg_dump_ptp_info(struct hclge_dev *hdev, char *buf, int len) +{ + struct hclge_ptp *ptp = hdev->ptp; + u32 sw_cfg = ptp->ptp_cfg; + unsigned int tx_start; + unsigned int last_rx; + int pos = 0; + u32 hw_cfg; + int ret; + + pos += scnprintf(buf + pos, len - pos, "phc %s's debug info:\n", + ptp->info.name); + 
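As an aside on the output convention: every dump helper in this file appends to its buffer with the same bounded idiom — scnprintf() returns the number of characters actually stored (excluding the trailing NUL) and never writes more than the size it is given, so the running offset can be fed straight back in as the next write position without separate overflow checks. A minimal, self-contained sketch of the idiom, where example_dump() is a hypothetical name rather than a driver function:

	#include <linux/kernel.h>	/* scnprintf() */

	static int example_dump(char *buf, int len)
	{
		int pos = 0;

		/* each call appends at buf + pos and may use at most len - pos bytes */
		pos += scnprintf(buf + pos, len - pos, "first: %d\n", 1);
		pos += scnprintf(buf + pos, len - pos, "second: %d\n", 2);

		return pos;	/* total characters written, like the helpers that return pos */
	}
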
pos += scnprintf(buf + pos, len - pos, "ptp enable: %s\n", + test_bit(HCLGE_PTP_FLAG_EN, &ptp->flags) ? + "yes" : "no"); + pos += scnprintf(buf + pos, len - pos, "ptp tx enable: %s\n", + test_bit(HCLGE_PTP_FLAG_TX_EN, &ptp->flags) ? + "yes" : "no"); + pos += scnprintf(buf + pos, len - pos, "ptp rx enable: %s\n", + test_bit(HCLGE_PTP_FLAG_RX_EN, &ptp->flags) ? + "yes" : "no"); + + last_rx = jiffies_to_msecs(ptp->last_rx); + pos += scnprintf(buf + pos, len - pos, "last rx time: %lu.%lu\n", + last_rx / MSEC_PER_SEC, last_rx % MSEC_PER_SEC); + pos += scnprintf(buf + pos, len - pos, "rx count: %lu\n", ptp->rx_cnt); + + tx_start = jiffies_to_msecs(ptp->tx_start); + pos += scnprintf(buf + pos, len - pos, "last tx start time: %lu.%lu\n", + tx_start / MSEC_PER_SEC, tx_start % MSEC_PER_SEC); + pos += scnprintf(buf + pos, len - pos, "tx count: %lu\n", ptp->tx_cnt); + pos += scnprintf(buf + pos, len - pos, "tx skipped count: %lu\n", + ptp->tx_skipped); + pos += scnprintf(buf + pos, len - pos, "tx timeout count: %lu\n", + ptp->tx_timeout); + pos += scnprintf(buf + pos, len - pos, "last tx seqid: %u\n", + ptp->last_tx_seqid); + + ret = hclge_ptp_cfg_qry(hdev, &hw_cfg); + if (ret) + return ret; + + pos += scnprintf(buf + pos, len - pos, "sw_cfg: %#x, hw_cfg: %#x\n", + sw_cfg, hw_cfg); + + pos += scnprintf(buf + pos, len - pos, "tx type: %d, rx filter: %d\n", + ptp->ts_cfg.tx_type, ptp->ts_cfg.rx_filter); + + return 0; +} + +static int hclge_dbg_dump_mac_uc(struct hclge_dev *hdev, char *buf, int len) +{ + hclge_dbg_dump_mac_list(hdev, buf, len, true); + + return 0; +} + +static int hclge_dbg_dump_mac_mc(struct hclge_dev *hdev, char *buf, int len) +{ + hclge_dbg_dump_mac_list(hdev, buf, len, false); + + return 0; +} + +static const struct hclge_dbg_func hclge_dbg_cmd_func[] = { + { + .cmd = HNAE3_DBG_CMD_TM_NODES, + .dbg_dump = hclge_dbg_dump_tm_nodes, + }, + { + .cmd = HNAE3_DBG_CMD_TM_PRI, + .dbg_dump = hclge_dbg_dump_tm_pri, + }, + { + .cmd = HNAE3_DBG_CMD_TM_QSET, + .dbg_dump = hclge_dbg_dump_tm_qset, + }, + { + .cmd = HNAE3_DBG_CMD_TM_MAP, + .dbg_dump = hclge_dbg_dump_tm_map, + }, + { + .cmd = HNAE3_DBG_CMD_TM_PG, + .dbg_dump = hclge_dbg_dump_tm_pg, + }, + { + .cmd = HNAE3_DBG_CMD_TM_PORT, + .dbg_dump = hclge_dbg_dump_tm_port, + }, + { + .cmd = HNAE3_DBG_CMD_TC_SCH_INFO, + .dbg_dump = hclge_dbg_dump_tc, + }, + { + .cmd = HNAE3_DBG_CMD_QOS_PAUSE_CFG, + .dbg_dump = hclge_dbg_dump_qos_pause_cfg, + }, + { + .cmd = HNAE3_DBG_CMD_QOS_PRI_MAP, + .dbg_dump = hclge_dbg_dump_qos_pri_map, + }, + { + .cmd = HNAE3_DBG_CMD_QOS_DSCP_MAP, + .dbg_dump = hclge_dbg_dump_qos_dscp_map, + }, + { + .cmd = HNAE3_DBG_CMD_QOS_BUF_CFG, + .dbg_dump = hclge_dbg_dump_qos_buf_cfg, + }, + { + .cmd = HNAE3_DBG_CMD_MAC_UC, + .dbg_dump = hclge_dbg_dump_mac_uc, + }, + { + .cmd = HNAE3_DBG_CMD_MAC_MC, + .dbg_dump = hclge_dbg_dump_mac_mc, + }, + { + .cmd = HNAE3_DBG_CMD_MNG_TBL, + .dbg_dump = hclge_dbg_dump_mng_table, + }, + { + .cmd = HNAE3_DBG_CMD_LOOPBACK, + .dbg_dump = hclge_dbg_dump_loopback, + }, + { + .cmd = HNAE3_DBG_CMD_PTP_INFO, + .dbg_dump = hclge_dbg_dump_ptp_info, + }, + { + .cmd = HNAE3_DBG_CMD_INTERRUPT_INFO, + .dbg_dump = hclge_dbg_dump_interrupt, + }, + { + .cmd = HNAE3_DBG_CMD_RESET_INFO, + .dbg_dump = hclge_dbg_dump_rst_info, + }, + { + .cmd = HNAE3_DBG_CMD_IMP_INFO, + .dbg_dump = hclge_dbg_get_imp_stats_info, + }, + { + .cmd = HNAE3_DBG_CMD_NCL_CONFIG, + .dbg_dump = hclge_dbg_dump_ncl_config, + }, + { + .cmd = HNAE3_DBG_CMD_REG_BIOS_COMMON, + .dbg_dump_reg = hclge_dbg_dump_reg_cmd, + }, + { + .cmd = 
HNAE3_DBG_CMD_REG_SSU, + .dbg_dump_reg = hclge_dbg_dump_reg_cmd, + }, + { + .cmd = HNAE3_DBG_CMD_REG_IGU_EGU, + .dbg_dump_reg = hclge_dbg_dump_reg_cmd, + }, + { + .cmd = HNAE3_DBG_CMD_REG_RPU, + .dbg_dump_reg = hclge_dbg_dump_reg_cmd, + }, + { + .cmd = HNAE3_DBG_CMD_REG_NCSI, + .dbg_dump_reg = hclge_dbg_dump_reg_cmd, + }, + { + .cmd = HNAE3_DBG_CMD_REG_RTC, + .dbg_dump_reg = hclge_dbg_dump_reg_cmd, + }, + { + .cmd = HNAE3_DBG_CMD_REG_PPP, + .dbg_dump_reg = hclge_dbg_dump_reg_cmd, + }, + { + .cmd = HNAE3_DBG_CMD_REG_RCB, + .dbg_dump_reg = hclge_dbg_dump_reg_cmd, + }, + { + .cmd = HNAE3_DBG_CMD_REG_TQP, + .dbg_dump_reg = hclge_dbg_dump_reg_cmd, + }, + { + .cmd = HNAE3_DBG_CMD_REG_MAC, + .dbg_dump = hclge_dbg_dump_mac, + }, + { + .cmd = HNAE3_DBG_CMD_REG_DCB, + .dbg_dump = hclge_dbg_dump_dcb, + }, + { + .cmd = HNAE3_DBG_CMD_FD_TCAM, + .dbg_dump = hclge_dbg_dump_fd_tcam, + }, + { + .cmd = HNAE3_DBG_CMD_MAC_TNL_STATUS, + .dbg_dump = hclge_dbg_dump_mac_tnl_status, + }, + { + .cmd = HNAE3_DBG_CMD_SERV_INFO, + .dbg_dump = hclge_dbg_dump_serv_info, + }, + { + .cmd = HNAE3_DBG_CMD_VLAN_CONFIG, + .dbg_dump = hclge_dbg_dump_vlan_config, + }, + { + .cmd = HNAE3_DBG_CMD_FD_COUNTER, + .dbg_dump = hclge_dbg_dump_fd_counter, + }, + { + .cmd = HNAE3_DBG_CMD_UMV_INFO, + .dbg_dump = hclge_dbg_dump_umv_info, + }, +}; + +int hclge_dbg_read_cmd(struct hnae3_handle *handle, enum hnae3_dbg_cmd cmd, + char *buf, int len) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + const struct hclge_dbg_func *cmd_func; + struct hclge_dev *hdev = vport->back; + u32 i; + + for (i = 0; i < ARRAY_SIZE(hclge_dbg_cmd_func); i++) { + if (cmd == hclge_dbg_cmd_func[i].cmd) { + cmd_func = &hclge_dbg_cmd_func[i]; + if (cmd_func->dbg_dump) + return cmd_func->dbg_dump(hdev, buf, len); + else + return cmd_func->dbg_dump_reg(hdev, cmd, buf, + len); + } + } + + dev_err(&hdev->pdev->dev, "invalid command(%d)\n", cmd); + return -EINVAL; +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h new file mode 100644 index 000000000..724052928 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h @@ -0,0 +1,774 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2018-2019 Hisilicon Limited. 
*/ + +#ifndef __HCLGE_DEBUGFS_H +#define __HCLGE_DEBUGFS_H + +#include <linux/etherdevice.h> +#include "hclge_cmd.h" + +#define HCLGE_DBG_MNG_TBL_MAX 64 + +#define HCLGE_DBG_MNG_VLAN_MASK_B BIT(0) +#define HCLGE_DBG_MNG_MAC_MASK_B BIT(1) +#define HCLGE_DBG_MNG_ETHER_MASK_B BIT(2) +#define HCLGE_DBG_MNG_E_TYPE_B BIT(11) +#define HCLGE_DBG_MNG_DROP_B BIT(13) +#define HCLGE_DBG_MNG_VLAN_TAG 0x0FFF +#define HCLGE_DBG_MNG_PF_ID 0x0007 +#define HCLGE_DBG_MNG_VF_ID 0x00FF + +/* Get DFX BD number offset */ +#define HCLGE_DBG_DFX_BIOS_OFFSET 1 +#define HCLGE_DBG_DFX_SSU_0_OFFSET 2 +#define HCLGE_DBG_DFX_SSU_1_OFFSET 3 +#define HCLGE_DBG_DFX_IGU_OFFSET 4 +#define HCLGE_DBG_DFX_RPU_0_OFFSET 5 + +#define HCLGE_DBG_DFX_RPU_1_OFFSET 6 +#define HCLGE_DBG_DFX_NCSI_OFFSET 7 +#define HCLGE_DBG_DFX_RTC_OFFSET 8 +#define HCLGE_DBG_DFX_PPP_OFFSET 9 +#define HCLGE_DBG_DFX_RCB_OFFSET 10 +#define HCLGE_DBG_DFX_TQP_OFFSET 11 + +#define HCLGE_DBG_DFX_SSU_2_OFFSET 12 + +struct hclge_qos_pri_map_cmd { + u8 pri0_tc : 4, + pri1_tc : 4; + u8 pri2_tc : 4, + pri3_tc : 4; + u8 pri4_tc : 4, + pri5_tc : 4; + u8 pri6_tc : 4, + pri7_tc : 4; + u8 vlan_pri : 4, + rev : 4; +}; + +struct hclge_dbg_bitmap_cmd { + union { + u8 bitmap; + struct { + u8 bit0 : 1, + bit1 : 1, + bit2 : 1, + bit3 : 1, + bit4 : 1, + bit5 : 1, + bit6 : 1, + bit7 : 1; + }; + }; +}; + +struct hclge_dbg_reg_common_msg { + int msg_num; + int offset; + enum hclge_opcode_type cmd; +}; + +struct hclge_dbg_tcam_msg { + u8 stage; + u32 loc; +}; + +#define HCLGE_DBG_MAX_DFX_MSG_LEN 60 +struct hclge_dbg_dfx_message { + int flag; + char message[HCLGE_DBG_MAX_DFX_MSG_LEN]; +}; + +#define HCLGE_DBG_MAC_REG_TYPE_LEN 32 +struct hclge_dbg_reg_type_info { + enum hnae3_dbg_cmd cmd; + const struct hclge_dbg_dfx_message *dfx_msg; + struct hclge_dbg_reg_common_msg reg_msg; +}; + +struct hclge_dbg_func { + enum hnae3_dbg_cmd cmd; + int (*dbg_dump)(struct hclge_dev *hdev, char *buf, int len); + int (*dbg_dump_reg)(struct hclge_dev *hdev, enum hnae3_dbg_cmd cmd, + char *buf, int len); +}; + +struct hclge_dbg_status_dfx_info { + u32 offset; + char message[HCLGE_DBG_MAX_DFX_MSG_LEN]; +}; + +static const struct hclge_dbg_dfx_message hclge_dbg_bios_common_reg[] = { + {false, "Reserved"}, + {true, "BP_CPU_STATE"}, + {true, "DFX_MSIX_INFO_NIC_0"}, + {true, "DFX_MSIX_INFO_NIC_1"}, + {true, "DFX_MSIX_INFO_NIC_2"}, + {true, "DFX_MSIX_INFO_NIC_3"}, + + {true, "DFX_MSIX_INFO_ROC_0"}, + {true, "DFX_MSIX_INFO_ROC_1"}, + {true, "DFX_MSIX_INFO_ROC_2"}, + {true, "DFX_MSIX_INFO_ROC_3"}, + {false, "Reserved"}, + {false, "Reserved"}, +}; + +static const struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_0[] = { + {false, "Reserved"}, + {true, "SSU_ETS_PORT_STATUS"}, + {true, "SSU_ETS_TCG_STATUS"}, + {false, "Reserved"}, + {false, "Reserved"}, + {true, "SSU_BP_STATUS_0"}, + + {true, "SSU_BP_STATUS_1"}, + {true, "SSU_BP_STATUS_2"}, + {true, "SSU_BP_STATUS_3"}, + {true, "SSU_BP_STATUS_4"}, + {true, "SSU_BP_STATUS_5"}, + {true, "SSU_MAC_TX_PFC_IND"}, + + {true, "MAC_SSU_RX_PFC_IND"}, + {true, "BTMP_AGEING_ST_B0"}, + {true, "BTMP_AGEING_ST_B1"}, + {true, "BTMP_AGEING_ST_B2"}, + {false, "Reserved"}, + {false, "Reserved"}, + + {true, "FULL_DROP_NUM"}, + {true, "PART_DROP_NUM"}, + {true, "PPP_KEY_DROP_NUM"}, + {true, "PPP_RLT_DROP_NUM"}, + {true, "LO_PRI_UNICAST_RLT_DROP_NUM"}, + {true, "HI_PRI_MULTICAST_RLT_DROP_NUM"}, + + {true, "LO_PRI_MULTICAST_RLT_DROP_NUM"}, + {true, "NCSI_PACKET_CURR_BUFFER_CNT"}, + {true, "BTMP_AGEING_RLS_CNT_BANK0"}, + {true, "BTMP_AGEING_RLS_CNT_BANK1"}, + {true, 
"BTMP_AGEING_RLS_CNT_BANK2"}, + {true, "SSU_MB_RD_RLT_DROP_CNT"}, + + {true, "SSU_PPP_MAC_KEY_NUM_L"}, + {true, "SSU_PPP_MAC_KEY_NUM_H"}, + {true, "SSU_PPP_HOST_KEY_NUM_L"}, + {true, "SSU_PPP_HOST_KEY_NUM_H"}, + {true, "PPP_SSU_MAC_RLT_NUM_L"}, + {true, "PPP_SSU_MAC_RLT_NUM_H"}, + + {true, "PPP_SSU_HOST_RLT_NUM_L"}, + {true, "PPP_SSU_HOST_RLT_NUM_H"}, + {true, "NCSI_RX_PACKET_IN_CNT_L"}, + {true, "NCSI_RX_PACKET_IN_CNT_H"}, + {true, "NCSI_TX_PACKET_OUT_CNT_L"}, + {true, "NCSI_TX_PACKET_OUT_CNT_H"}, + + {true, "SSU_KEY_DROP_NUM"}, + {true, "MB_UNCOPY_NUM"}, + {true, "RX_OQ_DROP_PKT_CNT"}, + {true, "TX_OQ_DROP_PKT_CNT"}, + {true, "BANK_UNBALANCE_DROP_CNT"}, + {true, "BANK_UNBALANCE_RX_DROP_CNT"}, + + {true, "NIC_L2_ERR_DROP_PKT_CNT"}, + {true, "ROC_L2_ERR_DROP_PKT_CNT"}, + {true, "NIC_L2_ERR_DROP_PKT_CNT_RX"}, + {true, "ROC_L2_ERR_DROP_PKT_CNT_RX"}, + {true, "RX_OQ_GLB_DROP_PKT_CNT"}, + {false, "Reserved"}, + + {true, "LO_PRI_UNICAST_CUR_CNT"}, + {true, "HI_PRI_MULTICAST_CUR_CNT"}, + {true, "LO_PRI_MULTICAST_CUR_CNT"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, +}; + +static const struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_1[] = { + {true, "prt_id"}, + {true, "PACKET_TC_CURR_BUFFER_CNT_0"}, + {true, "PACKET_TC_CURR_BUFFER_CNT_1"}, + {true, "PACKET_TC_CURR_BUFFER_CNT_2"}, + {true, "PACKET_TC_CURR_BUFFER_CNT_3"}, + {true, "PACKET_TC_CURR_BUFFER_CNT_4"}, + + {true, "PACKET_TC_CURR_BUFFER_CNT_5"}, + {true, "PACKET_TC_CURR_BUFFER_CNT_6"}, + {true, "PACKET_TC_CURR_BUFFER_CNT_7"}, + {true, "PACKET_CURR_BUFFER_CNT"}, + {false, "Reserved"}, + {false, "Reserved"}, + + {true, "RX_PACKET_IN_CNT_L"}, + {true, "RX_PACKET_IN_CNT_H"}, + {true, "RX_PACKET_OUT_CNT_L"}, + {true, "RX_PACKET_OUT_CNT_H"}, + {true, "TX_PACKET_IN_CNT_L"}, + {true, "TX_PACKET_IN_CNT_H"}, + + {true, "TX_PACKET_OUT_CNT_L"}, + {true, "TX_PACKET_OUT_CNT_H"}, + {true, "ROC_RX_PACKET_IN_CNT_L"}, + {true, "ROC_RX_PACKET_IN_CNT_H"}, + {true, "ROC_TX_PACKET_OUT_CNT_L"}, + {true, "ROC_TX_PACKET_OUT_CNT_H"}, + + {true, "RX_PACKET_TC_IN_CNT_0_L"}, + {true, "RX_PACKET_TC_IN_CNT_0_H"}, + {true, "RX_PACKET_TC_IN_CNT_1_L"}, + {true, "RX_PACKET_TC_IN_CNT_1_H"}, + {true, "RX_PACKET_TC_IN_CNT_2_L"}, + {true, "RX_PACKET_TC_IN_CNT_2_H"}, + + {true, "RX_PACKET_TC_IN_CNT_3_L"}, + {true, "RX_PACKET_TC_IN_CNT_3_H"}, + {true, "RX_PACKET_TC_IN_CNT_4_L"}, + {true, "RX_PACKET_TC_IN_CNT_4_H"}, + {true, "RX_PACKET_TC_IN_CNT_5_L"}, + {true, "RX_PACKET_TC_IN_CNT_5_H"}, + + {true, "RX_PACKET_TC_IN_CNT_6_L"}, + {true, "RX_PACKET_TC_IN_CNT_6_H"}, + {true, "RX_PACKET_TC_IN_CNT_7_L"}, + {true, "RX_PACKET_TC_IN_CNT_7_H"}, + {true, "RX_PACKET_TC_OUT_CNT_0_L"}, + {true, "RX_PACKET_TC_OUT_CNT_0_H"}, + + {true, "RX_PACKET_TC_OUT_CNT_1_L"}, + {true, "RX_PACKET_TC_OUT_CNT_1_H"}, + {true, "RX_PACKET_TC_OUT_CNT_2_L"}, + {true, "RX_PACKET_TC_OUT_CNT_2_H"}, + {true, "RX_PACKET_TC_OUT_CNT_3_L"}, + {true, "RX_PACKET_TC_OUT_CNT_3_H"}, + + {true, "RX_PACKET_TC_OUT_CNT_4_L"}, + {true, "RX_PACKET_TC_OUT_CNT_4_H"}, + {true, "RX_PACKET_TC_OUT_CNT_5_L"}, + {true, "RX_PACKET_TC_OUT_CNT_5_H"}, + {true, "RX_PACKET_TC_OUT_CNT_6_L"}, + {true, "RX_PACKET_TC_OUT_CNT_6_H"}, + + {true, "RX_PACKET_TC_OUT_CNT_7_L"}, + {true, "RX_PACKET_TC_OUT_CNT_7_H"}, + {true, "TX_PACKET_TC_IN_CNT_0_L"}, + {true, "TX_PACKET_TC_IN_CNT_0_H"}, + {true, "TX_PACKET_TC_IN_CNT_1_L"}, + {true, "TX_PACKET_TC_IN_CNT_1_H"}, + + {true, "TX_PACKET_TC_IN_CNT_2_L"}, + {true, "TX_PACKET_TC_IN_CNT_2_H"}, + {true, "TX_PACKET_TC_IN_CNT_3_L"}, + {true, "TX_PACKET_TC_IN_CNT_3_H"}, + {true, 
"TX_PACKET_TC_IN_CNT_4_L"}, + {true, "TX_PACKET_TC_IN_CNT_4_H"}, + + {true, "TX_PACKET_TC_IN_CNT_5_L"}, + {true, "TX_PACKET_TC_IN_CNT_5_H"}, + {true, "TX_PACKET_TC_IN_CNT_6_L"}, + {true, "TX_PACKET_TC_IN_CNT_6_H"}, + {true, "TX_PACKET_TC_IN_CNT_7_L"}, + {true, "TX_PACKET_TC_IN_CNT_7_H"}, + + {true, "TX_PACKET_TC_OUT_CNT_0_L"}, + {true, "TX_PACKET_TC_OUT_CNT_0_H"}, + {true, "TX_PACKET_TC_OUT_CNT_1_L"}, + {true, "TX_PACKET_TC_OUT_CNT_1_H"}, + {true, "TX_PACKET_TC_OUT_CNT_2_L"}, + {true, "TX_PACKET_TC_OUT_CNT_2_H"}, + + {true, "TX_PACKET_TC_OUT_CNT_3_L"}, + {true, "TX_PACKET_TC_OUT_CNT_3_H"}, + {true, "TX_PACKET_TC_OUT_CNT_4_L"}, + {true, "TX_PACKET_TC_OUT_CNT_4_H"}, + {true, "TX_PACKET_TC_OUT_CNT_5_L"}, + {true, "TX_PACKET_TC_OUT_CNT_5_H"}, + + {true, "TX_PACKET_TC_OUT_CNT_6_L"}, + {true, "TX_PACKET_TC_OUT_CNT_6_H"}, + {true, "TX_PACKET_TC_OUT_CNT_7_L"}, + {true, "TX_PACKET_TC_OUT_CNT_7_H"}, + {false, "Reserved"}, + {false, "Reserved"}, +}; + +static const struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_2[] = { + {true, "OQ_INDEX"}, + {true, "QUEUE_CNT"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, +}; + +static const struct hclge_dbg_dfx_message hclge_dbg_igu_egu_reg[] = { + {true, "prt_id"}, + {true, "IGU_RX_ERR_PKT"}, + {true, "IGU_RX_NO_SOF_PKT"}, + {true, "EGU_TX_1588_SHORT_PKT"}, + {true, "EGU_TX_1588_PKT"}, + {true, "EGU_TX_ERR_PKT"}, + + {true, "IGU_RX_OUT_L2_PKT"}, + {true, "IGU_RX_OUT_L3_PKT"}, + {true, "IGU_RX_OUT_L4_PKT"}, + {true, "IGU_RX_IN_L2_PKT"}, + {true, "IGU_RX_IN_L3_PKT"}, + {true, "IGU_RX_IN_L4_PKT"}, + + {true, "IGU_RX_EL3E_PKT"}, + {true, "IGU_RX_EL4E_PKT"}, + {true, "IGU_RX_L3E_PKT"}, + {true, "IGU_RX_L4E_PKT"}, + {true, "IGU_RX_ROCEE_PKT"}, + {true, "IGU_RX_OUT_UDP0_PKT"}, + + {true, "IGU_RX_IN_UDP0_PKT"}, + {true, "IGU_MC_CAR_DROP_PKT_L"}, + {true, "IGU_MC_CAR_DROP_PKT_H"}, + {true, "IGU_BC_CAR_DROP_PKT_L"}, + {true, "IGU_BC_CAR_DROP_PKT_H"}, + {false, "Reserved"}, + + {true, "IGU_RX_OVERSIZE_PKT_L"}, + {true, "IGU_RX_OVERSIZE_PKT_H"}, + {true, "IGU_RX_UNDERSIZE_PKT_L"}, + {true, "IGU_RX_UNDERSIZE_PKT_H"}, + {true, "IGU_RX_OUT_ALL_PKT_L"}, + {true, "IGU_RX_OUT_ALL_PKT_H"}, + + {true, "IGU_TX_OUT_ALL_PKT_L"}, + {true, "IGU_TX_OUT_ALL_PKT_H"}, + {true, "IGU_RX_UNI_PKT_L"}, + {true, "IGU_RX_UNI_PKT_H"}, + {true, "IGU_RX_MULTI_PKT_L"}, + {true, "IGU_RX_MULTI_PKT_H"}, + + {true, "IGU_RX_BROAD_PKT_L"}, + {true, "IGU_RX_BROAD_PKT_H"}, + {true, "EGU_TX_OUT_ALL_PKT_L"}, + {true, "EGU_TX_OUT_ALL_PKT_H"}, + {true, "EGU_TX_UNI_PKT_L"}, + {true, "EGU_TX_UNI_PKT_H"}, + + {true, "EGU_TX_MULTI_PKT_L"}, + {true, "EGU_TX_MULTI_PKT_H"}, + {true, "EGU_TX_BROAD_PKT_L"}, + {true, "EGU_TX_BROAD_PKT_H"}, + {true, "IGU_TX_KEY_NUM_L"}, + {true, "IGU_TX_KEY_NUM_H"}, + + {true, "IGU_RX_NON_TUN_PKT_L"}, + {true, "IGU_RX_NON_TUN_PKT_H"}, + {true, "IGU_RX_TUN_PKT_L"}, + {true, "IGU_RX_TUN_PKT_H"}, + {false, "Reserved"}, + {false, "Reserved"}, +}; + +static const struct hclge_dbg_dfx_message hclge_dbg_rpu_reg_0[] = { + {true, "tc_queue_num"}, + {true, "FSM_DFX_ST0"}, + {true, "FSM_DFX_ST1"}, + {true, "RPU_RX_PKT_DROP_CNT"}, + {true, "BUF_WAIT_TIMEOUT"}, + {true, "BUF_WAIT_TIMEOUT_QID"}, +}; + +static const struct hclge_dbg_dfx_message hclge_dbg_rpu_reg_1[] = { + {false, "Reserved"}, + {true, "FIFO_DFX_ST0"}, + {true, "FIFO_DFX_ST1"}, + {true, "FIFO_DFX_ST2"}, + {true, "FIFO_DFX_ST3"}, + {true, "FIFO_DFX_ST4"}, + + {true, "FIFO_DFX_ST5"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, 
+}; + +static const struct hclge_dbg_dfx_message hclge_dbg_ncsi_reg[] = { + {false, "Reserved"}, + {true, "NCSI_EGU_TX_FIFO_STS"}, + {true, "NCSI_PAUSE_STATUS"}, + {true, "NCSI_RX_CTRL_DMAC_ERR_CNT"}, + {true, "NCSI_RX_CTRL_SMAC_ERR_CNT"}, + {true, "NCSI_RX_CTRL_CKS_ERR_CNT"}, + + {true, "NCSI_RX_CTRL_PKT_CNT"}, + {true, "NCSI_RX_PT_DMAC_ERR_CNT"}, + {true, "NCSI_RX_PT_SMAC_ERR_CNT"}, + {true, "NCSI_RX_PT_PKT_CNT"}, + {true, "NCSI_RX_FCS_ERR_CNT"}, + {true, "NCSI_TX_CTRL_DMAC_ERR_CNT"}, + + {true, "NCSI_TX_CTRL_SMAC_ERR_CNT"}, + {true, "NCSI_TX_CTRL_PKT_CNT"}, + {true, "NCSI_TX_PT_DMAC_ERR_CNT"}, + {true, "NCSI_TX_PT_SMAC_ERR_CNT"}, + {true, "NCSI_TX_PT_PKT_CNT"}, + {true, "NCSI_TX_PT_PKT_TRUNC_CNT"}, + + {true, "NCSI_TX_PT_PKT_ERR_CNT"}, + {true, "NCSI_TX_CTRL_PKT_ERR_CNT"}, + {true, "NCSI_RX_CTRL_PKT_TRUNC_CNT"}, + {true, "NCSI_RX_CTRL_PKT_CFLIT_CNT"}, + {false, "Reserved"}, + {false, "Reserved"}, + + {true, "NCSI_MAC_RX_OCTETS_OK"}, + {true, "NCSI_MAC_RX_OCTETS_BAD"}, + {true, "NCSI_MAC_RX_UC_PKTS"}, + {true, "NCSI_MAC_RX_MC_PKTS"}, + {true, "NCSI_MAC_RX_BC_PKTS"}, + {true, "NCSI_MAC_RX_PKTS_64OCTETS"}, + + {true, "NCSI_MAC_RX_PKTS_65TO127OCTETS"}, + {true, "NCSI_MAC_RX_PKTS_128TO255OCTETS"}, + {true, "NCSI_MAC_RX_PKTS_255TO511OCTETS"}, + {true, "NCSI_MAC_RX_PKTS_512TO1023OCTETS"}, + {true, "NCSI_MAC_RX_PKTS_1024TO1518OCTETS"}, + {true, "NCSI_MAC_RX_PKTS_1519TOMAXOCTETS"}, + + {true, "NCSI_MAC_RX_FCS_ERRORS"}, + {true, "NCSI_MAC_RX_LONG_ERRORS"}, + {true, "NCSI_MAC_RX_JABBER_ERRORS"}, + {true, "NCSI_MAC_RX_RUNT_ERR_CNT"}, + {true, "NCSI_MAC_RX_SHORT_ERR_CNT"}, + {true, "NCSI_MAC_RX_FILT_PKT_CNT"}, + + {true, "NCSI_MAC_RX_OCTETS_TOTAL_FILT"}, + {true, "NCSI_MAC_TX_OCTETS_OK"}, + {true, "NCSI_MAC_TX_OCTETS_BAD"}, + {true, "NCSI_MAC_TX_UC_PKTS"}, + {true, "NCSI_MAC_TX_MC_PKTS"}, + {true, "NCSI_MAC_TX_BC_PKTS"}, + + {true, "NCSI_MAC_TX_PKTS_64OCTETS"}, + {true, "NCSI_MAC_TX_PKTS_65TO127OCTETS"}, + {true, "NCSI_MAC_TX_PKTS_128TO255OCTETS"}, + {true, "NCSI_MAC_TX_PKTS_256TO511OCTETS"}, + {true, "NCSI_MAC_TX_PKTS_512TO1023OCTETS"}, + {true, "NCSI_MAC_TX_PKTS_1024TO1518OCTETS"}, + + {true, "NCSI_MAC_TX_PKTS_1519TOMAXOCTETS"}, + {true, "NCSI_MAC_TX_UNDERRUN"}, + {true, "NCSI_MAC_TX_CRC_ERROR"}, + {true, "NCSI_MAC_TX_PAUSE_FRAMES"}, + {true, "NCSI_MAC_RX_PAD_PKTS"}, + {true, "NCSI_MAC_RX_PAUSE_FRAMES"}, +}; + +static const struct hclge_dbg_dfx_message hclge_dbg_rtc_reg[] = { + {false, "Reserved"}, + {true, "LGE_IGU_AFIFO_DFX_0"}, + {true, "LGE_IGU_AFIFO_DFX_1"}, + {true, "LGE_IGU_AFIFO_DFX_2"}, + {true, "LGE_IGU_AFIFO_DFX_3"}, + {true, "LGE_IGU_AFIFO_DFX_4"}, + + {true, "LGE_IGU_AFIFO_DFX_5"}, + {true, "LGE_IGU_AFIFO_DFX_6"}, + {true, "LGE_IGU_AFIFO_DFX_7"}, + {true, "LGE_EGU_AFIFO_DFX_0"}, + {true, "LGE_EGU_AFIFO_DFX_1"}, + {true, "LGE_EGU_AFIFO_DFX_2"}, + + {true, "LGE_EGU_AFIFO_DFX_3"}, + {true, "LGE_EGU_AFIFO_DFX_4"}, + {true, "LGE_EGU_AFIFO_DFX_5"}, + {true, "LGE_EGU_AFIFO_DFX_6"}, + {true, "LGE_EGU_AFIFO_DFX_7"}, + {true, "CGE_IGU_AFIFO_DFX_0"}, + + {true, "CGE_IGU_AFIFO_DFX_1"}, + {true, "CGE_EGU_AFIFO_DFX_0"}, + {true, "CGE_EGU_AFIFO_DFX_1"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, +}; + +static const struct hclge_dbg_dfx_message hclge_dbg_ppp_reg[] = { + {false, "Reserved"}, + {true, "DROP_FROM_PRT_PKT_CNT"}, + {true, "DROP_FROM_HOST_PKT_CNT"}, + {true, "DROP_TX_VLAN_PROC_CNT"}, + {true, "DROP_MNG_CNT"}, + {true, "DROP_FD_CNT"}, + + {true, "DROP_NO_DST_CNT"}, + {true, "DROP_MC_MBID_FULL_CNT"}, + {true, "DROP_SC_FILTERED"}, + {true, "PPP_MC_DROP_PKT_CNT"}, + 
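+	/* Many of the PPP statistics below are exported as "_L"/"_H" register
+	 * pairs, which by the usual convention in these tables are the low and
+	 * high 32-bit halves of a single 64-bit counter. Assuming that
+	 * convention, a consumer of the dumped values would recombine a pair
+	 * as:
+	 *
+	 *	u64 cnt = ((u64)cnt_h << 32) | cnt_l;
+	 */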
{true, "DROP_PT_CNT"}, + {true, "DROP_MAC_ANTI_SPOOF_CNT"}, + + {true, "DROP_IG_VFV_CNT"}, + {true, "DROP_IG_PRTV_CNT"}, + {true, "DROP_CNM_PFC_PAUSE_CNT"}, + {true, "DROP_TORUS_TC_CNT"}, + {true, "DROP_TORUS_LPBK_CNT"}, + {true, "PPP_HFS_STS"}, + + {true, "PPP_MC_RSLT_STS"}, + {true, "PPP_P3U_STS"}, + {true, "PPP_RSLT_DESCR_STS"}, + {true, "PPP_UMV_STS_0"}, + {true, "PPP_UMV_STS_1"}, + {true, "PPP_VFV_STS"}, + + {true, "PPP_GRO_KEY_CNT"}, + {true, "PPP_GRO_INFO_CNT"}, + {true, "PPP_GRO_DROP_CNT"}, + {true, "PPP_GRO_OUT_CNT"}, + {true, "PPP_GRO_KEY_MATCH_DATA_CNT"}, + {true, "PPP_GRO_KEY_MATCH_TCAM_CNT"}, + + {true, "PPP_GRO_INFO_MATCH_CNT"}, + {true, "PPP_GRO_FREE_ENTRY_CNT"}, + {true, "PPP_GRO_INNER_DFX_SIGNAL"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, + + {true, "GET_RX_PKT_CNT_L"}, + {true, "GET_RX_PKT_CNT_H"}, + {true, "GET_TX_PKT_CNT_L"}, + {true, "GET_TX_PKT_CNT_H"}, + {true, "SEND_UC_PRT2HOST_PKT_CNT_L"}, + {true, "SEND_UC_PRT2HOST_PKT_CNT_H"}, + + {true, "SEND_UC_PRT2PRT_PKT_CNT_L"}, + {true, "SEND_UC_PRT2PRT_PKT_CNT_H"}, + {true, "SEND_UC_HOST2HOST_PKT_CNT_L"}, + {true, "SEND_UC_HOST2HOST_PKT_CNT_H"}, + {true, "SEND_UC_HOST2PRT_PKT_CNT_L"}, + {true, "SEND_UC_HOST2PRT_PKT_CNT_H"}, + + {true, "SEND_MC_FROM_PRT_CNT_L"}, + {true, "SEND_MC_FROM_PRT_CNT_H"}, + {true, "SEND_MC_FROM_HOST_CNT_L"}, + {true, "SEND_MC_FROM_HOST_CNT_H"}, + {true, "SSU_MC_RD_CNT_L"}, + {true, "SSU_MC_RD_CNT_H"}, + + {true, "SSU_MC_DROP_CNT_L"}, + {true, "SSU_MC_DROP_CNT_H"}, + {true, "SSU_MC_RD_PKT_CNT_L"}, + {true, "SSU_MC_RD_PKT_CNT_H"}, + {true, "PPP_MC_2HOST_PKT_CNT_L"}, + {true, "PPP_MC_2HOST_PKT_CNT_H"}, + + {true, "PPP_MC_2PRT_PKT_CNT_L"}, + {true, "PPP_MC_2PRT_PKT_CNT_H"}, + {true, "NTSNOS_PKT_CNT_L"}, + {true, "NTSNOS_PKT_CNT_H"}, + {true, "NTUP_PKT_CNT_L"}, + {true, "NTUP_PKT_CNT_H"}, + + {true, "NTLCL_PKT_CNT_L"}, + {true, "NTLCL_PKT_CNT_H"}, + {true, "NTTGT_PKT_CNT_L"}, + {true, "NTTGT_PKT_CNT_H"}, + {true, "RTNS_PKT_CNT_L"}, + {true, "RTNS_PKT_CNT_H"}, + + {true, "RTLPBK_PKT_CNT_L"}, + {true, "RTLPBK_PKT_CNT_H"}, + {true, "NR_PKT_CNT_L"}, + {true, "NR_PKT_CNT_H"}, + {true, "RR_PKT_CNT_L"}, + {true, "RR_PKT_CNT_H"}, + + {true, "MNG_TBL_HIT_CNT_L"}, + {true, "MNG_TBL_HIT_CNT_H"}, + {true, "FD_TBL_HIT_CNT_L"}, + {true, "FD_TBL_HIT_CNT_H"}, + {true, "FD_LKUP_CNT_L"}, + {true, "FD_LKUP_CNT_H"}, + + {true, "BC_HIT_CNT_L"}, + {true, "BC_HIT_CNT_H"}, + {true, "UM_TBL_UC_HIT_CNT_L"}, + {true, "UM_TBL_UC_HIT_CNT_H"}, + {true, "UM_TBL_MC_HIT_CNT_L"}, + {true, "UM_TBL_MC_HIT_CNT_H"}, + + {true, "UM_TBL_VMDQ1_HIT_CNT_L"}, + {true, "UM_TBL_VMDQ1_HIT_CNT_H"}, + {true, "MTA_TBL_HIT_CNT_L"}, + {true, "MTA_TBL_HIT_CNT_H"}, + {true, "FWD_BONDING_HIT_CNT_L"}, + {true, "FWD_BONDING_HIT_CNT_H"}, + + {true, "PROMIS_TBL_HIT_CNT_L"}, + {true, "PROMIS_TBL_HIT_CNT_H"}, + {true, "GET_TUNL_PKT_CNT_L"}, + {true, "GET_TUNL_PKT_CNT_H"}, + {true, "GET_BMC_PKT_CNT_L"}, + {true, "GET_BMC_PKT_CNT_H"}, + + {true, "SEND_UC_PRT2BMC_PKT_CNT_L"}, + {true, "SEND_UC_PRT2BMC_PKT_CNT_H"}, + {true, "SEND_UC_HOST2BMC_PKT_CNT_L"}, + {true, "SEND_UC_HOST2BMC_PKT_CNT_H"}, + {true, "SEND_UC_BMC2HOST_PKT_CNT_L"}, + {true, "SEND_UC_BMC2HOST_PKT_CNT_H"}, + + {true, "SEND_UC_BMC2PRT_PKT_CNT_L"}, + {true, "SEND_UC_BMC2PRT_PKT_CNT_H"}, + {true, "PPP_MC_2BMC_PKT_CNT_L"}, + {true, "PPP_MC_2BMC_PKT_CNT_H"}, + {true, "VLAN_MIRR_CNT_L"}, + {true, "VLAN_MIRR_CNT_H"}, + + {true, "IG_MIRR_CNT_L"}, + {true, "IG_MIRR_CNT_H"}, + {true, "EG_MIRR_CNT_L"}, + {true, "EG_MIRR_CNT_H"}, + {true, "RX_DEFAULT_HOST_HIT_CNT_L"}, + {true, 
"RX_DEFAULT_HOST_HIT_CNT_H"}, + + {true, "LAN_PAIR_CNT_L"}, + {true, "LAN_PAIR_CNT_H"}, + {true, "UM_TBL_MC_HIT_PKT_CNT_L"}, + {true, "UM_TBL_MC_HIT_PKT_CNT_H"}, + {true, "MTA_TBL_HIT_PKT_CNT_L"}, + {true, "MTA_TBL_HIT_PKT_CNT_H"}, + + {true, "PROMIS_TBL_HIT_PKT_CNT_L"}, + {true, "PROMIS_TBL_HIT_PKT_CNT_H"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, +}; + +static const struct hclge_dbg_dfx_message hclge_dbg_rcb_reg[] = { + {false, "Reserved"}, + {true, "FSM_DFX_ST0"}, + {true, "FSM_DFX_ST1"}, + {true, "FSM_DFX_ST2"}, + {true, "FIFO_DFX_ST0"}, + {true, "FIFO_DFX_ST1"}, + + {true, "FIFO_DFX_ST2"}, + {true, "FIFO_DFX_ST3"}, + {true, "FIFO_DFX_ST4"}, + {true, "FIFO_DFX_ST5"}, + {true, "FIFO_DFX_ST6"}, + {true, "FIFO_DFX_ST7"}, + + {true, "FIFO_DFX_ST8"}, + {true, "FIFO_DFX_ST9"}, + {true, "FIFO_DFX_ST10"}, + {true, "FIFO_DFX_ST11"}, + {true, "Q_CREDIT_VLD_0"}, + {true, "Q_CREDIT_VLD_1"}, + + {true, "Q_CREDIT_VLD_2"}, + {true, "Q_CREDIT_VLD_3"}, + {true, "Q_CREDIT_VLD_4"}, + {true, "Q_CREDIT_VLD_5"}, + {true, "Q_CREDIT_VLD_6"}, + {true, "Q_CREDIT_VLD_7"}, + + {true, "Q_CREDIT_VLD_8"}, + {true, "Q_CREDIT_VLD_9"}, + {true, "Q_CREDIT_VLD_10"}, + {true, "Q_CREDIT_VLD_11"}, + {true, "Q_CREDIT_VLD_12"}, + {true, "Q_CREDIT_VLD_13"}, + + {true, "Q_CREDIT_VLD_14"}, + {true, "Q_CREDIT_VLD_15"}, + {true, "Q_CREDIT_VLD_16"}, + {true, "Q_CREDIT_VLD_17"}, + {true, "Q_CREDIT_VLD_18"}, + {true, "Q_CREDIT_VLD_19"}, + + {true, "Q_CREDIT_VLD_20"}, + {true, "Q_CREDIT_VLD_21"}, + {true, "Q_CREDIT_VLD_22"}, + {true, "Q_CREDIT_VLD_23"}, + {true, "Q_CREDIT_VLD_24"}, + {true, "Q_CREDIT_VLD_25"}, + + {true, "Q_CREDIT_VLD_26"}, + {true, "Q_CREDIT_VLD_27"}, + {true, "Q_CREDIT_VLD_28"}, + {true, "Q_CREDIT_VLD_29"}, + {true, "Q_CREDIT_VLD_30"}, + {true, "Q_CREDIT_VLD_31"}, + + {true, "GRO_BD_SERR_CNT"}, + {true, "GRO_CONTEXT_SERR_CNT"}, + {true, "RX_STASH_CFG_SERR_CNT"}, + {true, "AXI_RD_FBD_SERR_CNT"}, + {true, "GRO_BD_MERR_CNT"}, + {true, "GRO_CONTEXT_MERR_CNT"}, + + {true, "RX_STASH_CFG_MERR_CNT"}, + {true, "AXI_RD_FBD_MERR_CNT"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, +}; + +static const struct hclge_dbg_dfx_message hclge_dbg_tqp_reg[] = { + {true, "q_num"}, + {true, "RCB_CFG_RX_RING_TAIL"}, + {true, "RCB_CFG_RX_RING_HEAD"}, + {true, "RCB_CFG_RX_RING_FBDNUM"}, + {true, "RCB_CFG_RX_RING_OFFSET"}, + {true, "RCB_CFG_RX_RING_FBDOFFSET"}, + + {true, "RCB_CFG_RX_RING_PKTNUM_RECORD"}, + {true, "RCB_CFG_TX_RING_TAIL"}, + {true, "RCB_CFG_TX_RING_HEAD"}, + {true, "RCB_CFG_TX_RING_FBDNUM"}, + {true, "RCB_CFG_TX_RING_OFFSET"}, + {true, "RCB_CFG_TX_RING_EBDNUM"}, +}; + +#define HCLGE_DBG_INFO_LEN 256 +#define HCLGE_DBG_VLAN_FLTR_INFO_LEN 256 +#define HCLGE_DBG_VLAN_OFFLOAD_INFO_LEN 512 +#define HCLGE_DBG_ID_LEN 16 +#define HCLGE_DBG_ITEM_NAME_LEN 32 +#define HCLGE_DBG_DATA_STR_LEN 32 +#define HCLGE_DBG_TM_INFO_LEN 256 + +#define HCLGE_BILLION_NANO_SECONDS 1000000000 + +struct hclge_dbg_item { + char name[HCLGE_DBG_ITEM_NAME_LEN]; + u16 interval; /* blank numbers after the item */ +}; + +struct hclge_dbg_vlan_cfg { + u16 pvid; + u8 accept_tag1; + u8 accept_tag2; + u8 accept_untag1; + u8 accept_untag2; + u8 insert_tag1; + u8 insert_tag2; + u8 shift_tag; + u8 strip_tag1; + u8 strip_tag2; + u8 drop_tag1; + u8 drop_tag2; + u8 pri_only1; + u8 pri_only2; +}; + +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c new file mode 100644 index 
000000000..4c441e6a5 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c @@ -0,0 +1,134 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) 2021 Hisilicon Limited. */ + +#include <net/devlink.h> + +#include "hclge_devlink.h" + +static int hclge_devlink_info_get(struct devlink *devlink, + struct devlink_info_req *req, + struct netlink_ext_ack *extack) +{ +#define HCLGE_DEVLINK_FW_STRING_LEN 32 + struct hclge_devlink_priv *priv = devlink_priv(devlink); + char version_str[HCLGE_DEVLINK_FW_STRING_LEN]; + struct hclge_dev *hdev = priv->hdev; + int ret; + + ret = devlink_info_driver_name_put(req, KBUILD_MODNAME); + if (ret) + return ret; + + snprintf(version_str, sizeof(version_str), "%lu.%lu.%lu.%lu", + hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE3_MASK, + HNAE3_FW_VERSION_BYTE3_SHIFT), + hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE2_MASK, + HNAE3_FW_VERSION_BYTE2_SHIFT), + hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE1_MASK, + HNAE3_FW_VERSION_BYTE1_SHIFT), + hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE0_MASK, + HNAE3_FW_VERSION_BYTE0_SHIFT)); + + return devlink_info_version_running_put(req, + DEVLINK_INFO_VERSION_GENERIC_FW, + version_str); +} + +static int hclge_devlink_reload_down(struct devlink *devlink, bool netns_change, + enum devlink_reload_action action, + enum devlink_reload_limit limit, + struct netlink_ext_ack *extack) +{ + struct hclge_devlink_priv *priv = devlink_priv(devlink); + struct hclge_dev *hdev = priv->hdev; + struct hnae3_handle *h = &hdev->vport->nic; + struct pci_dev *pdev = hdev->pdev; + int ret; + + if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) { + dev_err(&pdev->dev, "reset is handling\n"); + return -EBUSY; + } + + switch (action) { + case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: + rtnl_lock(); + ret = hdev->nic_client->ops->reset_notify(h, HNAE3_DOWN_CLIENT); + if (ret) { + rtnl_unlock(); + return ret; + } + + ret = hdev->nic_client->ops->reset_notify(h, + HNAE3_UNINIT_CLIENT); + rtnl_unlock(); + return ret; + default: + return -EOPNOTSUPP; + } +} + +static int hclge_devlink_reload_up(struct devlink *devlink, + enum devlink_reload_action action, + enum devlink_reload_limit limit, + u32 *actions_performed, + struct netlink_ext_ack *extack) +{ + struct hclge_devlink_priv *priv = devlink_priv(devlink); + struct hclge_dev *hdev = priv->hdev; + struct hnae3_handle *h = &hdev->vport->nic; + int ret; + + *actions_performed = BIT(action); + switch (action) { + case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: + rtnl_lock(); + ret = hdev->nic_client->ops->reset_notify(h, HNAE3_INIT_CLIENT); + if (ret) { + rtnl_unlock(); + return ret; + } + + ret = hdev->nic_client->ops->reset_notify(h, HNAE3_UP_CLIENT); + rtnl_unlock(); + return ret; + default: + return -EOPNOTSUPP; + } +} + +static const struct devlink_ops hclge_devlink_ops = { + .info_get = hclge_devlink_info_get, + .reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT), + .reload_down = hclge_devlink_reload_down, + .reload_up = hclge_devlink_reload_up, +}; + +int hclge_devlink_init(struct hclge_dev *hdev) +{ + struct pci_dev *pdev = hdev->pdev; + struct hclge_devlink_priv *priv; + struct devlink *devlink; + + devlink = devlink_alloc(&hclge_devlink_ops, + sizeof(struct hclge_devlink_priv), &pdev->dev); + if (!devlink) + return -ENOMEM; + + priv = devlink_priv(devlink); + priv->hdev = hdev; + hdev->devlink = devlink; + + devlink_set_features(devlink, DEVLINK_F_RELOAD); + devlink_register(devlink); + return 0; +} + +void hclge_devlink_uninit(struct 
hclge_dev *hdev) +{ + struct devlink *devlink = hdev->devlink; + + devlink_unregister(devlink); + + devlink_free(devlink); +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.h new file mode 100644 index 000000000..918be0450 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2021 Hisilicon Limited. */ + +#ifndef __HCLGE_DEVLINK_H +#define __HCLGE_DEVLINK_H + +#include "hclge_main.h" + +struct hclge_devlink_priv { + struct hclge_dev *hdev; +}; + +int hclge_devlink_init(struct hclge_dev *hdev); +void hclge_devlink_uninit(struct hclge_dev *hdev); +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c new file mode 100644 index 000000000..6efd768cc --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c @@ -0,0 +1,2940 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) 2016-2017 Hisilicon Limited. */ + +#include "hclge_err.h" + +static const struct hclge_hw_error hclge_imp_tcm_ecc_int[] = { + { + .int_msk = BIT(1), + .msg = "imp_itcm0_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(3), + .msg = "imp_itcm1_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(5), + .msg = "imp_itcm2_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(7), + .msg = "imp_itcm3_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(9), + .msg = "imp_dtcm0_mem0_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(11), + .msg = "imp_dtcm0_mem1_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(13), + .msg = "imp_dtcm1_mem0_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(15), + .msg = "imp_dtcm1_mem1_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(17), + .msg = "imp_itcm4_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + /* sentinel */ + } +}; + +static const struct hclge_hw_error hclge_cmdq_nic_mem_ecc_int[] = { + { + .int_msk = BIT(1), + .msg = "cmdq_nic_rx_depth_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(3), + .msg = "cmdq_nic_tx_depth_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(5), + .msg = "cmdq_nic_rx_tail_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(7), + .msg = "cmdq_nic_tx_tail_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(9), + .msg = "cmdq_nic_rx_head_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(11), + .msg = "cmdq_nic_tx_head_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(13), + .msg = "cmdq_nic_rx_addr_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(15), + .msg = "cmdq_nic_tx_addr_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(17), + .msg = "cmdq_rocee_rx_depth_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(19), + .msg = "cmdq_rocee_tx_depth_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(21), + .msg = "cmdq_rocee_rx_tail_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(23), + .msg = "cmdq_rocee_tx_tail_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(25), + .msg = "cmdq_rocee_rx_head_ecc_mbit_err", + .reset_level = 
HNAE3_NONE_RESET + }, { + .int_msk = BIT(27), + .msg = "cmdq_rocee_tx_head_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(29), + .msg = "cmdq_rocee_rx_addr_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(31), + .msg = "cmdq_rocee_tx_addr_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + /* sentinel */ + } +}; + +static const struct hclge_hw_error hclge_tqp_int_ecc_int[] = { + { + .int_msk = BIT(6), + .msg = "tqp_int_cfg_even_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(7), + .msg = "tqp_int_cfg_odd_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(8), + .msg = "tqp_int_ctrl_even_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(9), + .msg = "tqp_int_ctrl_odd_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(10), + .msg = "tx_que_scan_int_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(11), + .msg = "rx_que_scan_int_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + /* sentinel */ + } +}; + +static const struct hclge_hw_error hclge_msix_sram_ecc_int[] = { + { + .int_msk = BIT(1), + .msg = "msix_nic_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(3), + .msg = "msix_rocee_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + /* sentinel */ + } +}; + +static const struct hclge_hw_error hclge_igu_int[] = { + { + .int_msk = BIT(0), + .msg = "igu_rx_buf0_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(2), + .msg = "igu_rx_buf1_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + /* sentinel */ + } +}; + +static const struct hclge_hw_error hclge_igu_egu_tnl_int[] = { + { + .int_msk = BIT(0), + .msg = "rx_buf_overflow", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(1), + .msg = "rx_stp_fifo_overflow", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(2), + .msg = "rx_stp_fifo_underflow", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(3), + .msg = "tx_buf_overflow", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(4), + .msg = "tx_buf_underrun", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(5), + .msg = "rx_stp_buf_overflow", + .reset_level = HNAE3_GLOBAL_RESET + }, { + /* sentinel */ + } +}; + +static const struct hclge_hw_error hclge_ncsi_err_int[] = { + { + .int_msk = BIT(1), + .msg = "ncsi_tx_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + /* sentinel */ + } +}; + +static const struct hclge_hw_error hclge_ppp_mpf_abnormal_int_st1[] = { + { + .int_msk = BIT(0), + .msg = "vf_vlan_ad_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(1), + .msg = "umv_mcast_group_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(2), + .msg = "umv_key_mem0_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(3), + .msg = "umv_key_mem1_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(4), + .msg = "umv_key_mem2_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(5), + .msg = "umv_key_mem3_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(6), + .msg = "umv_ad_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(7), + .msg = "rss_tc_mode_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(8), + .msg = "rss_idt_mem0_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(9), + .msg = 
"rss_idt_mem1_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(10), + .msg = "rss_idt_mem2_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(11), + .msg = "rss_idt_mem3_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(12), + .msg = "rss_idt_mem4_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(13), + .msg = "rss_idt_mem5_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(14), + .msg = "rss_idt_mem6_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(15), + .msg = "rss_idt_mem7_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(16), + .msg = "rss_idt_mem8_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(17), + .msg = "rss_idt_mem9_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(18), + .msg = "rss_idt_mem10_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(19), + .msg = "rss_idt_mem11_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(20), + .msg = "rss_idt_mem12_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(21), + .msg = "rss_idt_mem13_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(22), + .msg = "rss_idt_mem14_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(23), + .msg = "rss_idt_mem15_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(24), + .msg = "port_vlan_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(25), + .msg = "mcast_linear_table_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(26), + .msg = "mcast_result_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(27), + .msg = "flow_director_ad_mem0_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(28), + .msg = "flow_director_ad_mem1_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(29), + .msg = "rx_vlan_tag_memory_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(30), + .msg = "Tx_UP_mapping_config_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + /* sentinel */ + } +}; + +static const struct hclge_hw_error hclge_ppp_pf_abnormal_int[] = { + { + .int_msk = BIT(0), + .msg = "tx_vlan_tag_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(1), + .msg = "rss_list_tc_unassigned_queue_err", + .reset_level = HNAE3_NONE_RESET + }, { + /* sentinel */ + } +}; + +static const struct hclge_hw_error hclge_ppp_mpf_abnormal_int_st3[] = { + { + .int_msk = BIT(0), + .msg = "hfs_fifo_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(1), + .msg = "rslt_descr_fifo_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(2), + .msg = "tx_vlan_tag_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(3), + .msg = "FD_CN0_memory_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(4), + .msg = "FD_CN1_memory_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(5), + .msg = "GRO_AD_memory_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + /* sentinel */ + } +}; + +static const struct hclge_hw_error hclge_tm_sch_rint[] = { + { + .int_msk = BIT(1), + .msg = "tm_sch_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(2), + .msg = 
"tm_sch_port_shap_sub_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(3), + .msg = "tm_sch_port_shap_sub_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(4), + .msg = "tm_sch_pg_pshap_sub_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(5), + .msg = "tm_sch_pg_pshap_sub_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(6), + .msg = "tm_sch_pg_cshap_sub_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(7), + .msg = "tm_sch_pg_cshap_sub_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(8), + .msg = "tm_sch_pri_pshap_sub_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(9), + .msg = "tm_sch_pri_pshap_sub_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(10), + .msg = "tm_sch_pri_cshap_sub_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(11), + .msg = "tm_sch_pri_cshap_sub_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(12), + .msg = "tm_sch_port_shap_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(13), + .msg = "tm_sch_port_shap_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(14), + .msg = "tm_sch_pg_pshap_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(15), + .msg = "tm_sch_pg_pshap_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(16), + .msg = "tm_sch_pg_cshap_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(17), + .msg = "tm_sch_pg_cshap_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(18), + .msg = "tm_sch_pri_pshap_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(19), + .msg = "tm_sch_pri_pshap_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(20), + .msg = "tm_sch_pri_cshap_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(21), + .msg = "tm_sch_pri_cshap_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(22), + .msg = "tm_sch_rq_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(23), + .msg = "tm_sch_rq_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(24), + .msg = "tm_sch_nq_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(25), + .msg = "tm_sch_nq_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(26), + .msg = "tm_sch_roce_up_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(27), + .msg = "tm_sch_roce_up_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(28), + .msg = "tm_sch_rcb_byte_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(29), + .msg = "tm_sch_rcb_byte_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(30), + .msg = "tm_sch_ssu_byte_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(31), + .msg = "tm_sch_ssu_byte_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + /* sentinel */ + } +}; + +static const struct hclge_hw_error hclge_qcn_fifo_rint[] = { + { + .int_msk = BIT(0), + .msg = "qcn_shap_gp0_sch_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(1), + .msg = "qcn_shap_gp0_sch_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(2), + .msg = 
"qcn_shap_gp1_sch_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(3), + .msg = "qcn_shap_gp1_sch_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(4), + .msg = "qcn_shap_gp2_sch_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(5), + .msg = "qcn_shap_gp2_sch_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(6), + .msg = "qcn_shap_gp3_sch_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(7), + .msg = "qcn_shap_gp3_sch_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(8), + .msg = "qcn_shap_gp0_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(9), + .msg = "qcn_shap_gp0_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(10), + .msg = "qcn_shap_gp1_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(11), + .msg = "qcn_shap_gp1_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(12), + .msg = "qcn_shap_gp2_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(13), + .msg = "qcn_shap_gp2_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(14), + .msg = "qcn_shap_gp3_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(15), + .msg = "qcn_shap_gp3_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(16), + .msg = "qcn_byte_info_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(17), + .msg = "qcn_byte_info_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + /* sentinel */ + } +}; + +static const struct hclge_hw_error hclge_qcn_ecc_rint[] = { + { + .int_msk = BIT(1), + .msg = "qcn_byte_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(3), + .msg = "qcn_time_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(5), + .msg = "qcn_fb_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(7), + .msg = "qcn_link_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(9), + .msg = "qcn_rate_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(11), + .msg = "qcn_tmplt_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(13), + .msg = "qcn_shap_cfg_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(15), + .msg = "qcn_gp0_barrel_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(17), + .msg = "qcn_gp1_barrel_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(19), + .msg = "qcn_gp2_barrel_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(21), + .msg = "qcn_gp3_barral_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + /* sentinel */ + } +}; + +static const struct hclge_hw_error hclge_mac_afifo_tnl_int[] = { + { + .int_msk = BIT(0), + .msg = "egu_cge_afifo_ecc_1bit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(1), + .msg = "egu_cge_afifo_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(2), + .msg = "egu_lge_afifo_ecc_1bit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(3), + .msg = "egu_lge_afifo_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(4), + .msg = "cge_igu_afifo_ecc_1bit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(5), + 
.msg = "cge_igu_afifo_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(6), + .msg = "lge_igu_afifo_ecc_1bit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(7), + .msg = "lge_igu_afifo_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(8), + .msg = "cge_igu_afifo_overflow_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(9), + .msg = "lge_igu_afifo_overflow_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(10), + .msg = "egu_cge_afifo_underrun_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(11), + .msg = "egu_lge_afifo_underrun_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(12), + .msg = "egu_ge_afifo_underrun_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(13), + .msg = "ge_igu_afifo_overflow_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + /* sentinel */ + } +}; + +static const struct hclge_hw_error hclge_ppu_mpf_abnormal_int_st2[] = { + { + .int_msk = BIT(13), + .msg = "rpu_rx_pkt_bit32_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(14), + .msg = "rpu_rx_pkt_bit33_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(15), + .msg = "rpu_rx_pkt_bit34_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(16), + .msg = "rpu_rx_pkt_bit35_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(17), + .msg = "rcb_tx_ring_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(18), + .msg = "rcb_rx_ring_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(19), + .msg = "rcb_tx_fbd_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(20), + .msg = "rcb_rx_ebd_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(21), + .msg = "rcb_tso_info_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(22), + .msg = "rcb_tx_int_info_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(23), + .msg = "rcb_rx_int_info_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(24), + .msg = "tpu_tx_pkt_0_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(25), + .msg = "tpu_tx_pkt_1_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(26), + .msg = "rd_bus_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(27), + .msg = "wr_bus_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(28), + .msg = "reg_search_miss", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(29), + .msg = "rx_q_search_miss", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(30), + .msg = "ooo_ecc_err_detect", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(31), + .msg = "ooo_ecc_err_multpl", + .reset_level = HNAE3_GLOBAL_RESET + }, { + /* sentinel */ + } +}; + +static const struct hclge_hw_error hclge_ppu_mpf_abnormal_int_st3[] = { + { + .int_msk = BIT(4), + .msg = "gro_bd_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(5), + .msg = "gro_context_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(6), + .msg = "rx_stash_cfg_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(7), + .msg = "axi_rd_fbd_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + /* sentinel */ + } +}; + +static const struct hclge_hw_error hclge_ppu_pf_abnormal_int[] = { + { + .int_msk = BIT(0), + .msg = 
"over_8bd_no_fe", + .reset_level = HNAE3_FUNC_RESET + }, { + .int_msk = BIT(1), + .msg = "tso_mss_cmp_min_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(2), + .msg = "tso_mss_cmp_max_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(3), + .msg = "tx_rd_fbd_poison", + .reset_level = HNAE3_FUNC_RESET + }, { + .int_msk = BIT(4), + .msg = "rx_rd_ebd_poison", + .reset_level = HNAE3_FUNC_RESET + }, { + .int_msk = BIT(5), + .msg = "buf_wait_timeout", + .reset_level = HNAE3_NONE_RESET + }, { + /* sentinel */ + } +}; + +static const struct hclge_hw_error hclge_ssu_com_err_int[] = { + { + .int_msk = BIT(0), + .msg = "buf_sum_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(1), + .msg = "ppp_mb_num_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(2), + .msg = "ppp_mbid_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(3), + .msg = "ppp_rlt_mac_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(4), + .msg = "ppp_rlt_host_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(5), + .msg = "cks_edit_position_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(6), + .msg = "cks_edit_condition_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(7), + .msg = "vlan_edit_condition_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(8), + .msg = "vlan_num_ot_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(9), + .msg = "vlan_num_in_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + /* sentinel */ + } +}; + +#define HCLGE_SSU_MEM_ECC_ERR(x) \ +{ \ + .int_msk = BIT(x), \ + .msg = "ssu_mem" #x "_ecc_mbit_err", \ + .reset_level = HNAE3_GLOBAL_RESET \ +} + +static const struct hclge_hw_error hclge_ssu_mem_ecc_err_int[] = { + HCLGE_SSU_MEM_ECC_ERR(0), + HCLGE_SSU_MEM_ECC_ERR(1), + HCLGE_SSU_MEM_ECC_ERR(2), + HCLGE_SSU_MEM_ECC_ERR(3), + HCLGE_SSU_MEM_ECC_ERR(4), + HCLGE_SSU_MEM_ECC_ERR(5), + HCLGE_SSU_MEM_ECC_ERR(6), + HCLGE_SSU_MEM_ECC_ERR(7), + HCLGE_SSU_MEM_ECC_ERR(8), + HCLGE_SSU_MEM_ECC_ERR(9), + HCLGE_SSU_MEM_ECC_ERR(10), + HCLGE_SSU_MEM_ECC_ERR(11), + HCLGE_SSU_MEM_ECC_ERR(12), + HCLGE_SSU_MEM_ECC_ERR(13), + HCLGE_SSU_MEM_ECC_ERR(14), + HCLGE_SSU_MEM_ECC_ERR(15), + HCLGE_SSU_MEM_ECC_ERR(16), + HCLGE_SSU_MEM_ECC_ERR(17), + HCLGE_SSU_MEM_ECC_ERR(18), + HCLGE_SSU_MEM_ECC_ERR(19), + HCLGE_SSU_MEM_ECC_ERR(20), + HCLGE_SSU_MEM_ECC_ERR(21), + HCLGE_SSU_MEM_ECC_ERR(22), + HCLGE_SSU_MEM_ECC_ERR(23), + HCLGE_SSU_MEM_ECC_ERR(24), + HCLGE_SSU_MEM_ECC_ERR(25), + HCLGE_SSU_MEM_ECC_ERR(26), + HCLGE_SSU_MEM_ECC_ERR(27), + HCLGE_SSU_MEM_ECC_ERR(28), + HCLGE_SSU_MEM_ECC_ERR(29), + HCLGE_SSU_MEM_ECC_ERR(30), + HCLGE_SSU_MEM_ECC_ERR(31), + { /* sentinel */ } +}; + +static const struct hclge_hw_error hclge_ssu_port_based_err_int[] = { + { + .int_msk = BIT(0), + .msg = "roc_pkt_without_key_port", + .reset_level = HNAE3_FUNC_RESET + }, { + .int_msk = BIT(1), + .msg = "tpu_pkt_without_key_port", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(2), + .msg = "igu_pkt_without_key_port", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(3), + .msg = "roc_eof_mis_match_port", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(4), + .msg = "tpu_eof_mis_match_port", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(5), + .msg = "igu_eof_mis_match_port", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(6), + .msg = "roc_sof_mis_match_port", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(7), + .msg = 
"tpu_sof_mis_match_port", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(8), + .msg = "igu_sof_mis_match_port", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(11), + .msg = "ets_rd_int_rx_port", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(12), + .msg = "ets_wr_int_rx_port", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(13), + .msg = "ets_rd_int_tx_port", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(14), + .msg = "ets_wr_int_tx_port", + .reset_level = HNAE3_GLOBAL_RESET + }, { + /* sentinel */ + } +}; + +static const struct hclge_hw_error hclge_ssu_fifo_overflow_int[] = { + { + .int_msk = BIT(0), + .msg = "ig_mac_inf_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(1), + .msg = "ig_host_inf_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(2), + .msg = "ig_roc_buf_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(3), + .msg = "ig_host_data_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(4), + .msg = "ig_host_key_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(5), + .msg = "tx_qcn_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(6), + .msg = "rx_qcn_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(7), + .msg = "tx_pf_rd_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(8), + .msg = "rx_pf_rd_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(9), + .msg = "qm_eof_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(10), + .msg = "mb_rlt_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(11), + .msg = "dup_uncopy_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(12), + .msg = "dup_cnt_rd_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(13), + .msg = "dup_cnt_drop_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(14), + .msg = "dup_cnt_wrb_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(15), + .msg = "host_cmd_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(16), + .msg = "mac_cmd_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(17), + .msg = "host_cmd_bitmap_empty_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(18), + .msg = "mac_cmd_bitmap_empty_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(19), + .msg = "dup_bitmap_empty_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(20), + .msg = "out_queue_bitmap_empty_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(21), + .msg = "bank2_bitmap_empty_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(22), + .msg = "bank1_bitmap_empty_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(23), + .msg = "bank0_bitmap_empty_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + /* sentinel */ + } +}; + +static const struct hclge_hw_error hclge_ssu_ets_tcg_int[] = { + { + .int_msk = BIT(0), + .msg = "ets_rd_int_rx_tcg", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(1), + .msg = "ets_wr_int_rx_tcg", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(2), + .msg = "ets_rd_int_tx_tcg", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(3), + .msg = "ets_wr_int_tx_tcg", + .reset_level = HNAE3_GLOBAL_RESET + }, { + /* sentinel */ + } +}; + +static const struct hclge_hw_error hclge_ssu_port_based_pf_int[] = { + { + .int_msk = 
BIT(0), + .msg = "roc_pkt_without_key_port", + .reset_level = HNAE3_FUNC_RESET + }, { + .int_msk = BIT(9), + .msg = "low_water_line_err_port", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(10), + .msg = "hi_water_line_err_port", + .reset_level = HNAE3_GLOBAL_RESET + }, { + /* sentinel */ + } +}; + +static const struct hclge_hw_error hclge_rocee_qmm_ovf_err_int[] = { + { + .int_msk = 0, + .msg = "rocee qmm ovf: sgid invalid err" + }, { + .int_msk = 0x4, + .msg = "rocee qmm ovf: sgid ovf err" + }, { + .int_msk = 0x8, + .msg = "rocee qmm ovf: smac invalid err" + }, { + .int_msk = 0xC, + .msg = "rocee qmm ovf: smac ovf err" + }, { + .int_msk = 0x10, + .msg = "rocee qmm ovf: cqc invalid err" + }, { + .int_msk = 0x11, + .msg = "rocee qmm ovf: cqc ovf err" + }, { + .int_msk = 0x12, + .msg = "rocee qmm ovf: cqc hopnum err" + }, { + .int_msk = 0x13, + .msg = "rocee qmm ovf: cqc ba0 err" + }, { + .int_msk = 0x14, + .msg = "rocee qmm ovf: srqc invalid err" + }, { + .int_msk = 0x15, + .msg = "rocee qmm ovf: srqc ovf err" + }, { + .int_msk = 0x16, + .msg = "rocee qmm ovf: srqc hopnum err" + }, { + .int_msk = 0x17, + .msg = "rocee qmm ovf: srqc ba0 err" + }, { + .int_msk = 0x18, + .msg = "rocee qmm ovf: mpt invalid err" + }, { + .int_msk = 0x19, + .msg = "rocee qmm ovf: mpt ovf err" + }, { + .int_msk = 0x1A, + .msg = "rocee qmm ovf: mpt hopnum err" + }, { + .int_msk = 0x1B, + .msg = "rocee qmm ovf: mpt ba0 err" + }, { + .int_msk = 0x1C, + .msg = "rocee qmm ovf: qpc invalid err" + }, { + .int_msk = 0x1D, + .msg = "rocee qmm ovf: qpc ovf err" + }, { + .int_msk = 0x1E, + .msg = "rocee qmm ovf: qpc hopnum err" + }, { + .int_msk = 0x1F, + .msg = "rocee qmm ovf: qpc ba0 err" + }, { + /* sentinel */ + } +}; + +static const struct hclge_hw_module_id hclge_hw_module_id_st[] = { + { + .module_id = MODULE_NONE, + .msg = "MODULE_NONE" + }, { + .module_id = MODULE_BIOS_COMMON, + .msg = "MODULE_BIOS_COMMON" + }, { + .module_id = MODULE_GE, + .msg = "MODULE_GE" + }, { + .module_id = MODULE_IGU_EGU, + .msg = "MODULE_IGU_EGU" + }, { + .module_id = MODULE_LGE, + .msg = "MODULE_LGE" + }, { + .module_id = MODULE_NCSI, + .msg = "MODULE_NCSI" + }, { + .module_id = MODULE_PPP, + .msg = "MODULE_PPP" + }, { + .module_id = MODULE_QCN, + .msg = "MODULE_QCN" + }, { + .module_id = MODULE_RCB_RX, + .msg = "MODULE_RCB_RX" + }, { + .module_id = MODULE_RTC, + .msg = "MODULE_RTC" + }, { + .module_id = MODULE_SSU, + .msg = "MODULE_SSU" + }, { + .module_id = MODULE_TM, + .msg = "MODULE_TM" + }, { + .module_id = MODULE_RCB_TX, + .msg = "MODULE_RCB_TX" + }, { + .module_id = MODULE_TXDMA, + .msg = "MODULE_TXDMA" + }, { + .module_id = MODULE_MASTER, + .msg = "MODULE_MASTER" + }, { + .module_id = MODULE_HIMAC, + .msg = "MODULE_HIMAC" + }, { + .module_id = MODULE_ROCEE_TOP, + .msg = "MODULE_ROCEE_TOP" + }, { + .module_id = MODULE_ROCEE_TIMER, + .msg = "MODULE_ROCEE_TIMER" + }, { + .module_id = MODULE_ROCEE_MDB, + .msg = "MODULE_ROCEE_MDB" + }, { + .module_id = MODULE_ROCEE_TSP, + .msg = "MODULE_ROCEE_TSP" + }, { + .module_id = MODULE_ROCEE_TRP, + .msg = "MODULE_ROCEE_TRP" + }, { + .module_id = MODULE_ROCEE_SCC, + .msg = "MODULE_ROCEE_SCC" + }, { + .module_id = MODULE_ROCEE_CAEP, + .msg = "MODULE_ROCEE_CAEP" + }, { + .module_id = MODULE_ROCEE_GEN_AC, + .msg = "MODULE_ROCEE_GEN_AC" + }, { + .module_id = MODULE_ROCEE_QMM, + .msg = "MODULE_ROCEE_QMM" + }, { + .module_id = MODULE_ROCEE_LSAN, + .msg = "MODULE_ROCEE_LSAN" + } +}; + +static const struct hclge_hw_type_id hclge_hw_type_id_st[] = { + { + .type_id = NONE_ERROR, + .msg = 
"none_error" + }, { + .type_id = FIFO_ERROR, + .msg = "fifo_error" + }, { + .type_id = MEMORY_ERROR, + .msg = "memory_error" + }, { + .type_id = POISON_ERROR, + .msg = "poison_error" + }, { + .type_id = MSIX_ECC_ERROR, + .msg = "msix_ecc_error" + }, { + .type_id = TQP_INT_ECC_ERROR, + .msg = "tqp_int_ecc_error" + }, { + .type_id = PF_ABNORMAL_INT_ERROR, + .msg = "pf_abnormal_int_error" + }, { + .type_id = MPF_ABNORMAL_INT_ERROR, + .msg = "mpf_abnormal_int_error" + }, { + .type_id = COMMON_ERROR, + .msg = "common_error" + }, { + .type_id = PORT_ERROR, + .msg = "port_error" + }, { + .type_id = ETS_ERROR, + .msg = "ets_error" + }, { + .type_id = NCSI_ERROR, + .msg = "ncsi_error" + }, { + .type_id = GLB_ERROR, + .msg = "glb_error" + }, { + .type_id = LINK_ERROR, + .msg = "link_error" + }, { + .type_id = PTP_ERROR, + .msg = "ptp_error" + }, { + .type_id = ROCEE_NORMAL_ERR, + .msg = "rocee_normal_error" + }, { + .type_id = ROCEE_OVF_ERR, + .msg = "rocee_ovf_error" + }, { + .type_id = ROCEE_BUS_ERR, + .msg = "rocee_bus_error" + }, +}; + +static void hclge_log_error(struct device *dev, char *reg, + const struct hclge_hw_error *err, + u32 err_sts, unsigned long *reset_requests) +{ + while (err->msg) { + if (err->int_msk & err_sts) { + dev_err(dev, "%s %s found [error status=0x%x]\n", + reg, err->msg, err_sts); + if (err->reset_level && + err->reset_level != HNAE3_NONE_RESET) + set_bit(err->reset_level, reset_requests); + } + err++; + } +} + +/* hclge_cmd_query_error: read the error information + * @hdev: pointer to struct hclge_dev + * @desc: descriptor for describing the command + * @cmd: command opcode + * @flag: flag for extended command structure + * + * This function query the error info from hw register/s using command + */ +static int hclge_cmd_query_error(struct hclge_dev *hdev, + struct hclge_desc *desc, u32 cmd, u16 flag) +{ + struct device *dev = &hdev->pdev->dev; + int desc_num = 1; + int ret; + + hclge_cmd_setup_basic_desc(&desc[0], cmd, true); + if (flag) { + desc[0].flag |= cpu_to_le16(flag); + hclge_cmd_setup_basic_desc(&desc[1], cmd, true); + desc_num = 2; + } + + ret = hclge_cmd_send(&hdev->hw, &desc[0], desc_num); + if (ret) + dev_err(dev, "query error cmd failed (%d)\n", ret); + + return ret; +} + +static int hclge_clear_mac_tnl_int(struct hclge_dev *hdev) +{ + struct hclge_desc desc; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CLEAR_MAC_TNL_INT, false); + desc.data[0] = cpu_to_le32(HCLGE_MAC_TNL_INT_CLR); + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int hclge_config_common_hw_err_int(struct hclge_dev *hdev, bool en) +{ + struct device *dev = &hdev->pdev->dev; + struct hclge_desc desc[2]; + int ret; + + /* configure common error interrupts */ + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_COMMON_ECC_INT_CFG, false); + desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[1], HCLGE_COMMON_ECC_INT_CFG, false); + + if (en) { + desc[0].data[0] = cpu_to_le32(HCLGE_IMP_TCM_ECC_ERR_INT_EN); + desc[0].data[2] = cpu_to_le32(HCLGE_CMDQ_NIC_ECC_ERR_INT_EN | + HCLGE_CMDQ_ROCEE_ECC_ERR_INT_EN); + desc[0].data[3] = cpu_to_le32(HCLGE_IMP_RD_POISON_ERR_INT_EN); + desc[0].data[4] = cpu_to_le32(HCLGE_TQP_ECC_ERR_INT_EN | + HCLGE_MSIX_SRAM_ECC_ERR_INT_EN); + desc[0].data[5] = cpu_to_le32(HCLGE_IMP_ITCM4_ECC_ERR_INT_EN); + } + + desc[1].data[0] = cpu_to_le32(HCLGE_IMP_TCM_ECC_ERR_INT_EN_MASK); + desc[1].data[2] = cpu_to_le32(HCLGE_CMDQ_NIC_ECC_ERR_INT_EN_MASK | + HCLGE_CMDQ_ROCEE_ECC_ERR_INT_EN_MASK); + desc[1].data[3] = 
cpu_to_le32(HCLGE_IMP_RD_POISON_ERR_INT_EN_MASK); + desc[1].data[4] = cpu_to_le32(HCLGE_TQP_ECC_ERR_INT_EN_MASK | + HCLGE_MSIX_SRAM_ECC_ERR_INT_EN_MASK); + desc[1].data[5] = cpu_to_le32(HCLGE_IMP_ITCM4_ECC_ERR_INT_EN_MASK); + + ret = hclge_cmd_send(&hdev->hw, &desc[0], 2); + if (ret) + dev_err(dev, + "fail(%d) to configure common err interrupts\n", ret); + + return ret; +} + +static int hclge_config_ncsi_hw_err_int(struct hclge_dev *hdev, bool en) +{ + struct device *dev = &hdev->pdev->dev; + struct hclge_desc desc; + int ret; + + if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) + return 0; + + /* configure NCSI error interrupts */ + hclge_cmd_setup_basic_desc(&desc, HCLGE_NCSI_INT_EN, false); + if (en) + desc.data[0] = cpu_to_le32(HCLGE_NCSI_ERR_INT_EN); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(dev, + "fail(%d) to configure NCSI error interrupts\n", ret); + + return ret; +} + +static int hclge_config_igu_egu_hw_err_int(struct hclge_dev *hdev, bool en) +{ + struct device *dev = &hdev->pdev->dev; + struct hclge_desc desc; + int ret; + + /* configure IGU,EGU error interrupts */ + hclge_cmd_setup_basic_desc(&desc, HCLGE_IGU_COMMON_INT_EN, false); + desc.data[0] = cpu_to_le32(HCLGE_IGU_ERR_INT_TYPE); + if (en) + desc.data[0] |= cpu_to_le32(HCLGE_IGU_ERR_INT_EN); + + desc.data[1] = cpu_to_le32(HCLGE_IGU_ERR_INT_EN_MASK); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(dev, + "fail(%d) to configure IGU common interrupts\n", ret); + return ret; + } + + hclge_cmd_setup_basic_desc(&desc, HCLGE_IGU_EGU_TNL_INT_EN, false); + if (en) + desc.data[0] = cpu_to_le32(HCLGE_IGU_TNL_ERR_INT_EN); + + desc.data[1] = cpu_to_le32(HCLGE_IGU_TNL_ERR_INT_EN_MASK); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(dev, + "fail(%d) to configure IGU-EGU TNL interrupts\n", ret); + return ret; + } + + ret = hclge_config_ncsi_hw_err_int(hdev, en); + + return ret; +} + +static int hclge_config_ppp_error_interrupt(struct hclge_dev *hdev, u32 cmd, + bool en) +{ + struct device *dev = &hdev->pdev->dev; + struct hclge_desc desc[2]; + int ret; + + /* configure PPP error interrupts */ + hclge_cmd_setup_basic_desc(&desc[0], cmd, false); + desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[1], cmd, false); + + if (cmd == HCLGE_PPP_CMD0_INT_CMD) { + if (en) { + desc[0].data[0] = + cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT0_EN); + desc[0].data[1] = + cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT1_EN); + desc[0].data[4] = cpu_to_le32(HCLGE_PPP_PF_ERR_INT_EN); + } + + desc[1].data[0] = + cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT0_EN_MASK); + desc[1].data[1] = + cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT1_EN_MASK); + if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) + desc[1].data[2] = + cpu_to_le32(HCLGE_PPP_PF_ERR_INT_EN_MASK); + } else if (cmd == HCLGE_PPP_CMD1_INT_CMD) { + if (en) { + desc[0].data[0] = + cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT2_EN); + desc[0].data[1] = + cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT3_EN); + } + + desc[1].data[0] = + cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT2_EN_MASK); + desc[1].data[1] = + cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT3_EN_MASK); + } + + ret = hclge_cmd_send(&hdev->hw, &desc[0], 2); + if (ret) + dev_err(dev, "fail(%d) to configure PPP error intr\n", ret); + + return ret; +} + +static int hclge_config_ppp_hw_err_int(struct hclge_dev *hdev, bool en) +{ + int ret; + + ret = hclge_config_ppp_error_interrupt(hdev, HCLGE_PPP_CMD0_INT_CMD, + en); + if (ret) + return ret; + + ret = 
hclge_config_ppp_error_interrupt(hdev, HCLGE_PPP_CMD1_INT_CMD, + en); + + return ret; +} + +static int hclge_config_tm_hw_err_int(struct hclge_dev *hdev, bool en) +{ + struct device *dev = &hdev->pdev->dev; + struct hclge_desc desc; + int ret; + + /* configure TM SCH hw errors */ + hclge_cmd_setup_basic_desc(&desc, HCLGE_TM_SCH_ECC_INT_EN, false); + if (en) + desc.data[0] = cpu_to_le32(HCLGE_TM_SCH_ECC_ERR_INT_EN); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(dev, "fail(%d) to configure TM SCH errors\n", ret); + return ret; + } + + /* configure TM QCN hw errors */ + hclge_cmd_setup_basic_desc(&desc, HCLGE_TM_QCN_MEM_INT_CFG, false); + desc.data[0] = cpu_to_le32(HCLGE_TM_QCN_ERR_INT_TYPE); + if (en) { + desc.data[0] |= cpu_to_le32(HCLGE_TM_QCN_FIFO_INT_EN); + desc.data[1] = cpu_to_le32(HCLGE_TM_QCN_MEM_ERR_INT_EN); + } + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(dev, + "fail(%d) to configure TM QCN mem errors\n", ret); + + return ret; +} + +static int hclge_config_mac_err_int(struct hclge_dev *hdev, bool en) +{ + struct device *dev = &hdev->pdev->dev; + struct hclge_desc desc; + int ret; + + /* configure MAC common error interrupts */ + hclge_cmd_setup_basic_desc(&desc, HCLGE_MAC_COMMON_INT_EN, false); + if (en) + desc.data[0] = cpu_to_le32(HCLGE_MAC_COMMON_ERR_INT_EN); + + desc.data[1] = cpu_to_le32(HCLGE_MAC_COMMON_ERR_INT_EN_MASK); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(dev, + "fail(%d) to configure MAC COMMON error intr\n", ret); + + return ret; +} + +int hclge_config_mac_tnl_int(struct hclge_dev *hdev, bool en) +{ + struct hclge_desc desc; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_TNL_INT_EN, false); + if (en) + desc.data[0] = cpu_to_le32(HCLGE_MAC_TNL_INT_EN); + else + desc.data[0] = 0; + + desc.data[1] = cpu_to_le32(HCLGE_MAC_TNL_INT_EN_MASK); + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int hclge_config_ppu_error_interrupts(struct hclge_dev *hdev, u32 cmd, + bool en) +{ + struct device *dev = &hdev->pdev->dev; + struct hclge_desc desc[2]; + int desc_num = 1; + int ret; + + /* configure PPU error interrupts */ + if (cmd == HCLGE_PPU_MPF_ECC_INT_CMD) { + hclge_cmd_setup_basic_desc(&desc[0], cmd, false); + desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[1], cmd, false); + if (en) { + desc[0].data[0] = + cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT0_EN); + desc[0].data[1] = + cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT1_EN); + desc[1].data[3] = + cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT3_EN); + desc[1].data[4] = + cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT2_EN); + } + + desc[1].data[0] = + cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT0_EN_MASK); + desc[1].data[1] = + cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT1_EN_MASK); + desc[1].data[2] = + cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT2_EN_MASK); + desc[1].data[3] |= + cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT3_EN_MASK); + desc_num = 2; + } else if (cmd == HCLGE_PPU_MPF_OTHER_INT_CMD) { + hclge_cmd_setup_basic_desc(&desc[0], cmd, false); + if (en) + desc[0].data[0] = + cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT2_EN2); + + desc[0].data[2] = + cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT2_EN2_MASK); + } else if (cmd == HCLGE_PPU_PF_OTHER_INT_CMD) { + hclge_cmd_setup_basic_desc(&desc[0], cmd, false); + if (en) + desc[0].data[0] = + cpu_to_le32(HCLGE_PPU_PF_ABNORMAL_INT_EN); + + desc[0].data[2] = + cpu_to_le32(HCLGE_PPU_PF_ABNORMAL_INT_EN_MASK); + } else { + dev_err(dev, "Invalid cmd to configure PPU error interrupts\n"); + return -EINVAL; + } 
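+	/* Only the MPF ECC command handled above is a two-descriptor request:
+	 * desc[0] is flagged with HCLGE_COMM_CMD_FLAG_NEXT so that desc[1] is
+	 * treated as its continuation, and desc_num is raised to 2. The other
+	 * PPU commands fit in a single descriptor, so desc_num stays 1 and
+	 * only desc[0] is sent below.
+	 */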
+ + ret = hclge_cmd_send(&hdev->hw, &desc[0], desc_num); + + return ret; +} + +static int hclge_config_ppu_hw_err_int(struct hclge_dev *hdev, bool en) +{ + struct device *dev = &hdev->pdev->dev; + int ret; + + ret = hclge_config_ppu_error_interrupts(hdev, HCLGE_PPU_MPF_ECC_INT_CMD, + en); + if (ret) { + dev_err(dev, "fail(%d) to configure PPU MPF ECC error intr\n", + ret); + return ret; + } + + ret = hclge_config_ppu_error_interrupts(hdev, + HCLGE_PPU_MPF_OTHER_INT_CMD, + en); + if (ret) { + dev_err(dev, "fail(%d) to configure PPU MPF other intr\n", ret); + return ret; + } + + ret = hclge_config_ppu_error_interrupts(hdev, + HCLGE_PPU_PF_OTHER_INT_CMD, en); + if (ret) + dev_err(dev, "fail(%d) to configure PPU PF error interrupts\n", + ret); + return ret; +} + +static int hclge_config_ssu_hw_err_int(struct hclge_dev *hdev, bool en) +{ + struct device *dev = &hdev->pdev->dev; + struct hclge_desc desc[2]; + int ret; + + /* configure SSU ecc error interrupts */ + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_SSU_ECC_INT_CMD, false); + desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[1], HCLGE_SSU_ECC_INT_CMD, false); + if (en) { + desc[0].data[0] = cpu_to_le32(HCLGE_SSU_1BIT_ECC_ERR_INT_EN); + desc[0].data[1] = + cpu_to_le32(HCLGE_SSU_MULTI_BIT_ECC_ERR_INT_EN); + desc[0].data[4] = cpu_to_le32(HCLGE_SSU_BIT32_ECC_ERR_INT_EN); + } + + desc[1].data[0] = cpu_to_le32(HCLGE_SSU_1BIT_ECC_ERR_INT_EN_MASK); + desc[1].data[1] = cpu_to_le32(HCLGE_SSU_MULTI_BIT_ECC_ERR_INT_EN_MASK); + desc[1].data[2] = cpu_to_le32(HCLGE_SSU_BIT32_ECC_ERR_INT_EN_MASK); + + ret = hclge_cmd_send(&hdev->hw, &desc[0], 2); + if (ret) { + dev_err(dev, + "fail(%d) to configure SSU ECC error interrupt\n", ret); + return ret; + } + + /* configure SSU common error interrupts */ + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_SSU_COMMON_INT_CMD, false); + desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[1], HCLGE_SSU_COMMON_INT_CMD, false); + + if (en) { + if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) + desc[0].data[0] = + cpu_to_le32(HCLGE_SSU_COMMON_INT_EN); + else + desc[0].data[0] = + cpu_to_le32(HCLGE_SSU_COMMON_INT_EN & ~BIT(5)); + desc[0].data[1] = cpu_to_le32(HCLGE_SSU_PORT_BASED_ERR_INT_EN); + desc[0].data[2] = + cpu_to_le32(HCLGE_SSU_FIFO_OVERFLOW_ERR_INT_EN); + } + + desc[1].data[0] = cpu_to_le32(HCLGE_SSU_COMMON_INT_EN_MASK | + HCLGE_SSU_PORT_BASED_ERR_INT_EN_MASK); + desc[1].data[1] = cpu_to_le32(HCLGE_SSU_FIFO_OVERFLOW_ERR_INT_EN_MASK); + + ret = hclge_cmd_send(&hdev->hw, &desc[0], 2); + if (ret) + dev_err(dev, + "fail(%d) to configure SSU COMMON error intr\n", ret); + + return ret; +} + +/* hclge_query_bd_num: query number of buffer descriptors + * @hdev: pointer to struct hclge_dev + * @is_ras: true for ras, false for msix + * @mpf_bd_num: number of main PF interrupt buffer descriptors + * @pf_bd_num: number of not main PF interrupt buffer descriptors + * + * This function querys number of mpf and pf buffer descriptors. 
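 *
 * A sketch of how the handlers below consume the result (both the RAS
 * and the MSI-X paths size one descriptor array from the larger of the
 * two values):
 *
 *	u32 mpf_bd_num, pf_bd_num;
 *	struct hclge_desc *desc;
 *
 *	if (!hclge_query_bd_num(hdev, true, &mpf_bd_num, &pf_bd_num))
 *		desc = kcalloc(max_t(u32, mpf_bd_num, pf_bd_num),
 *			       sizeof(struct hclge_desc), GFP_KERNEL);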
+ */ +static int hclge_query_bd_num(struct hclge_dev *hdev, bool is_ras, + u32 *mpf_bd_num, u32 *pf_bd_num) +{ + struct device *dev = &hdev->pdev->dev; + u32 mpf_min_bd_num, pf_min_bd_num; + enum hclge_opcode_type opcode; + struct hclge_desc desc_bd; + int ret; + + if (is_ras) { + opcode = HCLGE_QUERY_RAS_INT_STS_BD_NUM; + mpf_min_bd_num = HCLGE_MPF_RAS_INT_MIN_BD_NUM; + pf_min_bd_num = HCLGE_PF_RAS_INT_MIN_BD_NUM; + } else { + opcode = HCLGE_QUERY_MSIX_INT_STS_BD_NUM; + mpf_min_bd_num = HCLGE_MPF_MSIX_INT_MIN_BD_NUM; + pf_min_bd_num = HCLGE_PF_MSIX_INT_MIN_BD_NUM; + } + + hclge_cmd_setup_basic_desc(&desc_bd, opcode, true); + ret = hclge_cmd_send(&hdev->hw, &desc_bd, 1); + if (ret) { + dev_err(dev, "fail(%d) to query msix int status bd num\n", + ret); + return ret; + } + + *mpf_bd_num = le32_to_cpu(desc_bd.data[0]); + *pf_bd_num = le32_to_cpu(desc_bd.data[1]); + if (*mpf_bd_num < mpf_min_bd_num || *pf_bd_num < pf_min_bd_num) { + dev_err(dev, "Invalid bd num: mpf(%u), pf(%u)\n", + *mpf_bd_num, *pf_bd_num); + return -EINVAL; + } + + return 0; +} + +/* hclge_handle_mpf_ras_error: handle all main PF RAS errors + * @hdev: pointer to struct hclge_dev + * @desc: descriptor for describing the command + * @num: number of extended command structures + * + * This function handles all the main PF RAS errors in the + * hw register/s using command. + */ +static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev, + struct hclge_desc *desc, + int num) +{ + struct hnae3_ae_dev *ae_dev = hdev->ae_dev; + struct device *dev = &hdev->pdev->dev; + __le32 *desc_data; + u32 status; + int ret; + + /* query all main PF RAS errors */ + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_CLEAR_MPF_RAS_INT, + true); + ret = hclge_cmd_send(&hdev->hw, &desc[0], num); + if (ret) { + dev_err(dev, "query all mpf ras int cmd failed (%d)\n", ret); + return ret; + } + + /* log HNS common errors */ + status = le32_to_cpu(desc[0].data[0]); + if (status) + hclge_log_error(dev, "IMP_TCM_ECC_INT_STS", + &hclge_imp_tcm_ecc_int[0], status, + &ae_dev->hw_err_reset_req); + + status = le32_to_cpu(desc[0].data[1]); + if (status) + hclge_log_error(dev, "CMDQ_MEM_ECC_INT_STS", + &hclge_cmdq_nic_mem_ecc_int[0], status, + &ae_dev->hw_err_reset_req); + + if ((le32_to_cpu(desc[0].data[2])) & BIT(0)) + dev_warn(dev, "imp_rd_data_poison_err found\n"); + + status = le32_to_cpu(desc[0].data[3]); + if (status) + hclge_log_error(dev, "TQP_INT_ECC_INT_STS", + &hclge_tqp_int_ecc_int[0], status, + &ae_dev->hw_err_reset_req); + + status = le32_to_cpu(desc[0].data[4]); + if (status) + hclge_log_error(dev, "MSIX_ECC_INT_STS", + &hclge_msix_sram_ecc_int[0], status, + &ae_dev->hw_err_reset_req); + + /* log SSU(Storage Switch Unit) errors */ + desc_data = (__le32 *)&desc[2]; + status = le32_to_cpu(*(desc_data + 2)); + if (status) + hclge_log_error(dev, "SSU_ECC_MULTI_BIT_INT_0", + &hclge_ssu_mem_ecc_err_int[0], status, + &ae_dev->hw_err_reset_req); + + status = le32_to_cpu(*(desc_data + 3)) & BIT(0); + if (status) { + dev_err(dev, "SSU_ECC_MULTI_BIT_INT_1 ssu_mem32_ecc_mbit_err found [error status=0x%x]\n", + status); + set_bit(HNAE3_GLOBAL_RESET, &ae_dev->hw_err_reset_req); + } + + status = le32_to_cpu(*(desc_data + 4)) & HCLGE_SSU_COMMON_ERR_INT_MASK; + if (status) + hclge_log_error(dev, "SSU_COMMON_ERR_INT", + &hclge_ssu_com_err_int[0], status, + &ae_dev->hw_err_reset_req); + + /* log IGU(Ingress Unit) errors */ + desc_data = (__le32 *)&desc[3]; + status = le32_to_cpu(*desc_data) & HCLGE_IGU_INT_MASK; + if (status) + hclge_log_error(dev, "IGU_INT_STS", + 
&hclge_igu_int[0], status, + &ae_dev->hw_err_reset_req); + + /* log PPP(Programmable Packet Process) errors */ + desc_data = (__le32 *)&desc[4]; + status = le32_to_cpu(*(desc_data + 1)); + if (status) + hclge_log_error(dev, "PPP_MPF_ABNORMAL_INT_ST1", + &hclge_ppp_mpf_abnormal_int_st1[0], status, + &ae_dev->hw_err_reset_req); + + status = le32_to_cpu(*(desc_data + 3)) & HCLGE_PPP_MPF_INT_ST3_MASK; + if (status) + hclge_log_error(dev, "PPP_MPF_ABNORMAL_INT_ST3", + &hclge_ppp_mpf_abnormal_int_st3[0], status, + &ae_dev->hw_err_reset_req); + + /* log PPU(RCB) errors */ + desc_data = (__le32 *)&desc[5]; + status = le32_to_cpu(*(desc_data + 1)); + if (status) { + dev_err(dev, + "PPU_MPF_ABNORMAL_INT_ST1 rpu_rx_pkt_ecc_mbit_err found\n"); + set_bit(HNAE3_GLOBAL_RESET, &ae_dev->hw_err_reset_req); + } + + status = le32_to_cpu(*(desc_data + 2)); + if (status) + hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST2", + &hclge_ppu_mpf_abnormal_int_st2[0], status, + &ae_dev->hw_err_reset_req); + + status = le32_to_cpu(*(desc_data + 3)) & HCLGE_PPU_MPF_INT_ST3_MASK; + if (status) + hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST3", + &hclge_ppu_mpf_abnormal_int_st3[0], status, + &ae_dev->hw_err_reset_req); + + /* log TM(Traffic Manager) errors */ + desc_data = (__le32 *)&desc[6]; + status = le32_to_cpu(*desc_data); + if (status) + hclge_log_error(dev, "TM_SCH_RINT", + &hclge_tm_sch_rint[0], status, + &ae_dev->hw_err_reset_req); + + /* log QCN(Quantized Congestion Control) errors */ + desc_data = (__le32 *)&desc[7]; + status = le32_to_cpu(*desc_data) & HCLGE_QCN_FIFO_INT_MASK; + if (status) + hclge_log_error(dev, "QCN_FIFO_RINT", + &hclge_qcn_fifo_rint[0], status, + &ae_dev->hw_err_reset_req); + + status = le32_to_cpu(*(desc_data + 1)) & HCLGE_QCN_ECC_INT_MASK; + if (status) + hclge_log_error(dev, "QCN_ECC_RINT", + &hclge_qcn_ecc_rint[0], status, + &ae_dev->hw_err_reset_req); + + /* log NCSI errors */ + desc_data = (__le32 *)&desc[9]; + status = le32_to_cpu(*desc_data) & HCLGE_NCSI_ECC_INT_MASK; + if (status) + hclge_log_error(dev, "NCSI_ECC_INT_RPT", + &hclge_ncsi_err_int[0], status, + &ae_dev->hw_err_reset_req); + + /* clear all main PF RAS errors */ + hclge_comm_cmd_reuse_desc(&desc[0], false); + ret = hclge_cmd_send(&hdev->hw, &desc[0], num); + if (ret) + dev_err(dev, "clear all mpf ras int cmd failed (%d)\n", ret); + + return ret; +} + +/* hclge_handle_pf_ras_error: handle all PF RAS errors + * @hdev: pointer to struct hclge_dev + * @desc: descriptor for describing the command + * @num: number of extended command structures + * + * This function handles all the PF RAS errors in the + * hw registers using command. 
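 *
 * Like the MPF handler above, it first issues HCLGE_QUERY_CLEAR_PF_RAS_INT
 * to read the per-block status words (SSU port/FIFO/ETS, IGU/EGU TNL and
 * PPU PF), logs whatever is set, and then reuses the same descriptors to
 * clear the reported sources.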
+ */ +static int hclge_handle_pf_ras_error(struct hclge_dev *hdev, + struct hclge_desc *desc, + int num) +{ + struct hnae3_ae_dev *ae_dev = hdev->ae_dev; + struct device *dev = &hdev->pdev->dev; + __le32 *desc_data; + u32 status; + int ret; + + /* query all PF RAS errors */ + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_CLEAR_PF_RAS_INT, + true); + ret = hclge_cmd_send(&hdev->hw, &desc[0], num); + if (ret) { + dev_err(dev, "query all pf ras int cmd failed (%d)\n", ret); + return ret; + } + + /* log SSU(Storage Switch Unit) errors */ + status = le32_to_cpu(desc[0].data[0]); + if (status) + hclge_log_error(dev, "SSU_PORT_BASED_ERR_INT", + &hclge_ssu_port_based_err_int[0], status, + &ae_dev->hw_err_reset_req); + + status = le32_to_cpu(desc[0].data[1]); + if (status) + hclge_log_error(dev, "SSU_FIFO_OVERFLOW_INT", + &hclge_ssu_fifo_overflow_int[0], status, + &ae_dev->hw_err_reset_req); + + status = le32_to_cpu(desc[0].data[2]); + if (status) + hclge_log_error(dev, "SSU_ETS_TCG_INT", + &hclge_ssu_ets_tcg_int[0], status, + &ae_dev->hw_err_reset_req); + + /* log IGU(Ingress Unit) EGU(Egress Unit) TNL errors */ + desc_data = (__le32 *)&desc[1]; + status = le32_to_cpu(*desc_data) & HCLGE_IGU_EGU_TNL_INT_MASK; + if (status) + hclge_log_error(dev, "IGU_EGU_TNL_INT_STS", + &hclge_igu_egu_tnl_int[0], status, + &ae_dev->hw_err_reset_req); + + /* log PPU(RCB) errors */ + desc_data = (__le32 *)&desc[3]; + status = le32_to_cpu(*desc_data) & HCLGE_PPU_PF_INT_RAS_MASK; + if (status) { + hclge_log_error(dev, "PPU_PF_ABNORMAL_INT_ST0", + &hclge_ppu_pf_abnormal_int[0], status, + &ae_dev->hw_err_reset_req); + hclge_report_hw_error(hdev, HNAE3_PPU_POISON_ERROR); + } + + /* clear all PF RAS errors */ + hclge_comm_cmd_reuse_desc(&desc[0], false); + ret = hclge_cmd_send(&hdev->hw, &desc[0], num); + if (ret) + dev_err(dev, "clear all pf ras int cmd failed (%d)\n", ret); + + return ret; +} + +static int hclge_handle_all_ras_errors(struct hclge_dev *hdev) +{ + u32 mpf_bd_num, pf_bd_num, bd_num; + struct hclge_desc *desc; + int ret; + + /* query the number of registers in the RAS int status */ + ret = hclge_query_bd_num(hdev, true, &mpf_bd_num, &pf_bd_num); + if (ret) + return ret; + + bd_num = max_t(u32, mpf_bd_num, pf_bd_num); + desc = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL); + if (!desc) + return -ENOMEM; + + /* handle all main PF RAS errors */ + ret = hclge_handle_mpf_ras_error(hdev, desc, mpf_bd_num); + if (ret) { + kfree(desc); + return ret; + } + memset(desc, 0, bd_num * sizeof(struct hclge_desc)); + + /* handle all PF RAS errors */ + ret = hclge_handle_pf_ras_error(hdev, desc, pf_bd_num); + kfree(desc); + + return ret; +} + +static int hclge_log_rocee_axi_error(struct hclge_dev *hdev) +{ + struct device *dev = &hdev->pdev->dev; + struct hclge_desc desc[3]; + int ret; + + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_ROCEE_AXI_RAS_INFO_CMD, + true); + hclge_cmd_setup_basic_desc(&desc[1], HCLGE_QUERY_ROCEE_AXI_RAS_INFO_CMD, + true); + hclge_cmd_setup_basic_desc(&desc[2], HCLGE_QUERY_ROCEE_AXI_RAS_INFO_CMD, + true); + desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); + desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); + + ret = hclge_cmd_send(&hdev->hw, &desc[0], 3); + if (ret) { + dev_err(dev, "failed(%d) to query ROCEE AXI error sts\n", ret); + return ret; + } + + dev_err(dev, "AXI1: %08X %08X %08X %08X %08X %08X\n", + le32_to_cpu(desc[0].data[0]), le32_to_cpu(desc[0].data[1]), + le32_to_cpu(desc[0].data[2]), le32_to_cpu(desc[0].data[3]), + le32_to_cpu(desc[0].data[4]), 
le32_to_cpu(desc[0].data[5])); + dev_err(dev, "AXI2: %08X %08X %08X %08X %08X %08X\n", + le32_to_cpu(desc[1].data[0]), le32_to_cpu(desc[1].data[1]), + le32_to_cpu(desc[1].data[2]), le32_to_cpu(desc[1].data[3]), + le32_to_cpu(desc[1].data[4]), le32_to_cpu(desc[1].data[5])); + dev_err(dev, "AXI3: %08X %08X %08X %08X\n", + le32_to_cpu(desc[2].data[0]), le32_to_cpu(desc[2].data[1]), + le32_to_cpu(desc[2].data[2]), le32_to_cpu(desc[2].data[3])); + + return 0; +} + +static int hclge_log_rocee_ecc_error(struct hclge_dev *hdev) +{ + struct device *dev = &hdev->pdev->dev; + struct hclge_desc desc[2]; + int ret; + + ret = hclge_cmd_query_error(hdev, &desc[0], + HCLGE_QUERY_ROCEE_ECC_RAS_INFO_CMD, + HCLGE_COMM_CMD_FLAG_NEXT); + if (ret) { + dev_err(dev, "failed(%d) to query ROCEE ECC error sts\n", ret); + return ret; + } + + dev_err(dev, "ECC1: %08X %08X %08X %08X %08X %08X\n", + le32_to_cpu(desc[0].data[0]), le32_to_cpu(desc[0].data[1]), + le32_to_cpu(desc[0].data[2]), le32_to_cpu(desc[0].data[3]), + le32_to_cpu(desc[0].data[4]), le32_to_cpu(desc[0].data[5])); + dev_err(dev, "ECC2: %08X %08X %08X\n", le32_to_cpu(desc[1].data[0]), + le32_to_cpu(desc[1].data[1]), le32_to_cpu(desc[1].data[2])); + + return 0; +} + +static int hclge_log_rocee_ovf_error(struct hclge_dev *hdev) +{ + struct device *dev = &hdev->pdev->dev; + struct hclge_desc desc[2]; + int ret; + + /* read overflow error status */ + ret = hclge_cmd_query_error(hdev, &desc[0], HCLGE_ROCEE_PF_RAS_INT_CMD, + 0); + if (ret) { + dev_err(dev, "failed(%d) to query ROCEE OVF error sts\n", ret); + return ret; + } + + /* log overflow error */ + if (le32_to_cpu(desc[0].data[0]) & HCLGE_ROCEE_OVF_ERR_INT_MASK) { + const struct hclge_hw_error *err; + u32 err_sts; + + err = &hclge_rocee_qmm_ovf_err_int[0]; + err_sts = HCLGE_ROCEE_OVF_ERR_TYPE_MASK & + le32_to_cpu(desc[0].data[0]); + while (err->msg) { + if (err->int_msk == err_sts) { + dev_err(dev, "%s [error status=0x%x] found\n", + err->msg, + le32_to_cpu(desc[0].data[0])); + break; + } + err++; + } + } + + if (le32_to_cpu(desc[0].data[1]) & HCLGE_ROCEE_OVF_ERR_INT_MASK) { + dev_err(dev, "ROCEE TSP OVF [error status=0x%x] found\n", + le32_to_cpu(desc[0].data[1])); + } + + if (le32_to_cpu(desc[0].data[2]) & HCLGE_ROCEE_OVF_ERR_INT_MASK) { + dev_err(dev, "ROCEE SCC OVF [error status=0x%x] found\n", + le32_to_cpu(desc[0].data[2])); + } + + return 0; +} + +static enum hnae3_reset_type +hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev) +{ + enum hnae3_reset_type reset_type = HNAE3_NONE_RESET; + struct device *dev = &hdev->pdev->dev; + struct hclge_desc desc[2]; + unsigned int status; + int ret; + + /* read RAS error interrupt status */ + ret = hclge_cmd_query_error(hdev, &desc[0], + HCLGE_QUERY_CLEAR_ROCEE_RAS_INT, 0); + if (ret) { + dev_err(dev, "failed(%d) to query ROCEE RAS INT SRC\n", ret); + /* reset everything for now */ + return HNAE3_GLOBAL_RESET; + } + + status = le32_to_cpu(desc[0].data[0]); + if (status & HCLGE_ROCEE_AXI_ERR_INT_MASK) { + if (status & HCLGE_ROCEE_RERR_INT_MASK) + dev_err(dev, "ROCEE RAS AXI rresp error\n"); + + if (status & HCLGE_ROCEE_BERR_INT_MASK) + dev_err(dev, "ROCEE RAS AXI bresp error\n"); + + reset_type = HNAE3_FUNC_RESET; + + hclge_report_hw_error(hdev, HNAE3_ROCEE_AXI_RESP_ERROR); + + ret = hclge_log_rocee_axi_error(hdev); + if (ret) + return HNAE3_GLOBAL_RESET; + } + + if (status & HCLGE_ROCEE_ECC_INT_MASK) { + dev_err(dev, "ROCEE RAS 2bit ECC error\n"); + reset_type = HNAE3_GLOBAL_RESET; + + ret = hclge_log_rocee_ecc_error(hdev); + if (ret) + return 
HNAE3_GLOBAL_RESET; + } + + if (status & HCLGE_ROCEE_OVF_INT_MASK) { + ret = hclge_log_rocee_ovf_error(hdev); + if (ret) { + dev_err(dev, "failed(%d) to process ovf error\n", ret); + /* reset everything for now */ + return HNAE3_GLOBAL_RESET; + } + } + + /* clear error status */ + hclge_comm_cmd_reuse_desc(&desc[0], false); + ret = hclge_cmd_send(&hdev->hw, &desc[0], 1); + if (ret) { + dev_err(dev, "failed(%d) to clear ROCEE RAS error\n", ret); + /* reset everything for now */ + return HNAE3_GLOBAL_RESET; + } + + return reset_type; +} + +int hclge_config_rocee_ras_interrupt(struct hclge_dev *hdev, bool en) +{ + struct device *dev = &hdev->pdev->dev; + struct hclge_desc desc; + int ret; + + if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2 || + !hnae3_dev_roce_supported(hdev)) + return 0; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_CONFIG_ROCEE_RAS_INT_EN, false); + if (en) { + /* enable ROCEE hw error interrupts */ + desc.data[0] = cpu_to_le32(HCLGE_ROCEE_RAS_NFE_INT_EN); + desc.data[1] = cpu_to_le32(HCLGE_ROCEE_RAS_CE_INT_EN); + + hclge_log_and_clear_rocee_ras_error(hdev); + } + desc.data[2] = cpu_to_le32(HCLGE_ROCEE_RAS_NFE_INT_EN_MASK); + desc.data[3] = cpu_to_le32(HCLGE_ROCEE_RAS_CE_INT_EN_MASK); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(dev, "failed(%d) to config ROCEE RAS interrupt\n", ret); + + return ret; +} + +static void hclge_handle_rocee_ras_error(struct hnae3_ae_dev *ae_dev) +{ + struct hclge_dev *hdev = ae_dev->priv; + enum hnae3_reset_type reset_type; + + if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) + return; + + reset_type = hclge_log_and_clear_rocee_ras_error(hdev); + if (reset_type != HNAE3_NONE_RESET) + set_bit(reset_type, &ae_dev->hw_err_reset_req); +} + +static const struct hclge_hw_blk hw_blk[] = { + { + .msk = BIT(0), + .name = "IGU_EGU", + .config_err_int = hclge_config_igu_egu_hw_err_int, + }, { + .msk = BIT(1), + .name = "PPP", + .config_err_int = hclge_config_ppp_hw_err_int, + }, { + .msk = BIT(2), + .name = "SSU", + .config_err_int = hclge_config_ssu_hw_err_int, + }, { + .msk = BIT(3), + .name = "PPU", + .config_err_int = hclge_config_ppu_hw_err_int, + }, { + .msk = BIT(4), + .name = "TM", + .config_err_int = hclge_config_tm_hw_err_int, + }, { + .msk = BIT(5), + .name = "COMMON", + .config_err_int = hclge_config_common_hw_err_int, + }, { + .msk = BIT(8), + .name = "MAC", + .config_err_int = hclge_config_mac_err_int, + }, { + /* sentinel */ + } +}; + +static void hclge_config_all_msix_error(struct hclge_dev *hdev, bool enable) +{ + u32 reg_val; + + reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG); + + if (enable) + reg_val |= BIT(HCLGE_VECTOR0_ALL_MSIX_ERR_B); + else + reg_val &= ~BIT(HCLGE_VECTOR0_ALL_MSIX_ERR_B); + + hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val); +} + +int hclge_config_nic_hw_error(struct hclge_dev *hdev, bool state) +{ + const struct hclge_hw_blk *module = hw_blk; + int ret = 0; + + hclge_config_all_msix_error(hdev, state); + + while (module->name) { + if (module->config_err_int) { + ret = module->config_err_int(hdev, state); + if (ret) + return ret; + } + module++; + } + + return ret; +} + +pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev) +{ + struct hclge_dev *hdev = ae_dev->priv; + struct device *dev = &hdev->pdev->dev; + u32 status; + + if (!test_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state)) { + dev_err(dev, + "Can't recover - RAS error reported during dev init\n"); + return PCI_ERS_RESULT_NONE; + } + + status = hclge_read_dev(&hdev->hw, 
HCLGE_RAS_PF_OTHER_INT_STS_REG); + if (status & HCLGE_RAS_REG_NFE_MASK || + status & HCLGE_RAS_REG_ROCEE_ERR_MASK) + ae_dev->hw_err_reset_req = 0; + else + goto out; + + /* Handling Non-fatal HNS RAS errors */ + if (status & HCLGE_RAS_REG_NFE_MASK) { + dev_err(dev, + "HNS Non-Fatal RAS error(status=0x%x) identified\n", + status); + hclge_handle_all_ras_errors(hdev); + } + + /* Handling Non-fatal Rocee RAS errors */ + if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 && + status & HCLGE_RAS_REG_ROCEE_ERR_MASK) { + dev_err(dev, "ROCEE Non-Fatal RAS error identified\n"); + hclge_handle_rocee_ras_error(ae_dev); + } + + if (ae_dev->hw_err_reset_req) + return PCI_ERS_RESULT_NEED_RESET; + +out: + return PCI_ERS_RESULT_RECOVERED; +} + +static int hclge_clear_hw_msix_error(struct hclge_dev *hdev, + struct hclge_desc *desc, bool is_mpf, + u32 bd_num) +{ + if (is_mpf) + desc[0].opcode = + cpu_to_le16(HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT); + else + desc[0].opcode = cpu_to_le16(HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT); + + desc[0].flag = cpu_to_le16(HCLGE_COMM_CMD_FLAG_NO_INTR | + HCLGE_COMM_CMD_FLAG_IN); + + return hclge_cmd_send(&hdev->hw, &desc[0], bd_num); +} + +/* hclge_query_8bd_info: query information about over_8bd_nfe_err + * @hdev: pointer to struct hclge_dev + * @vf_id: Index of the virtual function with error + * @q_id: Physical index of the queue with error + * + * This function get specific index of queue and function which causes + * over_8bd_nfe_err by using command. If vf_id is 0, it means error is + * caused by PF instead of VF. + */ +static int hclge_query_over_8bd_err_info(struct hclge_dev *hdev, u16 *vf_id, + u16 *q_id) +{ + struct hclge_query_ppu_pf_other_int_dfx_cmd *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PPU_PF_OTHER_INT_DFX, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + return ret; + + req = (struct hclge_query_ppu_pf_other_int_dfx_cmd *)desc.data; + *vf_id = le16_to_cpu(req->over_8bd_no_fe_vf_id); + *q_id = le16_to_cpu(req->over_8bd_no_fe_qid); + + return 0; +} + +/* hclge_handle_over_8bd_err: handle MSI-X error named over_8bd_nfe_err + * @hdev: pointer to struct hclge_dev + * @reset_requests: reset level that we need to trigger later + * + * over_8bd_nfe_err is a special MSI-X because it may caused by a VF, in + * that case, we need to trigger VF reset. Otherwise, a PF reset is needed. + */ +static void hclge_handle_over_8bd_err(struct hclge_dev *hdev, + unsigned long *reset_requests) +{ + struct device *dev = &hdev->pdev->dev; + u16 vf_id; + u16 q_id; + int ret; + + ret = hclge_query_over_8bd_err_info(hdev, &vf_id, &q_id); + if (ret) { + dev_err(dev, "fail(%d) to query over_8bd_no_fe info\n", + ret); + return; + } + + dev_err(dev, "PPU_PF_ABNORMAL_INT_ST over_8bd_no_fe found, vport(%u), queue_id(%u)\n", + vf_id, q_id); + + if (vf_id) { + if (vf_id >= hdev->num_alloc_vport) { + dev_err(dev, "invalid vport(%u)\n", vf_id); + return; + } + + /* If we need to trigger other reset whose level is higher + * than HNAE3_VF_FUNC_RESET, no need to trigger a VF reset + * here. 
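	 * A non-zero @reset_requests bitmap at this point already carries
	 * such a higher-level reset, so the VF notification below is
	 * skipped in that case.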
+ */ + if (*reset_requests != 0) + return; + + ret = hclge_inform_reset_assert_to_vf(&hdev->vport[vf_id]); + if (ret) + dev_err(dev, "inform reset to vport(%u) failed %d!\n", + vf_id, ret); + } else { + set_bit(HNAE3_FUNC_RESET, reset_requests); + } +} + +/* hclge_handle_mpf_msix_error: handle all main PF MSI-X errors + * @hdev: pointer to struct hclge_dev + * @desc: descriptor for describing the command + * @mpf_bd_num: number of extended command structures + * @reset_requests: record of the reset level that we need + * + * This function handles all the main PF MSI-X errors in the hw register/s + * using command. + */ +static int hclge_handle_mpf_msix_error(struct hclge_dev *hdev, + struct hclge_desc *desc, + int mpf_bd_num, + unsigned long *reset_requests) +{ + struct device *dev = &hdev->pdev->dev; + __le32 *desc_data; + u32 status; + int ret; + /* query all main PF MSIx errors */ + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT, + true); + ret = hclge_cmd_send(&hdev->hw, &desc[0], mpf_bd_num); + if (ret) { + dev_err(dev, "query all mpf msix int cmd failed (%d)\n", ret); + return ret; + } + + /* log MAC errors */ + desc_data = (__le32 *)&desc[1]; + status = le32_to_cpu(*desc_data); + if (status) + hclge_log_error(dev, "MAC_AFIFO_TNL_INT_R", + &hclge_mac_afifo_tnl_int[0], status, + reset_requests); + + /* log PPU(RCB) MPF errors */ + desc_data = (__le32 *)&desc[5]; + status = le32_to_cpu(*(desc_data + 2)) & + HCLGE_PPU_MPF_INT_ST2_MSIX_MASK; + if (status) + dev_err(dev, "PPU_MPF_ABNORMAL_INT_ST2 rx_q_search_miss found [dfx status=0x%x\n]", + status); + + /* clear all main PF MSIx errors */ + ret = hclge_clear_hw_msix_error(hdev, desc, true, mpf_bd_num); + if (ret) + dev_err(dev, "clear all mpf msix int cmd failed (%d)\n", ret); + + return ret; +} + +/* hclge_handle_pf_msix_error: handle all PF MSI-X errors + * @hdev: pointer to struct hclge_dev + * @desc: descriptor for describing the command + * @mpf_bd_num: number of extended command structures + * @reset_requests: record of the reset level that we need + * + * This function handles all the PF MSI-X errors in the hw register/s using + * command. 
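 *
 * The PF-level sources decoded here are the SSU port interrupts, the PPP
 * PF abnormal interrupts and the PPU PF abnormal interrupts; the last of
 * these includes the over_8bd_nfe_err case, which is handed off to
 * hclge_handle_over_8bd_err() above.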
+ */ +static int hclge_handle_pf_msix_error(struct hclge_dev *hdev, + struct hclge_desc *desc, + int pf_bd_num, + unsigned long *reset_requests) +{ + struct device *dev = &hdev->pdev->dev; + __le32 *desc_data; + u32 status; + int ret; + + /* query all PF MSIx errors */ + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT, + true); + ret = hclge_cmd_send(&hdev->hw, &desc[0], pf_bd_num); + if (ret) { + dev_err(dev, "query all pf msix int cmd failed (%d)\n", ret); + return ret; + } + + /* log SSU PF errors */ + status = le32_to_cpu(desc[0].data[0]) & HCLGE_SSU_PORT_INT_MSIX_MASK; + if (status) + hclge_log_error(dev, "SSU_PORT_BASED_ERR_INT", + &hclge_ssu_port_based_pf_int[0], + status, reset_requests); + + /* read and log PPP PF errors */ + desc_data = (__le32 *)&desc[2]; + status = le32_to_cpu(*desc_data); + if (status) + hclge_log_error(dev, "PPP_PF_ABNORMAL_INT_ST0", + &hclge_ppp_pf_abnormal_int[0], + status, reset_requests); + + /* log PPU(RCB) PF errors */ + desc_data = (__le32 *)&desc[3]; + status = le32_to_cpu(*desc_data) & HCLGE_PPU_PF_INT_MSIX_MASK; + if (status) + hclge_log_error(dev, "PPU_PF_ABNORMAL_INT_ST", + &hclge_ppu_pf_abnormal_int[0], + status, reset_requests); + + status = le32_to_cpu(*desc_data) & HCLGE_PPU_PF_OVER_8BD_ERR_MASK; + if (status) + hclge_handle_over_8bd_err(hdev, reset_requests); + + /* clear all PF MSIx errors */ + ret = hclge_clear_hw_msix_error(hdev, desc, false, pf_bd_num); + if (ret) + dev_err(dev, "clear all pf msix int cmd failed (%d)\n", ret); + + return ret; +} + +static int hclge_handle_all_hw_msix_error(struct hclge_dev *hdev, + unsigned long *reset_requests) +{ + u32 mpf_bd_num, pf_bd_num, bd_num; + struct hclge_desc *desc; + int ret; + + /* query the number of bds for the MSIx int status */ + ret = hclge_query_bd_num(hdev, false, &mpf_bd_num, &pf_bd_num); + if (ret) + goto out; + + bd_num = max_t(u32, mpf_bd_num, pf_bd_num); + desc = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL); + if (!desc) + return -ENOMEM; + + ret = hclge_handle_mpf_msix_error(hdev, desc, mpf_bd_num, + reset_requests); + if (ret) + goto msi_error; + + memset(desc, 0, bd_num * sizeof(struct hclge_desc)); + ret = hclge_handle_pf_msix_error(hdev, desc, pf_bd_num, reset_requests); + if (ret) + goto msi_error; + + ret = hclge_handle_mac_tnl(hdev); + +msi_error: + kfree(desc); +out: + return ret; +} + +int hclge_handle_hw_msix_error(struct hclge_dev *hdev, + unsigned long *reset_requests) +{ + struct device *dev = &hdev->pdev->dev; + + if (!test_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state)) { + dev_err(dev, + "failed to handle msix error during dev init\n"); + return -EAGAIN; + } + + return hclge_handle_all_hw_msix_error(hdev, reset_requests); +} + +int hclge_handle_mac_tnl(struct hclge_dev *hdev) +{ + struct hclge_mac_tnl_stats mac_tnl_stats; + struct device *dev = &hdev->pdev->dev; + struct hclge_desc desc; + u32 status; + int ret; + + /* query and clear mac tnl interruptions */ + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_TNL_INT, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(dev, "failed to query mac tnl int, ret = %d.\n", ret); + return ret; + } + + status = le32_to_cpu(desc.data[0]); + if (status) { + /* When mac tnl interrupt occurs, we record current time and + * register status here in a fifo, then clear the status. So + * that if link status changes suddenly at some time, we can + * query them by debugfs. 
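	 * The records go into hdev->mac_tnl_log via kfifo_put() below, from
	 * where the debugfs code can read them back later.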
+ */ + mac_tnl_stats.time = local_clock(); + mac_tnl_stats.status = status; + kfifo_put(&hdev->mac_tnl_log, mac_tnl_stats); + ret = hclge_clear_mac_tnl_int(hdev); + if (ret) + dev_err(dev, "failed to clear mac tnl int, ret = %d.\n", + ret); + } + + return ret; +} + +void hclge_handle_all_hns_hw_errors(struct hnae3_ae_dev *ae_dev) +{ + struct hclge_dev *hdev = ae_dev->priv; + struct device *dev = &hdev->pdev->dev; + u32 mpf_bd_num, pf_bd_num, bd_num; + struct hclge_desc *desc; + u32 status; + int ret; + + ae_dev->hw_err_reset_req = 0; + status = hclge_read_dev(&hdev->hw, HCLGE_RAS_PF_OTHER_INT_STS_REG); + + /* query the number of bds for the MSIx int status */ + ret = hclge_query_bd_num(hdev, false, &mpf_bd_num, &pf_bd_num); + if (ret) + return; + + bd_num = max_t(u32, mpf_bd_num, pf_bd_num); + desc = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL); + if (!desc) + return; + + /* Clear HNS hw errors reported through msix */ + memset(&desc[0].data[0], 0xFF, mpf_bd_num * sizeof(struct hclge_desc) - + HCLGE_DESC_NO_DATA_LEN); + ret = hclge_clear_hw_msix_error(hdev, desc, true, mpf_bd_num); + if (ret) { + dev_err(dev, "fail(%d) to clear mpf msix int during init\n", + ret); + goto msi_error; + } + + memset(&desc[0].data[0], 0xFF, pf_bd_num * sizeof(struct hclge_desc) - + HCLGE_DESC_NO_DATA_LEN); + ret = hclge_clear_hw_msix_error(hdev, desc, false, pf_bd_num); + if (ret) { + dev_err(dev, "fail(%d) to clear pf msix int during init\n", + ret); + goto msi_error; + } + + /* Handle Non-fatal HNS RAS errors */ + if (status & HCLGE_RAS_REG_NFE_MASK) { + dev_err(dev, "HNS hw error(RAS) identified during init\n"); + hclge_handle_all_ras_errors(hdev); + } + +msi_error: + kfree(desc); +} + +bool hclge_find_error_source(struct hclge_dev *hdev) +{ + u32 msix_src_flag, hw_err_src_flag; + + msix_src_flag = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) & + HCLGE_VECTOR0_REG_MSIX_MASK; + + hw_err_src_flag = hclge_read_dev(&hdev->hw, + HCLGE_RAS_PF_OTHER_INT_STS_REG) & + HCLGE_RAS_REG_ERR_MASK; + + return msix_src_flag || hw_err_src_flag; +} + +void hclge_handle_occurred_error(struct hclge_dev *hdev) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); + + if (hclge_find_error_source(hdev)) + hclge_handle_error_info_log(ae_dev); +} + +static void +hclge_handle_error_type_reg_log(struct device *dev, + struct hclge_mod_err_info *mod_info, + struct hclge_type_reg_err_info *type_reg_info) +{ +#define HCLGE_ERR_TYPE_MASK 0x7F +#define HCLGE_ERR_TYPE_IS_RAS_OFFSET 7 + + u8 mod_id, total_module, type_id, total_type, i, is_ras; + u8 index_module = MODULE_NONE; + u8 index_type = NONE_ERROR; + + mod_id = mod_info->mod_id; + type_id = type_reg_info->type_id & HCLGE_ERR_TYPE_MASK; + is_ras = type_reg_info->type_id >> HCLGE_ERR_TYPE_IS_RAS_OFFSET; + + total_module = ARRAY_SIZE(hclge_hw_module_id_st); + total_type = ARRAY_SIZE(hclge_hw_type_id_st); + + for (i = 0; i < total_module; i++) { + if (mod_id == hclge_hw_module_id_st[i].module_id) { + index_module = i; + break; + } + } + + for (i = 0; i < total_type; i++) { + if (type_id == hclge_hw_type_id_st[i].type_id) { + index_type = i; + break; + } + } + + if (index_module != MODULE_NONE && index_type != NONE_ERROR) + dev_err(dev, + "found %s %s, is %s error.\n", + hclge_hw_module_id_st[index_module].msg, + hclge_hw_type_id_st[index_type].msg, + is_ras ? 
"ras" : "msix"); + else + dev_err(dev, + "unknown module[%u] or type[%u].\n", mod_id, type_id); + + dev_err(dev, "reg_value:\n"); + for (i = 0; i < type_reg_info->reg_num; i++) + dev_err(dev, "0x%08x\n", type_reg_info->hclge_reg[i]); +} + +static void hclge_handle_error_module_log(struct hnae3_ae_dev *ae_dev, + const u32 *buf, u32 buf_size) +{ + struct hclge_type_reg_err_info *type_reg_info; + struct hclge_dev *hdev = ae_dev->priv; + struct device *dev = &hdev->pdev->dev; + struct hclge_mod_err_info *mod_info; + struct hclge_sum_err_info *sum_info; + u8 mod_num, err_num, i; + u32 offset = 0; + + sum_info = (struct hclge_sum_err_info *)&buf[offset++]; + if (sum_info->reset_type && + sum_info->reset_type != HNAE3_NONE_RESET) + set_bit(sum_info->reset_type, &ae_dev->hw_err_reset_req); + mod_num = sum_info->mod_num; + + while (mod_num--) { + if (offset >= buf_size) { + dev_err(dev, "The offset(%u) exceeds buf's size(%u).\n", + offset, buf_size); + return; + } + mod_info = (struct hclge_mod_err_info *)&buf[offset++]; + err_num = mod_info->err_num; + + for (i = 0; i < err_num; i++) { + if (offset >= buf_size) { + dev_err(dev, + "The offset(%u) exceeds buf size(%u).\n", + offset, buf_size); + return; + } + + type_reg_info = (struct hclge_type_reg_err_info *) + &buf[offset++]; + hclge_handle_error_type_reg_log(dev, mod_info, + type_reg_info); + + offset += type_reg_info->reg_num; + } + } +} + +static int hclge_query_all_err_bd_num(struct hclge_dev *hdev, u32 *bd_num) +{ + struct device *dev = &hdev->pdev->dev; + struct hclge_desc desc_bd; + int ret; + + hclge_cmd_setup_basic_desc(&desc_bd, HCLGE_QUERY_ALL_ERR_BD_NUM, true); + ret = hclge_cmd_send(&hdev->hw, &desc_bd, 1); + if (ret) { + dev_err(dev, "failed to query error bd_num, ret = %d.\n", ret); + return ret; + } + + *bd_num = le32_to_cpu(desc_bd.data[0]); + if (!(*bd_num)) { + dev_err(dev, "The value of bd_num is 0!\n"); + return -EINVAL; + } + + return 0; +} + +static int hclge_query_all_err_info(struct hclge_dev *hdev, + struct hclge_desc *desc, u32 bd_num) +{ + struct device *dev = &hdev->pdev->dev; + int ret; + + hclge_cmd_setup_basic_desc(desc, HCLGE_QUERY_ALL_ERR_INFO, true); + ret = hclge_cmd_send(&hdev->hw, desc, bd_num); + if (ret) + dev_err(dev, "failed to query error info, ret = %d.\n", ret); + + return ret; +} + +int hclge_handle_error_info_log(struct hnae3_ae_dev *ae_dev) +{ + u32 bd_num, desc_len, buf_len, buf_size, i; + struct hclge_dev *hdev = ae_dev->priv; + struct hclge_desc *desc; + __le32 *desc_data; + u32 *buf; + int ret; + + ret = hclge_query_all_err_bd_num(hdev, &bd_num); + if (ret) + goto out; + + desc_len = bd_num * sizeof(struct hclge_desc); + desc = kzalloc(desc_len, GFP_KERNEL); + if (!desc) { + ret = -ENOMEM; + goto out; + } + + ret = hclge_query_all_err_info(hdev, desc, bd_num); + if (ret) + goto err_desc; + + buf_len = bd_num * sizeof(struct hclge_desc) - HCLGE_DESC_NO_DATA_LEN; + buf_size = buf_len / sizeof(u32); + + desc_data = kzalloc(buf_len, GFP_KERNEL); + if (!desc_data) { + ret = -ENOMEM; + goto err_desc; + } + + buf = kzalloc(buf_len, GFP_KERNEL); + if (!buf) { + ret = -ENOMEM; + goto err_buf_alloc; + } + + memcpy(desc_data, &desc[0].data[0], buf_len); + for (i = 0; i < buf_size; i++) + buf[i] = le32_to_cpu(desc_data[i]); + + hclge_handle_error_module_log(ae_dev, buf, buf_size); + kfree(buf); + +err_buf_alloc: + kfree(desc_data); +err_desc: + kfree(desc); +out: + return ret; +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h new 
file mode 100644 index 000000000..86be6fb32 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h @@ -0,0 +1,231 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2016-2017 Hisilicon Limited. */ + +#ifndef __HCLGE_ERR_H +#define __HCLGE_ERR_H + +#include "hclge_main.h" +#include "hnae3.h" + +#define HCLGE_MPF_RAS_INT_MIN_BD_NUM 10 +#define HCLGE_PF_RAS_INT_MIN_BD_NUM 4 +#define HCLGE_MPF_MSIX_INT_MIN_BD_NUM 10 +#define HCLGE_PF_MSIX_INT_MIN_BD_NUM 4 + +#define HCLGE_RAS_PF_OTHER_INT_STS_REG 0x20B00 +#define HCLGE_RAS_REG_NFE_MASK 0xFF00 +#define HCLGE_RAS_REG_ROCEE_ERR_MASK 0x3000000 +#define HCLGE_RAS_REG_ERR_MASK \ + (HCLGE_RAS_REG_NFE_MASK | HCLGE_RAS_REG_ROCEE_ERR_MASK) + +#define HCLGE_VECTOR0_REG_MSIX_MASK 0x1FF00 + +#define HCLGE_IMP_TCM_ECC_ERR_INT_EN 0xFFFF0000 +#define HCLGE_IMP_TCM_ECC_ERR_INT_EN_MASK 0xFFFF0000 +#define HCLGE_IMP_ITCM4_ECC_ERR_INT_EN 0x300 +#define HCLGE_IMP_ITCM4_ECC_ERR_INT_EN_MASK 0x300 +#define HCLGE_CMDQ_NIC_ECC_ERR_INT_EN 0xFFFF +#define HCLGE_CMDQ_NIC_ECC_ERR_INT_EN_MASK 0xFFFF +#define HCLGE_CMDQ_ROCEE_ECC_ERR_INT_EN 0xFFFF0000 +#define HCLGE_CMDQ_ROCEE_ECC_ERR_INT_EN_MASK 0xFFFF0000 +#define HCLGE_IMP_RD_POISON_ERR_INT_EN 0x0100 +#define HCLGE_IMP_RD_POISON_ERR_INT_EN_MASK 0x0100 +#define HCLGE_TQP_ECC_ERR_INT_EN 0x0FFF +#define HCLGE_TQP_ECC_ERR_INT_EN_MASK 0x0FFF +#define HCLGE_MSIX_SRAM_ECC_ERR_INT_EN_MASK 0x0F000000 +#define HCLGE_MSIX_SRAM_ECC_ERR_INT_EN 0x0F000000 +#define HCLGE_IGU_ERR_INT_EN 0x0000000F +#define HCLGE_IGU_ERR_INT_TYPE 0x00000660 +#define HCLGE_IGU_ERR_INT_EN_MASK 0x000F +#define HCLGE_IGU_TNL_ERR_INT_EN 0x0002AABF +#define HCLGE_IGU_TNL_ERR_INT_EN_MASK 0x003F +#define HCLGE_PPP_MPF_ECC_ERR_INT0_EN 0xFFFFFFFF +#define HCLGE_PPP_MPF_ECC_ERR_INT0_EN_MASK 0xFFFFFFFF +#define HCLGE_PPP_MPF_ECC_ERR_INT1_EN 0xFFFFFFFF +#define HCLGE_PPP_MPF_ECC_ERR_INT1_EN_MASK 0xFFFFFFFF +#define HCLGE_PPP_PF_ERR_INT_EN 0x0003 +#define HCLGE_PPP_PF_ERR_INT_EN_MASK 0x0003 +#define HCLGE_PPP_MPF_ECC_ERR_INT2_EN 0x003F +#define HCLGE_PPP_MPF_ECC_ERR_INT2_EN_MASK 0x003F +#define HCLGE_PPP_MPF_ECC_ERR_INT3_EN 0x003F +#define HCLGE_PPP_MPF_ECC_ERR_INT3_EN_MASK 0x003F +#define HCLGE_TM_SCH_ECC_ERR_INT_EN 0x3 +#define HCLGE_TM_QCN_ERR_INT_TYPE 0x29 +#define HCLGE_TM_QCN_FIFO_INT_EN 0xFFFF00 +#define HCLGE_TM_QCN_MEM_ERR_INT_EN 0xFFFFFF +#define HCLGE_NCSI_ERR_INT_EN 0x3 +#define HCLGE_NCSI_ERR_INT_TYPE 0x9 +#define HCLGE_MAC_COMMON_ERR_INT_EN 0x107FF +#define HCLGE_MAC_COMMON_ERR_INT_EN_MASK 0x107FF +#define HCLGE_MAC_TNL_INT_EN GENMASK(9, 0) +#define HCLGE_MAC_TNL_INT_EN_MASK GENMASK(9, 0) +#define HCLGE_MAC_TNL_INT_CLR GENMASK(9, 0) +#define HCLGE_PPU_MPF_ABNORMAL_INT0_EN GENMASK(31, 0) +#define HCLGE_PPU_MPF_ABNORMAL_INT0_EN_MASK GENMASK(31, 0) +#define HCLGE_PPU_MPF_ABNORMAL_INT1_EN GENMASK(31, 0) +#define HCLGE_PPU_MPF_ABNORMAL_INT1_EN_MASK GENMASK(31, 0) +#define HCLGE_PPU_MPF_ABNORMAL_INT2_EN 0x3FFF3FFF +#define HCLGE_PPU_MPF_ABNORMAL_INT2_EN_MASK 0x3FFF3FFF +#define HCLGE_PPU_MPF_ABNORMAL_INT2_EN2 0xB +#define HCLGE_PPU_MPF_ABNORMAL_INT2_EN2_MASK 0xB +#define HCLGE_PPU_MPF_ABNORMAL_INT3_EN GENMASK(7, 0) +#define HCLGE_PPU_MPF_ABNORMAL_INT3_EN_MASK GENMASK(23, 16) +#define HCLGE_PPU_PF_ABNORMAL_INT_EN GENMASK(5, 0) +#define HCLGE_PPU_PF_ABNORMAL_INT_EN_MASK GENMASK(5, 0) +#define HCLGE_SSU_1BIT_ECC_ERR_INT_EN GENMASK(31, 0) +#define HCLGE_SSU_1BIT_ECC_ERR_INT_EN_MASK GENMASK(31, 0) +#define HCLGE_SSU_MULTI_BIT_ECC_ERR_INT_EN GENMASK(31, 0) +#define HCLGE_SSU_MULTI_BIT_ECC_ERR_INT_EN_MASK GENMASK(31, 0) +#define 
HCLGE_SSU_BIT32_ECC_ERR_INT_EN 0x0101 +#define HCLGE_SSU_BIT32_ECC_ERR_INT_EN_MASK 0x0101 +#define HCLGE_SSU_COMMON_INT_EN GENMASK(9, 0) +#define HCLGE_SSU_COMMON_INT_EN_MASK GENMASK(9, 0) +#define HCLGE_SSU_PORT_BASED_ERR_INT_EN 0x0BFF +#define HCLGE_SSU_PORT_BASED_ERR_INT_EN_MASK 0x0BFF0000 +#define HCLGE_SSU_FIFO_OVERFLOW_ERR_INT_EN GENMASK(23, 0) +#define HCLGE_SSU_FIFO_OVERFLOW_ERR_INT_EN_MASK GENMASK(23, 0) + +#define HCLGE_SSU_COMMON_ERR_INT_MASK GENMASK(9, 0) +#define HCLGE_SSU_PORT_INT_MSIX_MASK 0x7BFF +#define HCLGE_IGU_INT_MASK GENMASK(3, 0) +#define HCLGE_IGU_EGU_TNL_INT_MASK GENMASK(5, 0) +#define HCLGE_PPP_MPF_INT_ST3_MASK GENMASK(5, 0) +#define HCLGE_PPU_MPF_INT_ST3_MASK GENMASK(7, 0) +#define HCLGE_PPU_MPF_INT_ST2_MSIX_MASK BIT(29) +#define HCLGE_PPU_PF_INT_RAS_MASK 0x18 +#define HCLGE_PPU_PF_INT_MSIX_MASK 0x26 +#define HCLGE_PPU_PF_OVER_8BD_ERR_MASK 0x01 +#define HCLGE_QCN_FIFO_INT_MASK GENMASK(17, 0) +#define HCLGE_QCN_ECC_INT_MASK GENMASK(21, 0) +#define HCLGE_NCSI_ECC_INT_MASK GENMASK(1, 0) + +#define HCLGE_ROCEE_RAS_NFE_INT_EN 0xF +#define HCLGE_ROCEE_RAS_CE_INT_EN 0x1 +#define HCLGE_ROCEE_RAS_NFE_INT_EN_MASK 0xF +#define HCLGE_ROCEE_RAS_CE_INT_EN_MASK 0x1 +#define HCLGE_ROCEE_RERR_INT_MASK BIT(0) +#define HCLGE_ROCEE_BERR_INT_MASK BIT(1) +#define HCLGE_ROCEE_AXI_ERR_INT_MASK GENMASK(1, 0) +#define HCLGE_ROCEE_ECC_INT_MASK BIT(2) +#define HCLGE_ROCEE_OVF_INT_MASK BIT(3) +#define HCLGE_ROCEE_OVF_ERR_INT_MASK 0x10000 +#define HCLGE_ROCEE_OVF_ERR_TYPE_MASK 0x3F + +#define HCLGE_DESC_DATA_MAX 8 +#define HCLGE_REG_NUM_MAX 256 +#define HCLGE_DESC_NO_DATA_LEN 8 + +enum hclge_err_int_type { + HCLGE_ERR_INT_MSIX = 0, + HCLGE_ERR_INT_RAS_CE = 1, + HCLGE_ERR_INT_RAS_NFE = 2, + HCLGE_ERR_INT_RAS_FE = 3, +}; + +enum hclge_mod_name_list { + MODULE_NONE = 0, + MODULE_BIOS_COMMON = 1, + MODULE_GE = 2, + MODULE_IGU_EGU = 3, + MODULE_LGE = 4, + MODULE_NCSI = 5, + MODULE_PPP = 6, + MODULE_QCN = 7, + MODULE_RCB_RX = 8, + MODULE_RTC = 9, + MODULE_SSU = 10, + MODULE_TM = 11, + MODULE_RCB_TX = 12, + MODULE_TXDMA = 13, + MODULE_MASTER = 14, + MODULE_HIMAC = 15, + /* add new MODULE NAME for NIC here in order */ + MODULE_ROCEE_TOP = 40, + MODULE_ROCEE_TIMER = 41, + MODULE_ROCEE_MDB = 42, + MODULE_ROCEE_TSP = 43, + MODULE_ROCEE_TRP = 44, + MODULE_ROCEE_SCC = 45, + MODULE_ROCEE_CAEP = 46, + MODULE_ROCEE_GEN_AC = 47, + MODULE_ROCEE_QMM = 48, + MODULE_ROCEE_LSAN = 49, + /* add new MODULE NAME for RoCEE here in order */ +}; + +enum hclge_err_type_list { + NONE_ERROR = 0, + FIFO_ERROR = 1, + MEMORY_ERROR = 2, + POISON_ERROR = 3, + MSIX_ECC_ERROR = 4, + TQP_INT_ECC_ERROR = 5, + PF_ABNORMAL_INT_ERROR = 6, + MPF_ABNORMAL_INT_ERROR = 7, + COMMON_ERROR = 8, + PORT_ERROR = 9, + ETS_ERROR = 10, + NCSI_ERROR = 11, + GLB_ERROR = 12, + LINK_ERROR = 13, + PTP_ERROR = 14, + /* add new ERROR TYPE for NIC here in order */ + ROCEE_NORMAL_ERR = 40, + ROCEE_OVF_ERR = 41, + ROCEE_BUS_ERR = 42, + /* add new ERROR TYPE for ROCEE here in order */ +}; + +struct hclge_hw_blk { + u32 msk; + const char *name; + int (*config_err_int)(struct hclge_dev *hdev, bool en); +}; + +struct hclge_hw_error { + u32 int_msk; + const char *msg; + enum hnae3_reset_type reset_level; +}; + +struct hclge_hw_module_id { + enum hclge_mod_name_list module_id; + const char *msg; +}; + +struct hclge_hw_type_id { + enum hclge_err_type_list type_id; + const char *msg; +}; + +struct hclge_sum_err_info { + u8 reset_type; + u8 mod_num; + u8 rsv[2]; +}; + +struct hclge_mod_err_info { + u8 mod_id; + u8 err_num; + u8 rsv[2]; +}; + +struct 
hclge_type_reg_err_info { + u8 type_id; + u8 reg_num; + u8 rsv[2]; + u32 hclge_reg[HCLGE_REG_NUM_MAX]; +}; + +int hclge_config_mac_tnl_int(struct hclge_dev *hdev, bool en); +int hclge_config_nic_hw_error(struct hclge_dev *hdev, bool state); +int hclge_config_rocee_ras_interrupt(struct hclge_dev *hdev, bool en); +void hclge_handle_all_hns_hw_errors(struct hnae3_ae_dev *ae_dev); +bool hclge_find_error_source(struct hclge_dev *hdev); +void hclge_handle_occurred_error(struct hclge_dev *hdev); +pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev); +int hclge_handle_hw_msix_error(struct hclge_dev *hdev, + unsigned long *reset_requests); +int hclge_handle_error_info_log(struct hnae3_ae_dev *ae_dev); +int hclge_handle_mac_tnl(struct hclge_dev *hdev); +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c new file mode 100644 index 000000000..48b0cb5ec --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -0,0 +1,13229 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. + +#include <linux/acpi.h> +#include <linux/device.h> +#include <linux/etherdevice.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/netdevice.h> +#include <linux/pci.h> +#include <linux/platform_device.h> +#include <linux/if_vlan.h> +#include <linux/crash_dump.h> +#include <net/ipv6.h> +#include <net/rtnetlink.h> +#include "hclge_cmd.h" +#include "hclge_dcb.h" +#include "hclge_main.h" +#include "hclge_mbx.h" +#include "hclge_mdio.h" +#include "hclge_tm.h" +#include "hclge_err.h" +#include "hnae3.h" +#include "hclge_devlink.h" +#include "hclge_comm_cmd.h" + +#define HCLGE_NAME "hclge" + +#define HCLGE_BUF_SIZE_UNIT 256U +#define HCLGE_BUF_MUL_BY 2 +#define HCLGE_BUF_DIV_BY 2 +#define NEED_RESERVE_TC_NUM 2 +#define BUF_MAX_PERCENT 100 +#define BUF_RESERVE_PERCENT 90 + +#define HCLGE_RESET_MAX_FAIL_CNT 5 +#define HCLGE_RESET_SYNC_TIME 100 +#define HCLGE_PF_RESET_SYNC_TIME 20 +#define HCLGE_PF_RESET_SYNC_CNT 1500 + +/* Get DFX BD number offset */ +#define HCLGE_DFX_BIOS_BD_OFFSET 1 +#define HCLGE_DFX_SSU_0_BD_OFFSET 2 +#define HCLGE_DFX_SSU_1_BD_OFFSET 3 +#define HCLGE_DFX_IGU_BD_OFFSET 4 +#define HCLGE_DFX_RPU_0_BD_OFFSET 5 +#define HCLGE_DFX_RPU_1_BD_OFFSET 6 +#define HCLGE_DFX_NCSI_BD_OFFSET 7 +#define HCLGE_DFX_RTC_BD_OFFSET 8 +#define HCLGE_DFX_PPP_BD_OFFSET 9 +#define HCLGE_DFX_RCB_BD_OFFSET 10 +#define HCLGE_DFX_TQP_BD_OFFSET 11 +#define HCLGE_DFX_SSU_2_BD_OFFSET 12 + +#define HCLGE_LINK_STATUS_MS 10 + +static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps); +static int hclge_init_vlan_config(struct hclge_dev *hdev); +static void hclge_sync_vlan_filter(struct hclge_dev *hdev); +static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev); +static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle); +static void hclge_rfs_filter_expire(struct hclge_dev *hdev); +static int hclge_clear_arfs_rules(struct hclge_dev *hdev); +static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev, + unsigned long *addr); +static int hclge_set_default_loopback(struct hclge_dev *hdev); + +static void hclge_sync_mac_table(struct hclge_dev *hdev); +static void hclge_restore_hw_table(struct hclge_dev *hdev); +static void hclge_sync_promisc_mode(struct hclge_dev *hdev); +static void hclge_sync_fd_table(struct hclge_dev *hdev); +static void hclge_update_fec_stats(struct hclge_dev 
*hdev); +static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret, + int wait_cnt); +static int hclge_update_port_info(struct hclge_dev *hdev); + +static struct hnae3_ae_algo ae_algo; + +static struct workqueue_struct *hclge_wq; + +static const struct pci_device_id ae_algo_pci_tbl[] = { + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0}, + /* required last entry */ + {0, } +}; + +MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl); + +static const u32 cmdq_reg_addr_list[] = {HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG, + HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG, + HCLGE_COMM_NIC_CSQ_DEPTH_REG, + HCLGE_COMM_NIC_CSQ_TAIL_REG, + HCLGE_COMM_NIC_CSQ_HEAD_REG, + HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG, + HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG, + HCLGE_COMM_NIC_CRQ_DEPTH_REG, + HCLGE_COMM_NIC_CRQ_TAIL_REG, + HCLGE_COMM_NIC_CRQ_HEAD_REG, + HCLGE_COMM_VECTOR0_CMDQ_SRC_REG, + HCLGE_COMM_CMDQ_INTR_STS_REG, + HCLGE_COMM_CMDQ_INTR_EN_REG, + HCLGE_COMM_CMDQ_INTR_GEN_REG}; + +static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE, + HCLGE_PF_OTHER_INT_REG, + HCLGE_MISC_RESET_STS_REG, + HCLGE_MISC_VECTOR_INT_STS, + HCLGE_GLOBAL_RESET_REG, + HCLGE_FUN_RST_ING, + HCLGE_GRO_EN_REG}; + +static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG, + HCLGE_RING_RX_ADDR_H_REG, + HCLGE_RING_RX_BD_NUM_REG, + HCLGE_RING_RX_BD_LENGTH_REG, + HCLGE_RING_RX_MERGE_EN_REG, + HCLGE_RING_RX_TAIL_REG, + HCLGE_RING_RX_HEAD_REG, + HCLGE_RING_RX_FBD_NUM_REG, + HCLGE_RING_RX_OFFSET_REG, + HCLGE_RING_RX_FBD_OFFSET_REG, + HCLGE_RING_RX_STASH_REG, + HCLGE_RING_RX_BD_ERR_REG, + HCLGE_RING_TX_ADDR_L_REG, + HCLGE_RING_TX_ADDR_H_REG, + HCLGE_RING_TX_BD_NUM_REG, + HCLGE_RING_TX_PRIORITY_REG, + HCLGE_RING_TX_TC_REG, + HCLGE_RING_TX_MERGE_EN_REG, + HCLGE_RING_TX_TAIL_REG, + HCLGE_RING_TX_HEAD_REG, + HCLGE_RING_TX_FBD_NUM_REG, + HCLGE_RING_TX_OFFSET_REG, + HCLGE_RING_TX_EBD_NUM_REG, + HCLGE_RING_TX_EBD_OFFSET_REG, + HCLGE_RING_TX_BD_ERR_REG, + HCLGE_RING_EN_REG}; + +static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG, + HCLGE_TQP_INTR_GL0_REG, + HCLGE_TQP_INTR_GL1_REG, + HCLGE_TQP_INTR_GL2_REG, + HCLGE_TQP_INTR_RL_REG}; + +static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = { + "External Loopback test", + "App Loopback test", + "Serdes serial Loopback test", + "Serdes parallel Loopback test", + "Phy Loopback test" +}; + +static const struct hclge_comm_stats_str g_mac_stats_string[] = { + {"mac_tx_mac_pause_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)}, + {"mac_rx_mac_pause_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)}, + {"mac_tx_pause_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pause_xoff_time)}, + {"mac_rx_pause_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pause_xoff_time)}, + {"mac_tx_control_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)}, + {"mac_rx_control_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)}, + {"mac_tx_pfc_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)}, + 
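	/* Per-priority PFC transmit counters: pri0..pri7 packet counts
	 * (HCLGE_MAC_STATS_MAX_NUM_V1 entries) followed by pri0..pri7
	 * xoff-time counters (HCLGE_MAC_STATS_MAX_NUM_V2 entries).
	 */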
{"mac_tx_pfc_pri0_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)}, + {"mac_tx_pfc_pri1_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)}, + {"mac_tx_pfc_pri2_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)}, + {"mac_tx_pfc_pri3_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)}, + {"mac_tx_pfc_pri4_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)}, + {"mac_tx_pfc_pri5_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)}, + {"mac_tx_pfc_pri6_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)}, + {"mac_tx_pfc_pri7_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)}, + {"mac_tx_pfc_pri0_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_xoff_time)}, + {"mac_tx_pfc_pri1_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_xoff_time)}, + {"mac_tx_pfc_pri2_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_xoff_time)}, + {"mac_tx_pfc_pri3_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_xoff_time)}, + {"mac_tx_pfc_pri4_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_xoff_time)}, + {"mac_tx_pfc_pri5_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_xoff_time)}, + {"mac_tx_pfc_pri6_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_xoff_time)}, + {"mac_tx_pfc_pri7_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_xoff_time)}, + {"mac_rx_pfc_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)}, + {"mac_rx_pfc_pri0_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)}, + {"mac_rx_pfc_pri1_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)}, + {"mac_rx_pfc_pri2_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)}, + {"mac_rx_pfc_pri3_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)}, + {"mac_rx_pfc_pri4_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)}, + {"mac_rx_pfc_pri5_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)}, + {"mac_rx_pfc_pri6_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)}, + {"mac_rx_pfc_pri7_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)}, + {"mac_rx_pfc_pri0_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_xoff_time)}, + {"mac_rx_pfc_pri1_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_xoff_time)}, + {"mac_rx_pfc_pri2_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_xoff_time)}, + {"mac_rx_pfc_pri3_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_xoff_time)}, + {"mac_rx_pfc_pri4_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_xoff_time)}, + {"mac_rx_pfc_pri5_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_xoff_time)}, + 
{"mac_rx_pfc_pri6_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_xoff_time)}, + {"mac_rx_pfc_pri7_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_xoff_time)}, + {"mac_tx_total_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)}, + {"mac_tx_total_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)}, + {"mac_tx_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)}, + {"mac_tx_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)}, + {"mac_tx_good_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)}, + {"mac_tx_bad_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)}, + {"mac_tx_uni_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)}, + {"mac_tx_multi_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)}, + {"mac_tx_broad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)}, + {"mac_tx_undersize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)}, + {"mac_tx_oversize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)}, + {"mac_tx_64_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)}, + {"mac_tx_65_127_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)}, + {"mac_tx_128_255_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)}, + {"mac_tx_256_511_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)}, + {"mac_tx_512_1023_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)}, + {"mac_tx_1024_1518_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)}, + {"mac_tx_1519_2047_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)}, + {"mac_tx_2048_4095_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)}, + {"mac_tx_4096_8191_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)}, + {"mac_tx_8192_9216_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)}, + {"mac_tx_9217_12287_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)}, + {"mac_tx_12288_16383_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)}, + {"mac_tx_1519_max_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)}, + {"mac_tx_1519_max_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)}, + {"mac_rx_total_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)}, + {"mac_rx_total_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)}, + {"mac_rx_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)}, + {"mac_rx_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)}, + 
{"mac_rx_good_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)}, + {"mac_rx_bad_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)}, + {"mac_rx_uni_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)}, + {"mac_rx_multi_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)}, + {"mac_rx_broad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)}, + {"mac_rx_undersize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)}, + {"mac_rx_oversize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)}, + {"mac_rx_64_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)}, + {"mac_rx_65_127_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)}, + {"mac_rx_128_255_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)}, + {"mac_rx_256_511_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)}, + {"mac_rx_512_1023_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)}, + {"mac_rx_1024_1518_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)}, + {"mac_rx_1519_2047_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)}, + {"mac_rx_2048_4095_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)}, + {"mac_rx_4096_8191_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)}, + {"mac_rx_8192_9216_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)}, + {"mac_rx_9217_12287_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)}, + {"mac_rx_12288_16383_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)}, + {"mac_rx_1519_max_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)}, + {"mac_rx_1519_max_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)}, + + {"mac_tx_fragment_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)}, + {"mac_tx_undermin_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)}, + {"mac_tx_jabber_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)}, + {"mac_tx_err_all_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)}, + {"mac_tx_from_app_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)}, + {"mac_tx_from_app_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)}, + {"mac_rx_fragment_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)}, + {"mac_rx_undermin_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)}, + {"mac_rx_jabber_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)}, + {"mac_rx_fcs_err_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + 
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)}, + {"mac_rx_send_app_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)}, + {"mac_rx_send_app_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)} +}; + +static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = { + { + .flags = HCLGE_MAC_MGR_MASK_VLAN_B, + .ethter_type = cpu_to_le16(ETH_P_LLDP), + .mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e}, + .i_port_bitmap = 0x1, + }, +}; + +static const u32 hclge_dfx_bd_offset_list[] = { + HCLGE_DFX_BIOS_BD_OFFSET, + HCLGE_DFX_SSU_0_BD_OFFSET, + HCLGE_DFX_SSU_1_BD_OFFSET, + HCLGE_DFX_IGU_BD_OFFSET, + HCLGE_DFX_RPU_0_BD_OFFSET, + HCLGE_DFX_RPU_1_BD_OFFSET, + HCLGE_DFX_NCSI_BD_OFFSET, + HCLGE_DFX_RTC_BD_OFFSET, + HCLGE_DFX_PPP_BD_OFFSET, + HCLGE_DFX_RCB_BD_OFFSET, + HCLGE_DFX_TQP_BD_OFFSET, + HCLGE_DFX_SSU_2_BD_OFFSET +}; + +static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = { + HCLGE_OPC_DFX_BIOS_COMMON_REG, + HCLGE_OPC_DFX_SSU_REG_0, + HCLGE_OPC_DFX_SSU_REG_1, + HCLGE_OPC_DFX_IGU_EGU_REG, + HCLGE_OPC_DFX_RPU_REG_0, + HCLGE_OPC_DFX_RPU_REG_1, + HCLGE_OPC_DFX_NCSI_REG, + HCLGE_OPC_DFX_RTC_REG, + HCLGE_OPC_DFX_PPP_REG, + HCLGE_OPC_DFX_RCB_REG, + HCLGE_OPC_DFX_TQP_REG, + HCLGE_OPC_DFX_SSU_REG_2 +}; + +static const struct key_info meta_data_key_info[] = { + { PACKET_TYPE_ID, 6 }, + { IP_FRAGEMENT, 1 }, + { ROCE_TYPE, 1 }, + { NEXT_KEY, 5 }, + { VLAN_NUMBER, 2 }, + { SRC_VPORT, 12 }, + { DST_VPORT, 12 }, + { TUNNEL_PACKET, 1 }, +}; + +static const struct key_info tuple_key_info[] = { + { OUTER_DST_MAC, 48, KEY_OPT_MAC, -1, -1 }, + { OUTER_SRC_MAC, 48, KEY_OPT_MAC, -1, -1 }, + { OUTER_VLAN_TAG_FST, 16, KEY_OPT_LE16, -1, -1 }, + { OUTER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 }, + { OUTER_ETH_TYPE, 16, KEY_OPT_LE16, -1, -1 }, + { OUTER_L2_RSV, 16, KEY_OPT_LE16, -1, -1 }, + { OUTER_IP_TOS, 8, KEY_OPT_U8, -1, -1 }, + { OUTER_IP_PROTO, 8, KEY_OPT_U8, -1, -1 }, + { OUTER_SRC_IP, 32, KEY_OPT_IP, -1, -1 }, + { OUTER_DST_IP, 32, KEY_OPT_IP, -1, -1 }, + { OUTER_L3_RSV, 16, KEY_OPT_LE16, -1, -1 }, + { OUTER_SRC_PORT, 16, KEY_OPT_LE16, -1, -1 }, + { OUTER_DST_PORT, 16, KEY_OPT_LE16, -1, -1 }, + { OUTER_L4_RSV, 32, KEY_OPT_LE32, -1, -1 }, + { OUTER_TUN_VNI, 24, KEY_OPT_VNI, -1, -1 }, + { OUTER_TUN_FLOW_ID, 8, KEY_OPT_U8, -1, -1 }, + { INNER_DST_MAC, 48, KEY_OPT_MAC, + offsetof(struct hclge_fd_rule, tuples.dst_mac), + offsetof(struct hclge_fd_rule, tuples_mask.dst_mac) }, + { INNER_SRC_MAC, 48, KEY_OPT_MAC, + offsetof(struct hclge_fd_rule, tuples.src_mac), + offsetof(struct hclge_fd_rule, tuples_mask.src_mac) }, + { INNER_VLAN_TAG_FST, 16, KEY_OPT_LE16, + offsetof(struct hclge_fd_rule, tuples.vlan_tag1), + offsetof(struct hclge_fd_rule, tuples_mask.vlan_tag1) }, + { INNER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 }, + { INNER_ETH_TYPE, 16, KEY_OPT_LE16, + offsetof(struct hclge_fd_rule, tuples.ether_proto), + offsetof(struct hclge_fd_rule, tuples_mask.ether_proto) }, + { INNER_L2_RSV, 16, KEY_OPT_LE16, + offsetof(struct hclge_fd_rule, tuples.l2_user_def), + offsetof(struct hclge_fd_rule, tuples_mask.l2_user_def) }, + { INNER_IP_TOS, 8, KEY_OPT_U8, + offsetof(struct hclge_fd_rule, tuples.ip_tos), + offsetof(struct hclge_fd_rule, tuples_mask.ip_tos) }, + { INNER_IP_PROTO, 8, KEY_OPT_U8, + offsetof(struct hclge_fd_rule, tuples.ip_proto), + offsetof(struct hclge_fd_rule, tuples_mask.ip_proto) }, + { INNER_SRC_IP, 32, KEY_OPT_IP, + offsetof(struct hclge_fd_rule, tuples.src_ip), + offsetof(struct 
hclge_fd_rule, tuples_mask.src_ip) }, + { INNER_DST_IP, 32, KEY_OPT_IP, + offsetof(struct hclge_fd_rule, tuples.dst_ip), + offsetof(struct hclge_fd_rule, tuples_mask.dst_ip) }, + { INNER_L3_RSV, 16, KEY_OPT_LE16, + offsetof(struct hclge_fd_rule, tuples.l3_user_def), + offsetof(struct hclge_fd_rule, tuples_mask.l3_user_def) }, + { INNER_SRC_PORT, 16, KEY_OPT_LE16, + offsetof(struct hclge_fd_rule, tuples.src_port), + offsetof(struct hclge_fd_rule, tuples_mask.src_port) }, + { INNER_DST_PORT, 16, KEY_OPT_LE16, + offsetof(struct hclge_fd_rule, tuples.dst_port), + offsetof(struct hclge_fd_rule, tuples_mask.dst_port) }, + { INNER_L4_RSV, 32, KEY_OPT_LE32, + offsetof(struct hclge_fd_rule, tuples.l4_user_def), + offsetof(struct hclge_fd_rule, tuples_mask.l4_user_def) }, +}; + +/** + * hclge_cmd_send - send command to command queue + * @hw: pointer to the hw struct + * @desc: prefilled descriptor for describing the command + * @num : the number of descriptors to be sent + * + * This is the main send command for command queue, it + * sends the queue, cleans the queue, etc + **/ +int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num) +{ + return hclge_comm_cmd_send(&hw->hw, desc, num); +} + +static int hclge_mac_update_stats_defective(struct hclge_dev *hdev) +{ +#define HCLGE_MAC_CMD_NUM 21 + + u64 *data = (u64 *)(&hdev->mac_stats); + struct hclge_desc desc[HCLGE_MAC_CMD_NUM]; + __le64 *desc_data; + u32 data_size; + int ret; + u32 i; + + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true); + ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM); + if (ret) { + dev_err(&hdev->pdev->dev, + "Get MAC pkt stats fail, status = %d.\n", ret); + + return ret; + } + + /* The first desc has a 64-bit header, so data size need to minus 1 */ + data_size = sizeof(desc) / (sizeof(u64)) - 1; + + desc_data = (__le64 *)(&desc[0].data[0]); + for (i = 0; i < data_size; i++) { + /* data memory is continuous becase only the first desc has a + * header in this command + */ + *data += le64_to_cpu(*desc_data); + data++; + desc_data++; + } + + return 0; +} + +static int hclge_mac_update_stats_complete(struct hclge_dev *hdev) +{ +#define HCLGE_REG_NUM_PER_DESC 4 + + u32 reg_num = hdev->ae_dev->dev_specs.mac_stats_num; + u64 *data = (u64 *)(&hdev->mac_stats); + struct hclge_desc *desc; + __le64 *desc_data; + u32 data_size; + u32 desc_num; + int ret; + u32 i; + + /* The first desc has a 64-bit header, so need to consider it */ + desc_num = reg_num / HCLGE_REG_NUM_PER_DESC + 1; + + /* This may be called inside atomic sections, + * so GFP_ATOMIC is more suitalbe here + */ + desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC); + if (!desc) + return -ENOMEM; + + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true); + ret = hclge_cmd_send(&hdev->hw, desc, desc_num); + if (ret) { + kfree(desc); + return ret; + } + + data_size = min_t(u32, sizeof(hdev->mac_stats) / sizeof(u64), reg_num); + + desc_data = (__le64 *)(&desc[0].data[0]); + for (i = 0; i < data_size; i++) { + /* data memory is continuous becase only the first desc has a + * header in this command + */ + *data += le64_to_cpu(*desc_data); + data++; + desc_data++; + } + + kfree(desc); + + return 0; +} + +static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *reg_num) +{ + struct hclge_desc desc; + int ret; + + /* Driver needs total register number of both valid registers and + * reserved registers, but the old firmware only returns number + * of valid registers in device V2. 
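 *
 * Editor's note (illustrative, not part of the upstream file): the two
 * update helpers above depend on the firmware packing the MAC counters
 * back to back after the first descriptor's 8-byte header, which is why
 * the in-code comments call the data memory "continuous".  Assuming the
 * usual 32-byte hclge_desc layout (8-byte header plus 24 bytes of data),
 * the defective variant walks
 *
 *     sizeof(desc) / sizeof(u64) - 1 = (21 * 32) / 8 - 1 = 83
 *
 * little-endian 64-bit values starting at &desc[0].data[0] and adds each
 * one into the matching field of hdev->mac_stats; the "complete" variant
 * does the same, but sizes the walk from the register count the firmware
 * reported in dev_specs.mac_stats_num.
 *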
To be compatible with these + * devices, driver uses a fixed value. + */ + if (hdev->ae_dev->dev_version == HNAE3_DEVICE_VERSION_V2) { + *reg_num = HCLGE_MAC_STATS_MAX_NUM_V1; + return 0; + } + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to query mac statistic reg number, ret = %d\n", + ret); + return ret; + } + + *reg_num = le32_to_cpu(desc.data[0]); + if (*reg_num == 0) { + dev_err(&hdev->pdev->dev, + "mac statistic reg number is invalid!\n"); + return -ENODATA; + } + + return 0; +} + +int hclge_mac_update_stats(struct hclge_dev *hdev) +{ + /* The firmware supports the new statistics acquisition method */ + if (hdev->ae_dev->dev_specs.mac_stats_num) + return hclge_mac_update_stats_complete(hdev); + else + return hclge_mac_update_stats_defective(hdev); +} + +static int hclge_comm_get_count(struct hclge_dev *hdev, + const struct hclge_comm_stats_str strs[], + u32 size) +{ + int count = 0; + u32 i; + + for (i = 0; i < size; i++) + if (strs[i].stats_num <= hdev->ae_dev->dev_specs.mac_stats_num) + count++; + + return count; +} + +static u64 *hclge_comm_get_stats(struct hclge_dev *hdev, + const struct hclge_comm_stats_str strs[], + int size, u64 *data) +{ + u64 *buf = data; + u32 i; + + for (i = 0; i < size; i++) { + if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num) + continue; + + *buf = HCLGE_STATS_READ(&hdev->mac_stats, strs[i].offset); + buf++; + } + + return buf; +} + +static u8 *hclge_comm_get_strings(struct hclge_dev *hdev, u32 stringset, + const struct hclge_comm_stats_str strs[], + int size, u8 *data) +{ + char *buff = (char *)data; + u32 i; + + if (stringset != ETH_SS_STATS) + return buff; + + for (i = 0; i < size; i++) { + if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num) + continue; + + snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc); + buff = buff + ETH_GSTRING_LEN; + } + + return (u8 *)buff; +} + +static void hclge_update_stats_for_all(struct hclge_dev *hdev) +{ + struct hnae3_handle *handle; + int status; + + handle = &hdev->vport[0].nic; + if (handle->client) { + status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw); + if (status) { + dev_err(&hdev->pdev->dev, + "Update TQPS stats fail, status = %d.\n", + status); + } + } + + hclge_update_fec_stats(hdev); + + status = hclge_mac_update_stats(hdev); + if (status) + dev_err(&hdev->pdev->dev, + "Update MAC stats fail, status = %d.\n", status); +} + +static void hclge_update_stats(struct hnae3_handle *handle, + struct net_device_stats *net_stats) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + int status; + + if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state)) + return; + + status = hclge_mac_update_stats(hdev); + if (status) + dev_err(&hdev->pdev->dev, + "Update MAC stats fail, status = %d.\n", + status); + + status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw); + if (status) + dev_err(&hdev->pdev->dev, + "Update TQPS stats fail, status = %d.\n", + status); + + clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state); +} + +static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset) +{ +#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK | \ + HNAE3_SUPPORT_PHY_LOOPBACK | \ + HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK | \ + HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK | \ + HNAE3_SUPPORT_EXTERNAL_LOOPBACK) + + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev 
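/* Editor's note (illustrative, not part of the upstream file): the three
 * hclge_comm_get_* helpers above all apply the same filter: an entry of
 * g_mac_stats_string is only counted, named and copied out when its
 * stats_num is not larger than dev_specs.mac_stats_num, i.e. when the
 * firmware actually exposes that many MAC statistic registers.  Entries
 * tagged HCLGE_MAC_STATS_MAX_NUM_V2 (such as the per-priority PFC
 * xoff_time counters at the top of the table) are therefore skipped
 * whenever the reported register count falls below that value.
 */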
= vport->back; + int count = 0; + + /* Loopback test support rules: + * mac: only GE mode support + * serdes: all mac mode will support include GE/XGE/LGE/CGE + * phy: only support when phy device exist on board + */ + if (stringset == ETH_SS_TEST) { + /* clear loopback bit flags at first */ + handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS)); + if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 || + hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M || + hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M || + hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) { + count += 1; + handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK; + } + + count += 1; + handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK; + count += 1; + handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK; + count += 1; + handle->flags |= HNAE3_SUPPORT_EXTERNAL_LOOPBACK; + + if ((hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv && + hdev->hw.mac.phydev->drv->set_loopback) || + hnae3_dev_phy_imp_supported(hdev)) { + count += 1; + handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK; + } + } else if (stringset == ETH_SS_STATS) { + count = hclge_comm_get_count(hdev, g_mac_stats_string, + ARRAY_SIZE(g_mac_stats_string)) + + hclge_comm_tqps_get_sset_count(handle); + } + + return count; +} + +static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset, + u8 *data) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + u8 *p = (char *)data; + int size; + + if (stringset == ETH_SS_STATS) { + size = ARRAY_SIZE(g_mac_stats_string); + p = hclge_comm_get_strings(hdev, stringset, g_mac_stats_string, + size, p); + p = hclge_comm_tqps_get_strings(handle, p); + } else if (stringset == ETH_SS_TEST) { + if (handle->flags & HNAE3_SUPPORT_EXTERNAL_LOOPBACK) { + memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_EXTERNAL], + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) { + memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP], + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) { + memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES], + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) { + memcpy(p, + hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES], + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) { + memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY], + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + } +} + +static void hclge_get_stats(struct hnae3_handle *handle, u64 *data) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + u64 *p; + + p = hclge_comm_get_stats(hdev, g_mac_stats_string, + ARRAY_SIZE(g_mac_stats_string), data); + p = hclge_comm_tqps_get_stats(handle, p); +} + +static void hclge_get_mac_stat(struct hnae3_handle *handle, + struct hns3_mac_stats *mac_stats) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + hclge_update_stats(handle, NULL); + + mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num; + mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num; +} + +static int hclge_parse_func_status(struct hclge_dev *hdev, + struct hclge_func_status_cmd *status) +{ +#define HCLGE_MAC_ID_MASK 0xF + + if (!(status->pf_state & HCLGE_PF_STATE_DONE)) + return -EINVAL; + + /* Set the pf to main pf */ + if (status->pf_state & HCLGE_PF_STATE_MAIN) + hdev->flag |= HCLGE_FLAG_MAIN; + else + hdev->flag &= 
~HCLGE_FLAG_MAIN; + + hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK; + return 0; +} + +static int hclge_query_function_status(struct hclge_dev *hdev) +{ +#define HCLGE_QUERY_MAX_CNT 5 + + struct hclge_func_status_cmd *req; + struct hclge_desc desc; + int timeout = 0; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true); + req = (struct hclge_func_status_cmd *)desc.data; + + do { + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "query function status failed %d.\n", ret); + return ret; + } + + /* Check pf reset is done */ + if (req->pf_state) + break; + usleep_range(1000, 2000); + } while (timeout++ < HCLGE_QUERY_MAX_CNT); + + return hclge_parse_func_status(hdev, req); +} + +static int hclge_query_pf_resource(struct hclge_dev *hdev) +{ + struct hclge_pf_res_cmd *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "query pf resource failed %d.\n", ret); + return ret; + } + + req = (struct hclge_pf_res_cmd *)desc.data; + hdev->num_tqps = le16_to_cpu(req->tqp_num) + + le16_to_cpu(req->ext_tqp_num); + hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S; + + if (req->tx_buf_size) + hdev->tx_buf_size = + le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S; + else + hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF; + + hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT); + + if (req->dv_buf_size) + hdev->dv_buf_size = + le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S; + else + hdev->dv_buf_size = HCLGE_DEFAULT_DV; + + hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT); + + hdev->num_nic_msi = le16_to_cpu(req->msixcap_localid_number_nic); + if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) { + dev_err(&hdev->pdev->dev, + "only %u msi resources available, not enough for pf(min:2).\n", + hdev->num_nic_msi); + return -EINVAL; + } + + if (hnae3_dev_roce_supported(hdev)) { + hdev->num_roce_msi = + le16_to_cpu(req->pf_intr_vector_number_roce); + + /* PF should have NIC vectors and Roce vectors, + * NIC vectors are queued before Roce vectors. 
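 *
 * Editor's note (illustrative, not part of the upstream file): with this
 * layout a hypothetical PF that reports 32 NIC vectors and 16 RoCE
 * vectors ends up with num_msi = 48; vectors 0..31 serve the NIC side
 * and the RoCE client starts at vector 32, which is exactly what
 * hclge_init_roce_base_info() later encodes by setting
 * roce->rinfo.base_vector = hdev->num_nic_msi.
 *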
+ */ + hdev->num_msi = hdev->num_nic_msi + hdev->num_roce_msi; + } else { + hdev->num_msi = hdev->num_nic_msi; + } + + return 0; +} + +static int hclge_parse_speed(u8 speed_cmd, u32 *speed) +{ + switch (speed_cmd) { + case HCLGE_FW_MAC_SPEED_10M: + *speed = HCLGE_MAC_SPEED_10M; + break; + case HCLGE_FW_MAC_SPEED_100M: + *speed = HCLGE_MAC_SPEED_100M; + break; + case HCLGE_FW_MAC_SPEED_1G: + *speed = HCLGE_MAC_SPEED_1G; + break; + case HCLGE_FW_MAC_SPEED_10G: + *speed = HCLGE_MAC_SPEED_10G; + break; + case HCLGE_FW_MAC_SPEED_25G: + *speed = HCLGE_MAC_SPEED_25G; + break; + case HCLGE_FW_MAC_SPEED_40G: + *speed = HCLGE_MAC_SPEED_40G; + break; + case HCLGE_FW_MAC_SPEED_50G: + *speed = HCLGE_MAC_SPEED_50G; + break; + case HCLGE_FW_MAC_SPEED_100G: + *speed = HCLGE_MAC_SPEED_100G; + break; + case HCLGE_FW_MAC_SPEED_200G: + *speed = HCLGE_MAC_SPEED_200G; + break; + default: + return -EINVAL; + } + + return 0; +} + +static const struct hclge_speed_bit_map speed_bit_map[] = { + {HCLGE_MAC_SPEED_10M, HCLGE_SUPPORT_10M_BIT}, + {HCLGE_MAC_SPEED_100M, HCLGE_SUPPORT_100M_BIT}, + {HCLGE_MAC_SPEED_1G, HCLGE_SUPPORT_1G_BIT}, + {HCLGE_MAC_SPEED_10G, HCLGE_SUPPORT_10G_BIT}, + {HCLGE_MAC_SPEED_25G, HCLGE_SUPPORT_25G_BIT}, + {HCLGE_MAC_SPEED_40G, HCLGE_SUPPORT_40G_BIT}, + {HCLGE_MAC_SPEED_50G, HCLGE_SUPPORT_50G_BIT}, + {HCLGE_MAC_SPEED_100G, HCLGE_SUPPORT_100G_BIT}, + {HCLGE_MAC_SPEED_200G, HCLGE_SUPPORT_200G_BIT}, +}; + +static int hclge_get_speed_bit(u32 speed, u32 *speed_bit) +{ + u16 i; + + for (i = 0; i < ARRAY_SIZE(speed_bit_map); i++) { + if (speed == speed_bit_map[i].speed) { + *speed_bit = speed_bit_map[i].speed_bit; + return 0; + } + } + + return -EINVAL; +} + +static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + u32 speed_ability = hdev->hw.mac.speed_ability; + u32 speed_bit = 0; + int ret; + + ret = hclge_get_speed_bit(speed, &speed_bit); + if (ret) + return ret; + + if (speed_bit & speed_ability) + return 0; + + return -EINVAL; +} + +static void hclge_update_fec_support(struct hclge_mac *mac) +{ + linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported); + linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported); + linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT, mac->supported); + linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported); + + if (mac->fec_ability & BIT(HNAE3_FEC_BASER)) + linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, + mac->supported); + if (mac->fec_ability & BIT(HNAE3_FEC_RS)) + linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, + mac->supported); + if (mac->fec_ability & BIT(HNAE3_FEC_LLRS)) + linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT, + mac->supported); + if (mac->fec_ability & BIT(HNAE3_FEC_NONE)) + linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, + mac->supported); +} + +static void hclge_convert_setting_sr(u16 speed_ability, + unsigned long *link_mode) +{ + if (speed_ability & HCLGE_SUPPORT_10G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, + link_mode); + if (speed_ability & HCLGE_SUPPORT_25G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, + link_mode); + if (speed_ability & HCLGE_SUPPORT_40G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, + link_mode); + if (speed_ability & HCLGE_SUPPORT_50G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, + link_mode); + if (speed_ability & HCLGE_SUPPORT_100G_BIT) + 
linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, + link_mode); + if (speed_ability & HCLGE_SUPPORT_200G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT, + link_mode); +} + +static void hclge_convert_setting_lr(u16 speed_ability, + unsigned long *link_mode) +{ + if (speed_ability & HCLGE_SUPPORT_10G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, + link_mode); + if (speed_ability & HCLGE_SUPPORT_25G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, + link_mode); + if (speed_ability & HCLGE_SUPPORT_50G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT, + link_mode); + if (speed_ability & HCLGE_SUPPORT_40G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, + link_mode); + if (speed_ability & HCLGE_SUPPORT_100G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, + link_mode); + if (speed_ability & HCLGE_SUPPORT_200G_BIT) + linkmode_set_bit( + ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT, + link_mode); +} + +static void hclge_convert_setting_cr(u16 speed_ability, + unsigned long *link_mode) +{ + if (speed_ability & HCLGE_SUPPORT_10G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT, + link_mode); + if (speed_ability & HCLGE_SUPPORT_25G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, + link_mode); + if (speed_ability & HCLGE_SUPPORT_40G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, + link_mode); + if (speed_ability & HCLGE_SUPPORT_50G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, + link_mode); + if (speed_ability & HCLGE_SUPPORT_100G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, + link_mode); + if (speed_ability & HCLGE_SUPPORT_200G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT, + link_mode); +} + +static void hclge_convert_setting_kr(u16 speed_ability, + unsigned long *link_mode) +{ + if (speed_ability & HCLGE_SUPPORT_1G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, + link_mode); + if (speed_ability & HCLGE_SUPPORT_10G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, + link_mode); + if (speed_ability & HCLGE_SUPPORT_25G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, + link_mode); + if (speed_ability & HCLGE_SUPPORT_40G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, + link_mode); + if (speed_ability & HCLGE_SUPPORT_50G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, + link_mode); + if (speed_ability & HCLGE_SUPPORT_100G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, + link_mode); + if (speed_ability & HCLGE_SUPPORT_200G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT, + link_mode); +} + +static void hclge_convert_setting_fec(struct hclge_mac *mac) +{ + /* If firmware has reported fec_ability, don't need to convert by speed */ + if (mac->fec_ability) + goto out; + + switch (mac->speed) { + case HCLGE_MAC_SPEED_10G: + case HCLGE_MAC_SPEED_40G: + mac->fec_ability = BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO) | + BIT(HNAE3_FEC_NONE); + break; + case HCLGE_MAC_SPEED_25G: + case HCLGE_MAC_SPEED_50G: + mac->fec_ability = BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) | + BIT(HNAE3_FEC_AUTO) | BIT(HNAE3_FEC_NONE); + break; + case HCLGE_MAC_SPEED_100G: + mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO) | + BIT(HNAE3_FEC_NONE); + break; + case HCLGE_MAC_SPEED_200G: + mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO) | + BIT(HNAE3_FEC_LLRS); + break; + default: + 
mac->fec_ability = 0; + break; + } + +out: + hclge_update_fec_support(mac); +} + +static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev, + u16 speed_ability) +{ + struct hclge_mac *mac = &hdev->hw.mac; + + if (speed_ability & HCLGE_SUPPORT_1G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, + mac->supported); + + hclge_convert_setting_sr(speed_ability, mac->supported); + hclge_convert_setting_lr(speed_ability, mac->supported); + hclge_convert_setting_cr(speed_ability, mac->supported); + if (hnae3_dev_fec_supported(hdev)) + hclge_convert_setting_fec(mac); + + if (hnae3_dev_pause_supported(hdev)) + linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported); + + linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported); +} + +static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev, + u16 speed_ability) +{ + struct hclge_mac *mac = &hdev->hw.mac; + + hclge_convert_setting_kr(speed_ability, mac->supported); + if (hnae3_dev_fec_supported(hdev)) + hclge_convert_setting_fec(mac); + + if (hnae3_dev_pause_supported(hdev)) + linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported); + + linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported); +} + +static void hclge_parse_copper_link_mode(struct hclge_dev *hdev, + u16 speed_ability) +{ + unsigned long *supported = hdev->hw.mac.supported; + + /* default to support all speed for GE port */ + if (!speed_ability) + speed_ability = HCLGE_SUPPORT_GE; + + if (speed_ability & HCLGE_SUPPORT_1G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + supported); + + if (speed_ability & HCLGE_SUPPORT_100M_BIT) { + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, + supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, + supported); + } + + if (speed_ability & HCLGE_SUPPORT_10M_BIT) { + linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported); + } + + if (hnae3_dev_pause_supported(hdev)) { + linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported); + } + + linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported); +} + +static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability) +{ + u8 media_type = hdev->hw.mac.media_type; + + if (media_type == HNAE3_MEDIA_TYPE_FIBER) + hclge_parse_fiber_link_mode(hdev, speed_ability); + else if (media_type == HNAE3_MEDIA_TYPE_COPPER) + hclge_parse_copper_link_mode(hdev, speed_ability); + else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE) + hclge_parse_backplane_link_mode(hdev, speed_ability); +} + +static u32 hclge_get_max_speed(u16 speed_ability) +{ + if (speed_ability & HCLGE_SUPPORT_200G_BIT) + return HCLGE_MAC_SPEED_200G; + + if (speed_ability & HCLGE_SUPPORT_100G_BIT) + return HCLGE_MAC_SPEED_100G; + + if (speed_ability & HCLGE_SUPPORT_50G_BIT) + return HCLGE_MAC_SPEED_50G; + + if (speed_ability & HCLGE_SUPPORT_40G_BIT) + return HCLGE_MAC_SPEED_40G; + + if (speed_ability & HCLGE_SUPPORT_25G_BIT) + return HCLGE_MAC_SPEED_25G; + + if (speed_ability & HCLGE_SUPPORT_10G_BIT) + return HCLGE_MAC_SPEED_10G; + + if (speed_ability & HCLGE_SUPPORT_1G_BIT) + return HCLGE_MAC_SPEED_1G; + + if (speed_ability & HCLGE_SUPPORT_100M_BIT) + return HCLGE_MAC_SPEED_100M; + + if (speed_ability & HCLGE_SUPPORT_10M_BIT) + 
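/* Editor's note (illustrative, not part of the upstream file): this 10M
 * check is the last rung of the ladder; hclge_get_max_speed() walks the
 * ability bits from 200G down and returns the first (highest) speed
 * whose bit is set.  For a made-up speed_ability of
 * HCLGE_SUPPORT_25G_BIT | HCLGE_SUPPORT_10G_BIT | HCLGE_SUPPORT_1G_BIT
 * it returns HCLGE_MAC_SPEED_25G, and if no bit matches it falls back to
 * HCLGE_MAC_SPEED_1G just below.
 */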
return HCLGE_MAC_SPEED_10M; + + return HCLGE_MAC_SPEED_1G; +} + +static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc) +{ +#define HCLGE_TX_SPARE_SIZE_UNIT 4096 +#define SPEED_ABILITY_EXT_SHIFT 8 + + struct hclge_cfg_param_cmd *req; + u64 mac_addr_tmp_high; + u16 speed_ability_ext; + u64 mac_addr_tmp; + unsigned int i; + + req = (struct hclge_cfg_param_cmd *)desc[0].data; + + /* get the configuration */ + cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]), + HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S); + cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]), + HCLGE_CFG_TQP_DESC_N_M, + HCLGE_CFG_TQP_DESC_N_S); + + cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]), + HCLGE_CFG_PHY_ADDR_M, + HCLGE_CFG_PHY_ADDR_S); + cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]), + HCLGE_CFG_MEDIA_TP_M, + HCLGE_CFG_MEDIA_TP_S); + cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]), + HCLGE_CFG_RX_BUF_LEN_M, + HCLGE_CFG_RX_BUF_LEN_S); + /* get mac_address */ + mac_addr_tmp = __le32_to_cpu(req->param[2]); + mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]), + HCLGE_CFG_MAC_ADDR_H_M, + HCLGE_CFG_MAC_ADDR_H_S); + + mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1; + + cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]), + HCLGE_CFG_DEFAULT_SPEED_M, + HCLGE_CFG_DEFAULT_SPEED_S); + cfg->vf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]), + HCLGE_CFG_RSS_SIZE_M, + HCLGE_CFG_RSS_SIZE_S); + + for (i = 0; i < ETH_ALEN; i++) + cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff; + + req = (struct hclge_cfg_param_cmd *)desc[1].data; + cfg->numa_node_map = __le32_to_cpu(req->param[0]); + + cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]), + HCLGE_CFG_SPEED_ABILITY_M, + HCLGE_CFG_SPEED_ABILITY_S); + speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]), + HCLGE_CFG_SPEED_ABILITY_EXT_M, + HCLGE_CFG_SPEED_ABILITY_EXT_S); + cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT; + + cfg->vlan_fliter_cap = hnae3_get_field(__le32_to_cpu(req->param[1]), + HCLGE_CFG_VLAN_FLTR_CAP_M, + HCLGE_CFG_VLAN_FLTR_CAP_S); + + cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]), + HCLGE_CFG_UMV_TBL_SPACE_M, + HCLGE_CFG_UMV_TBL_SPACE_S); + + cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]), + HCLGE_CFG_PF_RSS_SIZE_M, + HCLGE_CFG_PF_RSS_SIZE_S); + + /* HCLGE_CFG_PF_RSS_SIZE_M is the PF max rss size, which is a + * power of 2, instead of reading out directly. This would + * be more flexible for future changes and expansions. + * When VF max rss size field is HCLGE_CFG_RSS_SIZE_S, + * it does not make sense if PF's field is 0. In this case, PF and VF + * has the same max rss size filed: HCLGE_CFG_RSS_SIZE_S. + */ + cfg->pf_rss_size_max = cfg->pf_rss_size_max ? + 1U << cfg->pf_rss_size_max : + cfg->vf_rss_size_max; + + /* The unit of the tx spare buffer size queried from configuration + * file is HCLGE_TX_SPARE_SIZE_UNIT(4096) bytes, so a conversion is + * needed here. 
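 *
 * Editor's note (illustrative, not part of the upstream file): with
 * HCLGE_TX_SPARE_SIZE_UNIT defined as 4096 above, a raw field value of,
 * say, 2 becomes cfg->tx_spare_buf_size = 2 * 4096 = 8192 bytes.  The
 * same "small field expanded by the driver" idea appears a few lines up
 * for pf_rss_size_max, where a non-zero field value f is turned into a
 * maximum RSS size of 1 << f queues.
 *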
+ */ + cfg->tx_spare_buf_size = hnae3_get_field(__le32_to_cpu(req->param[2]), + HCLGE_CFG_TX_SPARE_BUF_SIZE_M, + HCLGE_CFG_TX_SPARE_BUF_SIZE_S); + cfg->tx_spare_buf_size *= HCLGE_TX_SPARE_SIZE_UNIT; +} + +/* hclge_get_cfg: query the static parameter from flash + * @hdev: pointer to struct hclge_dev + * @hcfg: the config structure to be getted + */ +static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg) +{ + struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM]; + struct hclge_cfg_param_cmd *req; + unsigned int i; + int ret; + + for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) { + u32 offset = 0; + + req = (struct hclge_cfg_param_cmd *)desc[i].data; + hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM, + true); + hnae3_set_field(offset, HCLGE_CFG_OFFSET_M, + HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES); + /* Len should be united by 4 bytes when send to hardware */ + hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S, + HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT); + req->offset = cpu_to_le32(offset); + } + + ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM); + if (ret) { + dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret); + return ret; + } + + hclge_parse_cfg(hcfg, desc); + + return 0; +} + +static void hclge_set_default_dev_specs(struct hclge_dev *hdev) +{ +#define HCLGE_MAX_NON_TSO_BD_NUM 8U + + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); + + ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM; + ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE; + ae_dev->dev_specs.rss_key_size = HCLGE_COMM_RSS_KEY_SIZE; + ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE; + ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL; + ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME; + ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM; + ae_dev->dev_specs.umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF; +} + +static void hclge_parse_dev_specs(struct hclge_dev *hdev, + struct hclge_desc *desc) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); + struct hclge_dev_specs_0_cmd *req0; + struct hclge_dev_specs_1_cmd *req1; + + req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data; + req1 = (struct hclge_dev_specs_1_cmd *)desc[1].data; + + ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num; + ae_dev->dev_specs.rss_ind_tbl_size = + le16_to_cpu(req0->rss_ind_tbl_size); + ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max); + ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size); + ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate); + ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num); + ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl); + ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size); + ae_dev->dev_specs.umv_size = le16_to_cpu(req1->umv_size); + ae_dev->dev_specs.mc_mac_size = le16_to_cpu(req1->mc_mac_size); +} + +static void hclge_check_dev_specs(struct hclge_dev *hdev) +{ + struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs; + + if (!dev_specs->max_non_tso_bd_num) + dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM; + if (!dev_specs->rss_ind_tbl_size) + dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE; + if (!dev_specs->rss_key_size) + dev_specs->rss_key_size = HCLGE_COMM_RSS_KEY_SIZE; + if (!dev_specs->max_tm_rate) + dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE; + if (!dev_specs->max_qset_num) + dev_specs->max_qset_num = HCLGE_MAX_QSET_NUM; + if (!dev_specs->max_int_gl) + dev_specs->max_int_gl = 
HCLGE_DEF_MAX_INT_GL; + if (!dev_specs->max_frm_size) + dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME; + if (!dev_specs->umv_size) + dev_specs->umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF; +} + +static int hclge_query_mac_stats_num(struct hclge_dev *hdev) +{ + u32 reg_num = 0; + int ret; + + ret = hclge_mac_query_reg_num(hdev, ®_num); + if (ret && ret != -EOPNOTSUPP) + return ret; + + hdev->ae_dev->dev_specs.mac_stats_num = reg_num; + return 0; +} + +static int hclge_query_dev_specs(struct hclge_dev *hdev) +{ + struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM]; + int ret; + int i; + + ret = hclge_query_mac_stats_num(hdev); + if (ret) + return ret; + + /* set default specifications as devices lower than version V3 do not + * support querying specifications from firmware. + */ + if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) { + hclge_set_default_dev_specs(hdev); + return 0; + } + + for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) { + hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, + true); + desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); + } + hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true); + + ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM); + if (ret) + return ret; + + hclge_parse_dev_specs(hdev, desc); + hclge_check_dev_specs(hdev); + + return 0; +} + +static int hclge_get_cap(struct hclge_dev *hdev) +{ + int ret; + + ret = hclge_query_function_status(hdev); + if (ret) { + dev_err(&hdev->pdev->dev, + "query function status error %d.\n", ret); + return ret; + } + + /* get pf resource */ + return hclge_query_pf_resource(hdev); +} + +static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev) +{ +#define HCLGE_MIN_TX_DESC 64 +#define HCLGE_MIN_RX_DESC 64 + + if (!is_kdump_kernel()) + return; + + dev_info(&hdev->pdev->dev, + "Running kdump kernel. 
Using minimal resources\n"); + + /* minimal queue pairs equals to the number of vports */ + hdev->num_tqps = hdev->num_req_vfs + 1; + hdev->num_tx_desc = HCLGE_MIN_TX_DESC; + hdev->num_rx_desc = HCLGE_MIN_RX_DESC; +} + +static void hclge_init_tc_config(struct hclge_dev *hdev) +{ + unsigned int i; + + if (hdev->tc_max > HNAE3_MAX_TC || + hdev->tc_max < 1) { + dev_warn(&hdev->pdev->dev, "TC num = %u.\n", + hdev->tc_max); + hdev->tc_max = 1; + } + + /* Dev does not support DCB */ + if (!hnae3_dev_dcb_supported(hdev)) { + hdev->tc_max = 1; + hdev->pfc_max = 0; + } else { + hdev->pfc_max = hdev->tc_max; + } + + hdev->tm_info.num_tc = 1; + + /* Currently not support uncontiuous tc */ + for (i = 0; i < hdev->tm_info.num_tc; i++) + hnae3_set_bit(hdev->hw_tc_map, i, 1); + + hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE; +} + +static int hclge_configure(struct hclge_dev *hdev) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); + struct hclge_cfg cfg; + int ret; + + ret = hclge_get_cfg(hdev, &cfg); + if (ret) + return ret; + + hdev->base_tqp_pid = 0; + hdev->vf_rss_size_max = cfg.vf_rss_size_max; + hdev->pf_rss_size_max = cfg.pf_rss_size_max; + hdev->rx_buf_len = cfg.rx_buf_len; + ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr); + hdev->hw.mac.media_type = cfg.media_type; + hdev->hw.mac.phy_addr = cfg.phy_addr; + hdev->num_tx_desc = cfg.tqp_desc_num; + hdev->num_rx_desc = cfg.tqp_desc_num; + hdev->tm_info.num_pg = 1; + hdev->tc_max = cfg.tc_num; + hdev->tm_info.hw_pfc_map = 0; + if (cfg.umv_space) + hdev->wanted_umv_size = cfg.umv_space; + else + hdev->wanted_umv_size = hdev->ae_dev->dev_specs.umv_size; + hdev->tx_spare_buf_size = cfg.tx_spare_buf_size; + hdev->gro_en = true; + if (cfg.vlan_fliter_cap == HCLGE_VLAN_FLTR_CAN_MDF) + set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps); + + if (hnae3_ae_dev_fd_supported(hdev->ae_dev)) { + hdev->fd_en = true; + hdev->fd_active_type = HCLGE_FD_RULE_NONE; + } + + ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed); + if (ret) { + dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n", + cfg.default_speed, ret); + return ret; + } + + hclge_parse_link_mode(hdev, cfg.speed_ability); + + hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability); + + hclge_init_tc_config(hdev); + hclge_init_kdump_kernel_config(hdev); + + return ret; +} + +static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min, + u16 tso_mss_max) +{ + struct hclge_cfg_tso_status_cmd *req; + struct hclge_desc desc; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false); + + req = (struct hclge_cfg_tso_status_cmd *)desc.data; + req->tso_mss_min = cpu_to_le16(tso_mss_min); + req->tso_mss_max = cpu_to_le16(tso_mss_max); + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int hclge_config_gro(struct hclge_dev *hdev) +{ + struct hclge_cfg_gro_status_cmd *req; + struct hclge_desc desc; + int ret; + + if (!hnae3_ae_dev_gro_supported(hdev->ae_dev)) + return 0; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false); + req = (struct hclge_cfg_gro_status_cmd *)desc.data; + + req->gro_en = hdev->gro_en ? 
1 : 0; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "GRO hardware config cmd failed, ret = %d\n", ret); + + return ret; +} + +static int hclge_alloc_tqps(struct hclge_dev *hdev) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); + struct hclge_comm_tqp *tqp; + int i; + + hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps, + sizeof(struct hclge_comm_tqp), GFP_KERNEL); + if (!hdev->htqp) + return -ENOMEM; + + tqp = hdev->htqp; + + for (i = 0; i < hdev->num_tqps; i++) { + tqp->dev = &hdev->pdev->dev; + tqp->index = i; + + tqp->q.ae_algo = &ae_algo; + tqp->q.buf_size = hdev->rx_buf_len; + tqp->q.tx_desc_num = hdev->num_tx_desc; + tqp->q.rx_desc_num = hdev->num_rx_desc; + + /* need an extended offset to configure queues >= + * HCLGE_TQP_MAX_SIZE_DEV_V2 + */ + if (i < HCLGE_TQP_MAX_SIZE_DEV_V2) + tqp->q.io_base = hdev->hw.hw.io_base + + HCLGE_TQP_REG_OFFSET + + i * HCLGE_TQP_REG_SIZE; + else + tqp->q.io_base = hdev->hw.hw.io_base + + HCLGE_TQP_REG_OFFSET + + HCLGE_TQP_EXT_REG_OFFSET + + (i - HCLGE_TQP_MAX_SIZE_DEV_V2) * + HCLGE_TQP_REG_SIZE; + + /* when device supports tx push and has device memory, + * the queue can execute push mode or doorbell mode on + * device memory. + */ + if (test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, ae_dev->caps)) + tqp->q.mem_base = hdev->hw.hw.mem_base + + HCLGE_TQP_MEM_OFFSET(hdev, i); + + tqp++; + } + + return 0; +} + +static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id, + u16 tqp_pid, u16 tqp_vid, bool is_pf) +{ + struct hclge_tqp_map_cmd *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false); + + req = (struct hclge_tqp_map_cmd *)desc.data; + req->tqp_id = cpu_to_le16(tqp_pid); + req->tqp_vf = func_id; + req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B; + if (!is_pf) + req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B; + req->tqp_vid = cpu_to_le16(tqp_vid); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret); + + return ret; +} + +static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps) +{ + struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; + struct hclge_dev *hdev = vport->back; + int i, alloced; + + for (i = 0, alloced = 0; i < hdev->num_tqps && + alloced < num_tqps; i++) { + if (!hdev->htqp[i].alloced) { + hdev->htqp[i].q.handle = &vport->nic; + hdev->htqp[i].q.tqp_index = alloced; + hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc; + hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc; + kinfo->tqp[alloced] = &hdev->htqp[i].q; + hdev->htqp[i].alloced = true; + alloced++; + } + } + vport->alloc_tqps = alloced; + kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max, + vport->alloc_tqps / hdev->tm_info.num_tc); + + /* ensure one to one mapping between irq and queue at default */ + kinfo->rss_size = min_t(u16, kinfo->rss_size, + (hdev->num_nic_msi - 1) / hdev->tm_info.num_tc); + + return 0; +} + +static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps, + u16 num_tx_desc, u16 num_rx_desc) + +{ + struct hnae3_handle *nic = &vport->nic; + struct hnae3_knic_private_info *kinfo = &nic->kinfo; + struct hclge_dev *hdev = vport->back; + int ret; + + kinfo->num_tx_desc = num_tx_desc; + kinfo->num_rx_desc = num_rx_desc; + + kinfo->rx_buf_len = hdev->rx_buf_len; + kinfo->tx_spare_buf_size = hdev->tx_spare_buf_size; + + kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps, + sizeof(struct hnae3_queue *), GFP_KERNEL); + if (!kinfo->tqp) + return -ENOMEM; + + ret = 
hclge_assign_tqp(vport, num_tqps); + if (ret) + dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret); + + return ret; +} + +static int hclge_map_tqp_to_vport(struct hclge_dev *hdev, + struct hclge_vport *vport) +{ + struct hnae3_handle *nic = &vport->nic; + struct hnae3_knic_private_info *kinfo; + u16 i; + + kinfo = &nic->kinfo; + for (i = 0; i < vport->alloc_tqps; i++) { + struct hclge_comm_tqp *q = + container_of(kinfo->tqp[i], struct hclge_comm_tqp, q); + bool is_pf; + int ret; + + is_pf = !(vport->vport_id); + ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index, + i, is_pf); + if (ret) + return ret; + } + + return 0; +} + +static int hclge_map_tqp(struct hclge_dev *hdev) +{ + struct hclge_vport *vport = hdev->vport; + u16 i, num_vport; + + num_vport = hdev->num_req_vfs + 1; + for (i = 0; i < num_vport; i++) { + int ret; + + ret = hclge_map_tqp_to_vport(hdev, vport); + if (ret) + return ret; + + vport++; + } + + return 0; +} + +static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps) +{ + struct hnae3_handle *nic = &vport->nic; + struct hclge_dev *hdev = vport->back; + int ret; + + nic->pdev = hdev->pdev; + nic->ae_algo = &ae_algo; + nic->numa_node_mask = hdev->numa_node_mask; + nic->kinfo.io_base = hdev->hw.hw.io_base; + + ret = hclge_knic_setup(vport, num_tqps, + hdev->num_tx_desc, hdev->num_rx_desc); + if (ret) + dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret); + + return ret; +} + +static int hclge_alloc_vport(struct hclge_dev *hdev) +{ + struct pci_dev *pdev = hdev->pdev; + struct hclge_vport *vport; + u32 tqp_main_vport; + u32 tqp_per_vport; + int num_vport, i; + int ret; + + /* We need to alloc a vport for main NIC of PF */ + num_vport = hdev->num_req_vfs + 1; + + if (hdev->num_tqps < num_vport) { + dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)", + hdev->num_tqps, num_vport); + return -EINVAL; + } + + /* Alloc the same number of TQPs for every vport */ + tqp_per_vport = hdev->num_tqps / num_vport; + tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport; + + vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport), + GFP_KERNEL); + if (!vport) + return -ENOMEM; + + hdev->vport = vport; + hdev->num_alloc_vport = num_vport; + + if (IS_ENABLED(CONFIG_PCI_IOV)) + hdev->num_alloc_vfs = hdev->num_req_vfs; + + for (i = 0; i < num_vport; i++) { + vport->back = hdev; + vport->vport_id = i; + vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO; + vport->mps = HCLGE_MAC_DEFAULT_FRAME; + vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE; + vport->port_base_vlan_cfg.tbl_sta = true; + vport->rxvlan_cfg.rx_vlan_offload_en = true; + vport->req_vlan_fltr_en = true; + INIT_LIST_HEAD(&vport->vlan_list); + INIT_LIST_HEAD(&vport->uc_mac_list); + INIT_LIST_HEAD(&vport->mc_mac_list); + spin_lock_init(&vport->mac_list_lock); + + if (i == 0) + ret = hclge_vport_setup(vport, tqp_main_vport); + else + ret = hclge_vport_setup(vport, tqp_per_vport); + if (ret) { + dev_err(&pdev->dev, + "vport setup failed for vport %d, %d\n", + i, ret); + return ret; + } + + vport++; + } + + return 0; +} + +static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev, + struct hclge_pkt_buf_alloc *buf_alloc) +{ +/* TX buffer size is unit by 128 byte */ +#define HCLGE_BUF_SIZE_UNIT_SHIFT 7 +#define HCLGE_BUF_SIZE_UPDATE_EN_MSK BIT(15) + struct hclge_tx_buff_alloc_cmd *req; + struct hclge_desc desc; + int ret; + u8 i; + + req = (struct hclge_tx_buff_alloc_cmd *)desc.data; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0); + for (i 
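/* Editor's note (illustrative, not part of the upstream file): the queue
 * split in hclge_alloc_vport() above is plain integer division with the
 * remainder handed to the PF's own vport.  With made-up numbers, 3
 * requested VFs give num_vport = 4, and num_tqps = 18 then yields
 * tqp_per_vport = 18 / 4 = 4 for each VF vport, while the main (PF)
 * vport is set up with tqp_main_vport = 4 + 18 % 4 = 6.
 */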
= 0; i < HCLGE_MAX_TC_NUM; i++) { + u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size; + + req->tx_pkt_buff[i] = + cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) | + HCLGE_BUF_SIZE_UPDATE_EN_MSK); + } + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n", + ret); + + return ret; +} + +static int hclge_tx_buffer_alloc(struct hclge_dev *hdev, + struct hclge_pkt_buf_alloc *buf_alloc) +{ + int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc); + + if (ret) + dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret); + + return ret; +} + +static u32 hclge_get_tc_num(struct hclge_dev *hdev) +{ + unsigned int i; + u32 cnt = 0; + + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) + if (hdev->hw_tc_map & BIT(i)) + cnt++; + return cnt; +} + +/* Get the number of pfc enabled TCs, which have private buffer */ +static int hclge_get_pfc_priv_num(struct hclge_dev *hdev, + struct hclge_pkt_buf_alloc *buf_alloc) +{ + struct hclge_priv_buf *priv; + unsigned int i; + int cnt = 0; + + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { + priv = &buf_alloc->priv_buf[i]; + if ((hdev->tm_info.hw_pfc_map & BIT(i)) && + priv->enable) + cnt++; + } + + return cnt; +} + +/* Get the number of pfc disabled TCs, which have private buffer */ +static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev, + struct hclge_pkt_buf_alloc *buf_alloc) +{ + struct hclge_priv_buf *priv; + unsigned int i; + int cnt = 0; + + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { + priv = &buf_alloc->priv_buf[i]; + if (hdev->hw_tc_map & BIT(i) && + !(hdev->tm_info.hw_pfc_map & BIT(i)) && + priv->enable) + cnt++; + } + + return cnt; +} + +static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc) +{ + struct hclge_priv_buf *priv; + u32 rx_priv = 0; + int i; + + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { + priv = &buf_alloc->priv_buf[i]; + if (priv->enable) + rx_priv += priv->buf_size; + } + return rx_priv; +} + +static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc) +{ + u32 i, total_tx_size = 0; + + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) + total_tx_size += buf_alloc->priv_buf[i].tx_buf_size; + + return total_tx_size; +} + +static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev, + struct hclge_pkt_buf_alloc *buf_alloc, + u32 rx_all) +{ + u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd; + u32 tc_num = hclge_get_tc_num(hdev); + u32 shared_buf, aligned_mps; + u32 rx_priv; + int i; + + aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT); + + if (hnae3_dev_dcb_supported(hdev)) + shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps + + hdev->dv_buf_size; + else + shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF + + hdev->dv_buf_size; + + shared_buf_tc = tc_num * aligned_mps + aligned_mps; + shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc), + HCLGE_BUF_SIZE_UNIT); + + rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc); + if (rx_all < rx_priv + shared_std) + return false; + + shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT); + buf_alloc->s_buf.buf_size = shared_buf; + if (hnae3_dev_dcb_supported(hdev)) { + buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size; + buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high + - roundup(aligned_mps / HCLGE_BUF_DIV_BY, + HCLGE_BUF_SIZE_UNIT); + } else { + buf_alloc->s_buf.self.high = aligned_mps + + HCLGE_NON_DCB_ADDITIONAL_BUF; + buf_alloc->s_buf.self.low = aligned_mps; + } + + if (hnae3_dev_dcb_supported(hdev)) { + hi_thrd = shared_buf - hdev->dv_buf_size; + + if 
(tc_num <= NEED_RESERVE_TC_NUM) + hi_thrd = hi_thrd * BUF_RESERVE_PERCENT + / BUF_MAX_PERCENT; + + if (tc_num) + hi_thrd = hi_thrd / tc_num; + + hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps); + hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT); + lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY; + } else { + hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF; + lo_thrd = aligned_mps; + } + + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { + buf_alloc->s_buf.tc_thrd[i].low = lo_thrd; + buf_alloc->s_buf.tc_thrd[i].high = hi_thrd; + } + + return true; +} + +static int hclge_tx_buffer_calc(struct hclge_dev *hdev, + struct hclge_pkt_buf_alloc *buf_alloc) +{ + u32 i, total_size; + + total_size = hdev->pkt_buf_size; + + /* alloc tx buffer for all enabled tc */ + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { + struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; + + if (hdev->hw_tc_map & BIT(i)) { + if (total_size < hdev->tx_buf_size) + return -ENOMEM; + + priv->tx_buf_size = hdev->tx_buf_size; + } else { + priv->tx_buf_size = 0; + } + + total_size -= priv->tx_buf_size; + } + + return 0; +} + +static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max, + struct hclge_pkt_buf_alloc *buf_alloc) +{ + u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc); + u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT); + unsigned int i; + + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { + struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; + + priv->enable = 0; + priv->wl.low = 0; + priv->wl.high = 0; + priv->buf_size = 0; + + if (!(hdev->hw_tc_map & BIT(i))) + continue; + + priv->enable = 1; + + if (hdev->tm_info.hw_pfc_map & BIT(i)) { + priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT; + priv->wl.high = roundup(priv->wl.low + aligned_mps, + HCLGE_BUF_SIZE_UNIT); + } else { + priv->wl.low = 0; + priv->wl.high = max ? 
(aligned_mps * HCLGE_BUF_MUL_BY) : + aligned_mps; + } + + priv->buf_size = priv->wl.high + hdev->dv_buf_size; + } + + return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all); +} + +static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev, + struct hclge_pkt_buf_alloc *buf_alloc) +{ + u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc); + int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc); + int i; + + /* let the last to be cleared first */ + for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) { + struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; + unsigned int mask = BIT((unsigned int)i); + + if (hdev->hw_tc_map & mask && + !(hdev->tm_info.hw_pfc_map & mask)) { + /* Clear the no pfc TC private buffer */ + priv->wl.low = 0; + priv->wl.high = 0; + priv->buf_size = 0; + priv->enable = 0; + no_pfc_priv_num--; + } + + if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) || + no_pfc_priv_num == 0) + break; + } + + return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all); +} + +static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev, + struct hclge_pkt_buf_alloc *buf_alloc) +{ + u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc); + int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc); + int i; + + /* let the last to be cleared first */ + for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) { + struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; + unsigned int mask = BIT((unsigned int)i); + + if (hdev->hw_tc_map & mask && + hdev->tm_info.hw_pfc_map & mask) { + /* Reduce the number of pfc TC with private buffer */ + priv->wl.low = 0; + priv->enable = 0; + priv->wl.high = 0; + priv->buf_size = 0; + pfc_priv_num--; + } + + if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) || + pfc_priv_num == 0) + break; + } + + return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all); +} + +static int hclge_only_alloc_priv_buff(struct hclge_dev *hdev, + struct hclge_pkt_buf_alloc *buf_alloc) +{ +#define COMPENSATE_BUFFER 0x3C00 +#define COMPENSATE_HALF_MPS_NUM 5 +#define PRIV_WL_GAP 0x1800 + + u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc); + u32 tc_num = hclge_get_tc_num(hdev); + u32 half_mps = hdev->mps >> 1; + u32 min_rx_priv; + unsigned int i; + + if (tc_num) + rx_priv = rx_priv / tc_num; + + if (tc_num <= NEED_RESERVE_TC_NUM) + rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT; + + min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER + + COMPENSATE_HALF_MPS_NUM * half_mps; + min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT); + rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT); + if (rx_priv < min_rx_priv) + return false; + + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { + struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; + + priv->enable = 0; + priv->wl.low = 0; + priv->wl.high = 0; + priv->buf_size = 0; + + if (!(hdev->hw_tc_map & BIT(i))) + continue; + + priv->enable = 1; + priv->buf_size = rx_priv; + priv->wl.high = rx_priv - hdev->dv_buf_size; + priv->wl.low = priv->wl.high - PRIV_WL_GAP; + } + + buf_alloc->s_buf.buf_size = 0; + + return true; +} + +/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs + * @hdev: pointer to struct hclge_dev + * @buf_alloc: pointer to buffer calculation data + * @return: 0: calculate successful, negative: fail + */ +static int hclge_rx_buffer_calc(struct hclge_dev *hdev, + struct hclge_pkt_buf_alloc *buf_alloc) +{ + /* When DCB is not supported, rx private buffer is not allocated. 
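 *
 * Editor's note (illustrative, not part of the upstream file): for the
 * DCB-capable case the body below simply tries a fixed ladder of
 * strategies and returns 0 on the first one that fits the packet buffer:
 *   1. private buffers only, no shared buffer
 *      (hclge_only_alloc_priv_buff)
 *   2. private buffers with the larger waterlines
 *      (hclge_rx_buf_calc_all with max = true)
 *   3. private buffers with the smaller waterlines
 *      (hclge_rx_buf_calc_all with max = false)
 *   4. additionally drop the private buffers of non-PFC TCs
 *      (hclge_drop_nopfc_buf_till_fit)
 *   5. additionally drop the private buffers of PFC TCs
 *      (hclge_drop_pfc_buf_till_fit)
 * Only when every strategy fails does it give up with -ENOMEM.
 *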
*/ + if (!hnae3_dev_dcb_supported(hdev)) { + u32 rx_all = hdev->pkt_buf_size; + + rx_all -= hclge_get_tx_buff_alloced(buf_alloc); + if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) + return -ENOMEM; + + return 0; + } + + if (hclge_only_alloc_priv_buff(hdev, buf_alloc)) + return 0; + + if (hclge_rx_buf_calc_all(hdev, true, buf_alloc)) + return 0; + + /* try to decrease the buffer size */ + if (hclge_rx_buf_calc_all(hdev, false, buf_alloc)) + return 0; + + if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc)) + return 0; + + if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc)) + return 0; + + return -ENOMEM; +} + +static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev, + struct hclge_pkt_buf_alloc *buf_alloc) +{ + struct hclge_rx_priv_buff_cmd *req; + struct hclge_desc desc; + int ret; + int i; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false); + req = (struct hclge_rx_priv_buff_cmd *)desc.data; + + /* Alloc private buffer TCs */ + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { + struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; + + req->buf_num[i] = + cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S); + req->buf_num[i] |= + cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B); + } + + req->shared_buf = + cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) | + (1 << HCLGE_TC0_PRI_BUF_EN_B)); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "rx private buffer alloc cmd failed %d\n", ret); + + return ret; +} + +static int hclge_rx_priv_wl_config(struct hclge_dev *hdev, + struct hclge_pkt_buf_alloc *buf_alloc) +{ + struct hclge_rx_priv_wl_buf *req; + struct hclge_priv_buf *priv; + struct hclge_desc desc[2]; + int i, j; + int ret; + + for (i = 0; i < 2; i++) { + hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC, + false); + req = (struct hclge_rx_priv_wl_buf *)desc[i].data; + + /* The first descriptor set the NEXT bit to 1 */ + if (i == 0) + desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); + else + desc[i].flag &= ~cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); + + for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) { + u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j; + + priv = &buf_alloc->priv_buf[idx]; + req->tc_wl[j].high = + cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S); + req->tc_wl[j].high |= + cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); + req->tc_wl[j].low = + cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S); + req->tc_wl[j].low |= + cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); + } + } + + /* Send 2 descriptor at one time */ + ret = hclge_cmd_send(&hdev->hw, desc, 2); + if (ret) + dev_err(&hdev->pdev->dev, + "rx private waterline config cmd failed %d\n", + ret); + return ret; +} + +static int hclge_common_thrd_config(struct hclge_dev *hdev, + struct hclge_pkt_buf_alloc *buf_alloc) +{ + struct hclge_shared_buf *s_buf = &buf_alloc->s_buf; + struct hclge_rx_com_thrd *req; + struct hclge_desc desc[2]; + struct hclge_tc_thrd *tc; + int i, j; + int ret; + + for (i = 0; i < 2; i++) { + hclge_cmd_setup_basic_desc(&desc[i], + HCLGE_OPC_RX_COM_THRD_ALLOC, false); + req = (struct hclge_rx_com_thrd *)&desc[i].data; + + /* The first descriptor set the NEXT bit to 1 */ + if (i == 0) + desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); + else + desc[i].flag &= ~cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); + + for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) { + tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j]; + + req->com_thrd[j].high = + cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S); + req->com_thrd[j].high |= + cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); + req->com_thrd[j].low 
= + cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S); + req->com_thrd[j].low |= + cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); + } + } + + /* Send 2 descriptors at one time */ + ret = hclge_cmd_send(&hdev->hw, desc, 2); + if (ret) + dev_err(&hdev->pdev->dev, + "common threshold config cmd failed %d\n", ret); + return ret; +} + +static int hclge_common_wl_config(struct hclge_dev *hdev, + struct hclge_pkt_buf_alloc *buf_alloc) +{ + struct hclge_shared_buf *buf = &buf_alloc->s_buf; + struct hclge_rx_com_wl *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false); + + req = (struct hclge_rx_com_wl *)desc.data; + req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S); + req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); + + req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S); + req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "common waterline config cmd failed %d\n", ret); + + return ret; +} + +int hclge_buffer_alloc(struct hclge_dev *hdev) +{ + struct hclge_pkt_buf_alloc *pkt_buf; + int ret; + + pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL); + if (!pkt_buf) + return -ENOMEM; + + ret = hclge_tx_buffer_calc(hdev, pkt_buf); + if (ret) { + dev_err(&hdev->pdev->dev, + "could not calc tx buffer size for all TCs %d\n", ret); + goto out; + } + + ret = hclge_tx_buffer_alloc(hdev, pkt_buf); + if (ret) { + dev_err(&hdev->pdev->dev, + "could not alloc tx buffers %d\n", ret); + goto out; + } + + ret = hclge_rx_buffer_calc(hdev, pkt_buf); + if (ret) { + dev_err(&hdev->pdev->dev, + "could not calc rx priv buffer size for all TCs %d\n", + ret); + goto out; + } + + ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf); + if (ret) { + dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n", + ret); + goto out; + } + + if (hnae3_dev_dcb_supported(hdev)) { + ret = hclge_rx_priv_wl_config(hdev, pkt_buf); + if (ret) { + dev_err(&hdev->pdev->dev, + "could not configure rx private waterline %d\n", + ret); + goto out; + } + + ret = hclge_common_thrd_config(hdev, pkt_buf); + if (ret) { + dev_err(&hdev->pdev->dev, + "could not configure common threshold %d\n", + ret); + goto out; + } + } + + ret = hclge_common_wl_config(hdev, pkt_buf); + if (ret) + dev_err(&hdev->pdev->dev, + "could not configure common waterline %d\n", ret); + +out: + kfree(pkt_buf); + return ret; +} + +static int hclge_init_roce_base_info(struct hclge_vport *vport) +{ + struct hnae3_handle *roce = &vport->roce; + struct hnae3_handle *nic = &vport->nic; + struct hclge_dev *hdev = vport->back; + + roce->rinfo.num_vectors = vport->back->num_roce_msi; + + if (hdev->num_msi < hdev->num_nic_msi + hdev->num_roce_msi) + return -EINVAL; + + roce->rinfo.base_vector = hdev->num_nic_msi; + + roce->rinfo.netdev = nic->kinfo.netdev; + roce->rinfo.roce_io_base = hdev->hw.hw.io_base; + roce->rinfo.roce_mem_base = hdev->hw.hw.mem_base; + + roce->pdev = nic->pdev; + roce->ae_algo = nic->ae_algo; + roce->numa_node_mask = nic->numa_node_mask; + + return 0; +} + +static int hclge_init_msi(struct hclge_dev *hdev) +{ + struct pci_dev *pdev = hdev->pdev; + int vectors; + int i; + + vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM, + hdev->num_msi, + PCI_IRQ_MSI | PCI_IRQ_MSIX); + if (vectors < 0) { + dev_err(&pdev->dev, + "failed(%d) to allocate MSI/MSI-X vectors\n", + vectors); + return vectors; + } + if (vectors < hdev->num_msi) + dev_warn(&hdev->pdev->dev, + "requested %u MSI/MSI-X, but 
allocated %d MSI/MSI-X\n", + hdev->num_msi, vectors); + + hdev->num_msi = vectors; + hdev->num_msi_left = vectors; + + hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi, + sizeof(u16), GFP_KERNEL); + if (!hdev->vector_status) { + pci_free_irq_vectors(pdev); + return -ENOMEM; + } + + for (i = 0; i < hdev->num_msi; i++) + hdev->vector_status[i] = HCLGE_INVALID_VPORT; + + hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi, + sizeof(int), GFP_KERNEL); + if (!hdev->vector_irq) { + pci_free_irq_vectors(pdev); + return -ENOMEM; + } + + return 0; +} + +static u8 hclge_check_speed_dup(u8 duplex, int speed) +{ + if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M)) + duplex = HCLGE_MAC_FULL; + + return duplex; +} + +static struct hclge_mac_speed_map hclge_mac_speed_map_to_fw[] = { + {HCLGE_MAC_SPEED_10M, HCLGE_FW_MAC_SPEED_10M}, + {HCLGE_MAC_SPEED_100M, HCLGE_FW_MAC_SPEED_100M}, + {HCLGE_MAC_SPEED_1G, HCLGE_FW_MAC_SPEED_1G}, + {HCLGE_MAC_SPEED_10G, HCLGE_FW_MAC_SPEED_10G}, + {HCLGE_MAC_SPEED_25G, HCLGE_FW_MAC_SPEED_25G}, + {HCLGE_MAC_SPEED_40G, HCLGE_FW_MAC_SPEED_40G}, + {HCLGE_MAC_SPEED_50G, HCLGE_FW_MAC_SPEED_50G}, + {HCLGE_MAC_SPEED_100G, HCLGE_FW_MAC_SPEED_100G}, + {HCLGE_MAC_SPEED_200G, HCLGE_FW_MAC_SPEED_200G}, +}; + +static int hclge_convert_to_fw_speed(u32 speed_drv, u32 *speed_fw) +{ + u16 i; + + for (i = 0; i < ARRAY_SIZE(hclge_mac_speed_map_to_fw); i++) { + if (hclge_mac_speed_map_to_fw[i].speed_drv == speed_drv) { + *speed_fw = hclge_mac_speed_map_to_fw[i].speed_fw; + return 0; + } + } + + return -EINVAL; +} + +static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed, + u8 duplex, u8 lane_num) +{ + struct hclge_config_mac_speed_dup_cmd *req; + struct hclge_desc desc; + u32 speed_fw; + int ret; + + req = (struct hclge_config_mac_speed_dup_cmd *)desc.data; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false); + + if (duplex) + hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1); + + ret = hclge_convert_to_fw_speed(speed, &speed_fw); + if (ret) { + dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed); + return ret; + } + + hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, HCLGE_CFG_SPEED_S, + speed_fw); + hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B, + 1); + req->lane_num = lane_num; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "mac speed/duplex config cmd failed %d.\n", ret); + return ret; + } + + return 0; +} + +int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex, u8 lane_num) +{ + struct hclge_mac *mac = &hdev->hw.mac; + int ret; + + duplex = hclge_check_speed_dup(duplex, speed); + if (!mac->support_autoneg && mac->speed == speed && + mac->duplex == duplex && (mac->lane_num == lane_num || lane_num == 0)) + return 0; + + ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex, lane_num); + if (ret) + return ret; + + hdev->hw.mac.speed = speed; + hdev->hw.mac.duplex = duplex; + if (!lane_num) + hdev->hw.mac.lane_num = lane_num; + + return 0; +} + +static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed, + u8 duplex, u8 lane_num) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + return hclge_cfg_mac_speed_dup(hdev, speed, duplex, lane_num); +} + +static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable) +{ + struct hclge_config_auto_neg_cmd *req; + struct hclge_desc desc; + u32 flag = 0; + int ret; + + hclge_cmd_setup_basic_desc(&desc, 
HCLGE_OPC_CONFIG_AN_MODE, false); + + req = (struct hclge_config_auto_neg_cmd *)desc.data; + if (enable) + hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U); + req->cfg_an_cmd_flag = cpu_to_le32(flag); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n", + ret); + + return ret; +} + +static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + if (!hdev->hw.mac.support_autoneg) { + if (enable) { + dev_err(&hdev->pdev->dev, + "autoneg is not supported by current port\n"); + return -EOPNOTSUPP; + } else { + return 0; + } + } + + return hclge_set_autoneg_en(hdev, enable); +} + +static int hclge_get_autoneg(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct phy_device *phydev = hdev->hw.mac.phydev; + + if (phydev) + return phydev->autoneg; + + return hdev->hw.mac.autoneg; +} + +static int hclge_restart_autoneg(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + int ret; + + dev_dbg(&hdev->pdev->dev, "restart autoneg\n"); + + ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); + if (ret) + return ret; + return hclge_notify_client(hdev, HNAE3_UP_CLIENT); +} + +static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg) + return hclge_set_autoneg_en(hdev, !halt); + + return 0; +} + +static void hclge_parse_fec_stats_lanes(struct hclge_dev *hdev, + struct hclge_desc *desc, u32 desc_len) +{ + u32 lane_size = HCLGE_FEC_STATS_MAX_LANES * 2; + u32 desc_index = 0; + u32 data_index = 0; + u32 i; + + for (i = 0; i < lane_size; i++) { + if (data_index >= HCLGE_DESC_DATA_LEN) { + desc_index++; + data_index = 0; + } + + if (desc_index >= desc_len) + return; + + hdev->fec_stats.per_lanes[i] += + le32_to_cpu(desc[desc_index].data[data_index]); + data_index++; + } +} + +static void hclge_parse_fec_stats(struct hclge_dev *hdev, + struct hclge_desc *desc, u32 desc_len) +{ + struct hclge_query_fec_stats_cmd *req; + + req = (struct hclge_query_fec_stats_cmd *)desc[0].data; + + hdev->fec_stats.base_r_lane_num = req->base_r_lane_num; + hdev->fec_stats.rs_corr_blocks += + le32_to_cpu(req->rs_fec_corr_blocks); + hdev->fec_stats.rs_uncorr_blocks += + le32_to_cpu(req->rs_fec_uncorr_blocks); + hdev->fec_stats.rs_error_blocks += + le32_to_cpu(req->rs_fec_error_blocks); + hdev->fec_stats.base_r_corr_blocks += + le32_to_cpu(req->base_r_fec_corr_blocks); + hdev->fec_stats.base_r_uncorr_blocks += + le32_to_cpu(req->base_r_fec_uncorr_blocks); + + hclge_parse_fec_stats_lanes(hdev, &desc[1], desc_len - 1); +} + +static int hclge_update_fec_stats_hw(struct hclge_dev *hdev) +{ + struct hclge_desc desc[HCLGE_FEC_STATS_CMD_NUM]; + int ret; + u32 i; + + for (i = 0; i < HCLGE_FEC_STATS_CMD_NUM; i++) { + hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_FEC_STATS, + true); + if (i != (HCLGE_FEC_STATS_CMD_NUM - 1)) + desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); + } + + ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_FEC_STATS_CMD_NUM); + if (ret) + return ret; + + hclge_parse_fec_stats(hdev, desc, HCLGE_FEC_STATS_CMD_NUM); + + return 0; +} + +static void hclge_update_fec_stats(struct hclge_dev *hdev) +{ + struct hnae3_ae_dev 
*ae_dev = pci_get_drvdata(hdev->pdev); + int ret; + + if (!hnae3_ae_dev_fec_stats_supported(ae_dev) || + test_and_set_bit(HCLGE_STATE_FEC_STATS_UPDATING, &hdev->state)) + return; + + ret = hclge_update_fec_stats_hw(hdev); + if (ret) + dev_err(&hdev->pdev->dev, + "failed to update fec stats, ret = %d\n", ret); + + clear_bit(HCLGE_STATE_FEC_STATS_UPDATING, &hdev->state); +} + +static void hclge_get_fec_stats_total(struct hclge_dev *hdev, + struct ethtool_fec_stats *fec_stats) +{ + fec_stats->corrected_blocks.total = hdev->fec_stats.rs_corr_blocks; + fec_stats->uncorrectable_blocks.total = + hdev->fec_stats.rs_uncorr_blocks; +} + +static void hclge_get_fec_stats_lanes(struct hclge_dev *hdev, + struct ethtool_fec_stats *fec_stats) +{ + u32 i; + + if (hdev->fec_stats.base_r_lane_num == 0 || + hdev->fec_stats.base_r_lane_num > HCLGE_FEC_STATS_MAX_LANES) { + dev_err(&hdev->pdev->dev, + "fec stats lane number(%llu) is invalid\n", + hdev->fec_stats.base_r_lane_num); + return; + } + + for (i = 0; i < hdev->fec_stats.base_r_lane_num; i++) { + fec_stats->corrected_blocks.lanes[i] = + hdev->fec_stats.base_r_corr_per_lanes[i]; + fec_stats->uncorrectable_blocks.lanes[i] = + hdev->fec_stats.base_r_uncorr_per_lanes[i]; + } +} + +static void hclge_comm_get_fec_stats(struct hclge_dev *hdev, + struct ethtool_fec_stats *fec_stats) +{ + u32 fec_mode = hdev->hw.mac.fec_mode; + + switch (fec_mode) { + case BIT(HNAE3_FEC_RS): + case BIT(HNAE3_FEC_LLRS): + hclge_get_fec_stats_total(hdev, fec_stats); + break; + case BIT(HNAE3_FEC_BASER): + hclge_get_fec_stats_lanes(hdev, fec_stats); + break; + default: + dev_err(&hdev->pdev->dev, + "fec stats is not supported by current fec mode(0x%x)\n", + fec_mode); + break; + } +} + +static void hclge_get_fec_stats(struct hnae3_handle *handle, + struct ethtool_fec_stats *fec_stats) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + u32 fec_mode = hdev->hw.mac.fec_mode; + + if (fec_mode == BIT(HNAE3_FEC_NONE) || + fec_mode == BIT(HNAE3_FEC_AUTO) || + fec_mode == BIT(HNAE3_FEC_USER_DEF)) + return; + + hclge_update_fec_stats(hdev); + + hclge_comm_get_fec_stats(hdev, fec_stats); +} + +static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode) +{ + struct hclge_config_fec_cmd *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false); + + req = (struct hclge_config_fec_cmd *)desc.data; + if (fec_mode & BIT(HNAE3_FEC_AUTO)) + hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1); + if (fec_mode & BIT(HNAE3_FEC_RS)) + hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M, + HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS); + if (fec_mode & BIT(HNAE3_FEC_LLRS)) + hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M, + HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_LLRS); + if (fec_mode & BIT(HNAE3_FEC_BASER)) + hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M, + HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret); + + return ret; +} + +static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct hclge_mac *mac = &hdev->hw.mac; + int ret; + + if (fec_mode && !(mac->fec_ability & fec_mode)) { + dev_err(&hdev->pdev->dev, "unsupported fec mode\n"); + return -EINVAL; + } + + ret = hclge_set_fec_hw(hdev, fec_mode); + if (ret) + return ret; + + 
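/* record the user-configured FEC mode so hclge_mac_init() can re-apply it */ +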
mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF); + return 0; +} + +static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability, + u8 *fec_mode) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct hclge_mac *mac = &hdev->hw.mac; + + if (fec_ability) + *fec_ability = mac->fec_ability; + if (fec_mode) + *fec_mode = mac->fec_mode; +} + +static int hclge_mac_init(struct hclge_dev *hdev) +{ + struct hclge_mac *mac = &hdev->hw.mac; + int ret; + + hdev->support_sfp_query = true; + hdev->hw.mac.duplex = HCLGE_MAC_FULL; + ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed, + hdev->hw.mac.duplex, hdev->hw.mac.lane_num); + if (ret) + return ret; + + if (hdev->hw.mac.support_autoneg) { + ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg); + if (ret) + return ret; + } + + mac->link = 0; + + if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) { + ret = hclge_set_fec_hw(hdev, mac->user_fec_mode); + if (ret) + return ret; + } + + ret = hclge_set_mac_mtu(hdev, hdev->mps); + if (ret) { + dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret); + return ret; + } + + ret = hclge_set_default_loopback(hdev); + if (ret) + return ret; + + ret = hclge_buffer_alloc(hdev); + if (ret) + dev_err(&hdev->pdev->dev, + "allocate buffer fail, ret=%d\n", ret); + + return ret; +} + +static void hclge_mbx_task_schedule(struct hclge_dev *hdev) +{ + if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && + !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state)) { + hdev->last_mbx_scheduled = jiffies; + mod_delayed_work(hclge_wq, &hdev->service_task, 0); + } +} + +static void hclge_reset_task_schedule(struct hclge_dev *hdev) +{ + if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && + test_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state) && + !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state)) { + hdev->last_rst_scheduled = jiffies; + mod_delayed_work(hclge_wq, &hdev->service_task, 0); + } +} + +static void hclge_errhand_task_schedule(struct hclge_dev *hdev) +{ + if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && + !test_and_set_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state)) + mod_delayed_work(hclge_wq, &hdev->service_task, 0); +} + +void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time) +{ + if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && + !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) + mod_delayed_work(hclge_wq, &hdev->service_task, delay_time); +} + +static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status) +{ + struct hclge_link_status_cmd *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n", + ret); + return ret; + } + + req = (struct hclge_link_status_cmd *)desc.data; + *link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ? 
+ HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN; + + return 0; +} + +static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status) +{ + struct phy_device *phydev = hdev->hw.mac.phydev; + + *link_status = HCLGE_LINK_STATUS_DOWN; + + if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) + return 0; + + if (phydev && (phydev->state != PHY_RUNNING || !phydev->link)) + return 0; + + return hclge_get_mac_link_status(hdev, link_status); +} + +static void hclge_push_link_status(struct hclge_dev *hdev) +{ + struct hclge_vport *vport; + int ret; + u16 i; + + for (i = 0; i < pci_num_vf(hdev->pdev); i++) { + vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM]; + + if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state) || + vport->vf_info.link_state != IFLA_VF_LINK_STATE_AUTO) + continue; + + ret = hclge_push_vf_link_status(vport); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to push link status to vf%u, ret = %d\n", + i, ret); + } + } +} + +static void hclge_update_link_status(struct hclge_dev *hdev) +{ + struct hnae3_handle *rhandle = &hdev->vport[0].roce; + struct hnae3_handle *handle = &hdev->vport[0].nic; + struct hnae3_client *rclient = hdev->roce_client; + struct hnae3_client *client = hdev->nic_client; + int state; + int ret; + + if (!client) + return; + + if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state)) + return; + + ret = hclge_get_mac_phy_link(hdev, &state); + if (ret) { + clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state); + return; + } + + if (state != hdev->hw.mac.link) { + hdev->hw.mac.link = state; + if (state == HCLGE_LINK_STATUS_UP) + hclge_update_port_info(hdev); + + client->ops->link_status_change(handle, state); + hclge_config_mac_tnl_int(hdev, state); + if (rclient && rclient->ops->link_status_change) + rclient->ops->link_status_change(rhandle, state); + + hclge_push_link_status(hdev); + } + + clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state); +} + +static void hclge_update_speed_advertising(struct hclge_mac *mac) +{ + u32 speed_ability; + + if (hclge_get_speed_bit(mac->speed, &speed_ability)) + return; + + switch (mac->module_type) { + case HNAE3_MODULE_TYPE_FIBRE_LR: + hclge_convert_setting_lr(speed_ability, mac->advertising); + break; + case HNAE3_MODULE_TYPE_FIBRE_SR: + case HNAE3_MODULE_TYPE_AOC: + hclge_convert_setting_sr(speed_ability, mac->advertising); + break; + case HNAE3_MODULE_TYPE_CR: + hclge_convert_setting_cr(speed_ability, mac->advertising); + break; + case HNAE3_MODULE_TYPE_KR: + hclge_convert_setting_kr(speed_ability, mac->advertising); + break; + default: + break; + } +} + +static void hclge_update_fec_advertising(struct hclge_mac *mac) +{ + if (mac->fec_mode & BIT(HNAE3_FEC_RS)) + linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, + mac->advertising); + else if (mac->fec_mode & BIT(HNAE3_FEC_LLRS)) + linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT, + mac->advertising); + else if (mac->fec_mode & BIT(HNAE3_FEC_BASER)) + linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, + mac->advertising); + else + linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, + mac->advertising); +} + +static void hclge_update_pause_advertising(struct hclge_dev *hdev) +{ + struct hclge_mac *mac = &hdev->hw.mac; + bool rx_en, tx_en; + + switch (hdev->fc_mode_last_time) { + case HCLGE_FC_RX_PAUSE: + rx_en = true; + tx_en = false; + break; + case HCLGE_FC_TX_PAUSE: + rx_en = false; + tx_en = true; + break; + case HCLGE_FC_FULL: + rx_en = true; + tx_en = true; + break; + default: + rx_en = false; + tx_en = false; + break; + } + + linkmode_set_pause(mac->advertising, tx_en, 
rx_en); +} + +static void hclge_update_advertising(struct hclge_dev *hdev) +{ + struct hclge_mac *mac = &hdev->hw.mac; + + linkmode_zero(mac->advertising); + hclge_update_speed_advertising(mac); + hclge_update_fec_advertising(mac); + hclge_update_pause_advertising(hdev); +} + +static void hclge_update_port_capability(struct hclge_dev *hdev, + struct hclge_mac *mac) +{ + if (hnae3_dev_fec_supported(hdev)) + hclge_convert_setting_fec(mac); + + /* firmware cannot identify the backplane type, the media type + * read from the configuration can help to deal with it + */ + if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE && + mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN) + mac->module_type = HNAE3_MODULE_TYPE_KR; + else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER) + mac->module_type = HNAE3_MODULE_TYPE_TP; + + if (mac->support_autoneg) { + linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported); + linkmode_copy(mac->advertising, mac->supported); + } else { + linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, + mac->supported); + hclge_update_advertising(hdev); + } +} + +static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed) +{ + struct hclge_sfp_info_cmd *resp; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true); + resp = (struct hclge_sfp_info_cmd *)desc.data; + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret == -EOPNOTSUPP) { + dev_warn(&hdev->pdev->dev, + "IMP does not support getting SFP speed %d\n", ret); + return ret; + } else if (ret) { + dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret); + return ret; + } + + *speed = le32_to_cpu(resp->speed); + + return 0; +} + +static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac) +{ + struct hclge_sfp_info_cmd *resp; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true); + resp = (struct hclge_sfp_info_cmd *)desc.data; + + resp->query_type = QUERY_ACTIVE_SPEED; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret == -EOPNOTSUPP) { + dev_warn(&hdev->pdev->dev, + "IMP does not support getting SFP info %d\n", ret); + return ret; + } else if (ret) { + dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret); + return ret; + } + + /* In some cases, the mac speed queried from IMP may be 0, it shouldn't be + * set to mac->speed. 
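+ * Keep the previously configured speed in that case.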
+ */ + if (!le32_to_cpu(resp->speed)) + return 0; + + mac->speed = le32_to_cpu(resp->speed); + /* if resp->speed_ability is 0, it means it's an old version + * firmware, do not update these params + */ + if (resp->speed_ability) { + mac->module_type = le32_to_cpu(resp->module_type); + mac->speed_ability = le32_to_cpu(resp->speed_ability); + mac->autoneg = resp->autoneg; + mac->support_autoneg = resp->autoneg_ability; + mac->speed_type = QUERY_ACTIVE_SPEED; + mac->lane_num = resp->lane_num; + if (!resp->active_fec) + mac->fec_mode = 0; + else + mac->fec_mode = BIT(resp->active_fec); + mac->fec_ability = resp->fec_ability; + } else { + mac->speed_type = QUERY_SFP_SPEED; + } + + return 0; +} + +static int hclge_get_phy_link_ksettings(struct hnae3_handle *handle, + struct ethtool_link_ksettings *cmd) +{ + struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM]; + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_phy_link_ksetting_0_cmd *req0; + struct hclge_phy_link_ksetting_1_cmd *req1; + u32 supported, advertising, lp_advertising; + struct hclge_dev *hdev = vport->back; + int ret; + + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING, + true); + desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING, + true); + + ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get phy link ksetting, ret = %d.\n", ret); + return ret; + } + + req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data; + cmd->base.autoneg = req0->autoneg; + cmd->base.speed = le32_to_cpu(req0->speed); + cmd->base.duplex = req0->duplex; + cmd->base.port = req0->port; + cmd->base.transceiver = req0->transceiver; + cmd->base.phy_address = req0->phy_address; + cmd->base.eth_tp_mdix = req0->eth_tp_mdix; + cmd->base.eth_tp_mdix_ctrl = req0->eth_tp_mdix_ctrl; + supported = le32_to_cpu(req0->supported); + advertising = le32_to_cpu(req0->advertising); + lp_advertising = le32_to_cpu(req0->lp_advertising); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising, + lp_advertising); + + req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data; + cmd->base.master_slave_cfg = req1->master_slave_cfg; + cmd->base.master_slave_state = req1->master_slave_state; + + return 0; +} + +static int +hclge_set_phy_link_ksettings(struct hnae3_handle *handle, + const struct ethtool_link_ksettings *cmd) +{ + struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM]; + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_phy_link_ksetting_0_cmd *req0; + struct hclge_phy_link_ksetting_1_cmd *req1; + struct hclge_dev *hdev = vport->back; + u32 advertising; + int ret; + + if (cmd->base.autoneg == AUTONEG_DISABLE && + ((cmd->base.speed != SPEED_100 && cmd->base.speed != SPEED_10) || + (cmd->base.duplex != DUPLEX_HALF && + cmd->base.duplex != DUPLEX_FULL))) + return -EINVAL; + + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING, + false); + desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING, + false); + + req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data; + req0->autoneg = cmd->base.autoneg; + req0->speed = cpu_to_le32(cmd->base.speed); + req0->duplex = cmd->base.duplex; + 
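/* the command descriptor carries the advertising mask as a legacy u32 bitmap */ +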
ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); + req0->advertising = cpu_to_le32(advertising); + req0->eth_tp_mdix_ctrl = cmd->base.eth_tp_mdix_ctrl; + + req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data; + req1->master_slave_cfg = cmd->base.master_slave_cfg; + + ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to set phy link ksettings, ret = %d.\n", ret); + return ret; + } + + hdev->hw.mac.autoneg = cmd->base.autoneg; + hdev->hw.mac.speed = cmd->base.speed; + hdev->hw.mac.duplex = cmd->base.duplex; + linkmode_copy(hdev->hw.mac.advertising, cmd->link_modes.advertising); + + return 0; +} + +static int hclge_update_tp_port_info(struct hclge_dev *hdev) +{ + struct ethtool_link_ksettings cmd; + int ret; + + if (!hnae3_dev_phy_imp_supported(hdev)) + return 0; + + ret = hclge_get_phy_link_ksettings(&hdev->vport->nic, &cmd); + if (ret) + return ret; + + hdev->hw.mac.autoneg = cmd.base.autoneg; + hdev->hw.mac.speed = cmd.base.speed; + hdev->hw.mac.duplex = cmd.base.duplex; + linkmode_copy(hdev->hw.mac.advertising, cmd.link_modes.advertising); + + return 0; +} + +static int hclge_tp_port_init(struct hclge_dev *hdev) +{ + struct ethtool_link_ksettings cmd; + + if (!hnae3_dev_phy_imp_supported(hdev)) + return 0; + + cmd.base.autoneg = hdev->hw.mac.autoneg; + cmd.base.speed = hdev->hw.mac.speed; + cmd.base.duplex = hdev->hw.mac.duplex; + linkmode_copy(cmd.link_modes.advertising, hdev->hw.mac.advertising); + + return hclge_set_phy_link_ksettings(&hdev->vport->nic, &cmd); +} + +static int hclge_update_port_info(struct hclge_dev *hdev) +{ + struct hclge_mac *mac = &hdev->hw.mac; + int speed; + int ret; + + /* get the port info from SFP cmd if not copper port */ + if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER) + return hclge_update_tp_port_info(hdev); + + /* if IMP does not support get SFP/qSFP info, return directly */ + if (!hdev->support_sfp_query) + return 0; + + if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { + speed = mac->speed; + ret = hclge_get_sfp_info(hdev, mac); + } else { + speed = HCLGE_MAC_SPEED_UNKNOWN; + ret = hclge_get_sfp_speed(hdev, &speed); + } + + if (ret == -EOPNOTSUPP) { + hdev->support_sfp_query = false; + return ret; + } else if (ret) { + return ret; + } + + if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { + if (mac->speed_type == QUERY_ACTIVE_SPEED) { + hclge_update_port_capability(hdev, mac); + if (mac->speed != speed) + (void)hclge_tm_port_shaper_cfg(hdev); + return 0; + } + return hclge_cfg_mac_speed_dup(hdev, mac->speed, + HCLGE_MAC_FULL, mac->lane_num); + } else { + if (speed == HCLGE_MAC_SPEED_UNKNOWN) + return 0; /* do nothing if no SFP */ + + /* must config full duplex for SFP */ + return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL, 0); + } +} + +static int hclge_get_status(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + hclge_update_link_status(hdev); + + return hdev->hw.mac.link; +} + +static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf) +{ + if (!pci_num_vf(hdev->pdev)) { + dev_err(&hdev->pdev->dev, + "SRIOV is disabled, can not get vport(%d) info.\n", vf); + return NULL; + } + + if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) { + dev_err(&hdev->pdev->dev, + "vf id(%d) is out of range(0 <= vfid < %d)\n", + vf, pci_num_vf(hdev->pdev)); + return NULL; + } + + /* VF start from 1 in vport */ + vf += 
HCLGE_VF_VPORT_START_NUM; + return &hdev->vport[vf]; +} + +static int hclge_get_vf_config(struct hnae3_handle *handle, int vf, + struct ifla_vf_info *ivf) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + vport = hclge_get_vf_vport(hdev, vf); + if (!vport) + return -EINVAL; + + ivf->vf = vf; + ivf->linkstate = vport->vf_info.link_state; + ivf->spoofchk = vport->vf_info.spoofchk; + ivf->trusted = vport->vf_info.trusted; + ivf->min_tx_rate = 0; + ivf->max_tx_rate = vport->vf_info.max_tx_rate; + ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag; + ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto); + ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos; + ether_addr_copy(ivf->mac, vport->vf_info.mac); + + return 0; +} + +static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf, + int link_state) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + int link_state_old; + int ret; + + vport = hclge_get_vf_vport(hdev, vf); + if (!vport) + return -EINVAL; + + link_state_old = vport->vf_info.link_state; + vport->vf_info.link_state = link_state; + + /* return success directly if the VF is not alive, the VF will + * query the link state itself when it starts work. + */ + if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) + return 0; + + ret = hclge_push_vf_link_status(vport); + if (ret) { + vport->vf_info.link_state = link_state_old; + dev_err(&hdev->pdev->dev, + "failed to push vf%d link status, ret = %d\n", vf, ret); + } + + return ret; +} + +static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval) +{ + u32 cmdq_src_reg, msix_src_reg, hw_err_src_reg; + + /* fetch the events from their corresponding regs */ + cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG); + msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS); + hw_err_src_reg = hclge_read_dev(&hdev->hw, + HCLGE_RAS_PF_OTHER_INT_STS_REG); + + /* Assumption: If by any chance reset and mailbox events are reported + * together then we will only process the reset event in this pass and will + * defer the processing of the mailbox events. Since we would not have + * cleared the RX CMDQ event this time, we would receive another + * interrupt from the H/W just for the mailbox. 
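+ * That follow-up interrupt then schedules the mailbox service task.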
+ * + * check for vector0 reset event sources + */ + if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) { + dev_info(&hdev->pdev->dev, "IMP reset interrupt\n"); + set_bit(HNAE3_IMP_RESET, &hdev->reset_pending); + set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); + *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B); + hdev->rst_stats.imp_rst_cnt++; + return HCLGE_VECTOR0_EVENT_RST; + } + + if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) { + dev_info(&hdev->pdev->dev, "global reset interrupt\n"); + set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); + set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending); + *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B); + hdev->rst_stats.global_rst_cnt++; + return HCLGE_VECTOR0_EVENT_RST; + } + + /* check for vector0 msix event and hardware error event source */ + if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK || + hw_err_src_reg & HCLGE_RAS_REG_ERR_MASK) + return HCLGE_VECTOR0_EVENT_ERR; + + /* check for vector0 ptp event source */ + if (BIT(HCLGE_VECTOR0_REG_PTP_INT_B) & msix_src_reg) { + *clearval = msix_src_reg; + return HCLGE_VECTOR0_EVENT_PTP; + } + + /* check for vector0 mailbox(=CMDQ RX) event source */ + if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) { + cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B); + *clearval = cmdq_src_reg; + return HCLGE_VECTOR0_EVENT_MBX; + } + + /* print other vector0 event source */ + dev_info(&hdev->pdev->dev, + "INT status: CMDQ(%#x) HW errors(%#x) other(%#x)\n", + cmdq_src_reg, hw_err_src_reg, msix_src_reg); + + return HCLGE_VECTOR0_EVENT_OTHER; +} + +static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type, + u32 regclr) +{ +#define HCLGE_IMP_RESET_DELAY 5 + + switch (event_type) { + case HCLGE_VECTOR0_EVENT_PTP: + case HCLGE_VECTOR0_EVENT_RST: + if (regclr == BIT(HCLGE_VECTOR0_IMPRESET_INT_B)) + mdelay(HCLGE_IMP_RESET_DELAY); + + hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr); + break; + case HCLGE_VECTOR0_EVENT_MBX: + hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr); + break; + default: + break; + } +} + +static void hclge_clear_all_event_cause(struct hclge_dev *hdev) +{ + hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST, + BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) | + BIT(HCLGE_VECTOR0_CORERESET_INT_B) | + BIT(HCLGE_VECTOR0_IMPRESET_INT_B)); + hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0); +} + +static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable) +{ + writel(enable ? 1 : 0, vector->addr); +} + +static irqreturn_t hclge_misc_irq_handle(int irq, void *data) +{ + struct hclge_dev *hdev = data; + unsigned long flags; + u32 clearval = 0; + u32 event_cause; + + hclge_enable_vector(&hdev->misc_vector, false); + event_cause = hclge_check_event_cause(hdev, &clearval); + + /* vector 0 interrupt is shared with reset and mailbox source events. */ + switch (event_cause) { + case HCLGE_VECTOR0_EVENT_ERR: + hclge_errhand_task_schedule(hdev); + break; + case HCLGE_VECTOR0_EVENT_RST: + hclge_reset_task_schedule(hdev); + break; + case HCLGE_VECTOR0_EVENT_PTP: + spin_lock_irqsave(&hdev->ptp->lock, flags); + hclge_ptp_clean_tx_hwts(hdev); + spin_unlock_irqrestore(&hdev->ptp->lock, flags); + break; + case HCLGE_VECTOR0_EVENT_MBX: + /* If we are here then, + * 1. Either we are not handling any mbx task and we are not + * scheduled as well + * OR + * 2. We could be handling a mbx task but nothing more is + * scheduled. 
+ * In both cases, we should schedule mbx task as there are more + * mbx messages reported by this interrupt. + */ + hclge_mbx_task_schedule(hdev); + break; + default: + dev_warn(&hdev->pdev->dev, + "received unknown or unhandled event of vector0\n"); + break; + } + + hclge_clear_event_cause(hdev, event_cause, clearval); + + /* Enable interrupt if it is not caused by reset event or error event */ + if (event_cause == HCLGE_VECTOR0_EVENT_PTP || + event_cause == HCLGE_VECTOR0_EVENT_MBX || + event_cause == HCLGE_VECTOR0_EVENT_OTHER) + hclge_enable_vector(&hdev->misc_vector, true); + + return IRQ_HANDLED; +} + +static void hclge_free_vector(struct hclge_dev *hdev, int vector_id) +{ + if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) { + dev_warn(&hdev->pdev->dev, + "vector(vector_id %d) has been freed.\n", vector_id); + return; + } + + hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT; + hdev->num_msi_left += 1; + hdev->num_msi_used -= 1; +} + +static void hclge_get_misc_vector(struct hclge_dev *hdev) +{ + struct hclge_misc_vector *vector = &hdev->misc_vector; + + vector->vector_irq = pci_irq_vector(hdev->pdev, 0); + + vector->addr = hdev->hw.hw.io_base + HCLGE_MISC_VECTOR_REG_BASE; + hdev->vector_status[0] = 0; + + hdev->num_msi_left -= 1; + hdev->num_msi_used += 1; +} + +static int hclge_misc_irq_init(struct hclge_dev *hdev) +{ + int ret; + + hclge_get_misc_vector(hdev); + + /* this would be explicitly freed in the end */ + snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s", + HCLGE_NAME, pci_name(hdev->pdev)); + ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle, + 0, hdev->misc_vector.name, hdev); + if (ret) { + hclge_free_vector(hdev, 0); + dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n", + hdev->misc_vector.vector_irq); + } + + return ret; +} + +static void hclge_misc_irq_uninit(struct hclge_dev *hdev) +{ + free_irq(hdev->misc_vector.vector_irq, hdev); + hclge_free_vector(hdev, 0); +} + +int hclge_notify_client(struct hclge_dev *hdev, + enum hnae3_reset_notify_type type) +{ + struct hnae3_handle *handle = &hdev->vport[0].nic; + struct hnae3_client *client = hdev->nic_client; + int ret; + + if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client) + return 0; + + if (!client->ops->reset_notify) + return -EOPNOTSUPP; + + ret = client->ops->reset_notify(handle, type); + if (ret) + dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n", + type, ret); + + return ret; +} + +static int hclge_notify_roce_client(struct hclge_dev *hdev, + enum hnae3_reset_notify_type type) +{ + struct hnae3_handle *handle = &hdev->vport[0].roce; + struct hnae3_client *client = hdev->roce_client; + int ret; + + if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client) + return 0; + + if (!client->ops->reset_notify) + return -EOPNOTSUPP; + + ret = client->ops->reset_notify(handle, type); + if (ret) + dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)", + type, ret); + + return ret; +} + +static int hclge_reset_wait(struct hclge_dev *hdev) +{ +#define HCLGE_RESET_WATI_MS 100 +#define HCLGE_RESET_WAIT_CNT 350 + + u32 val, reg, reg_bit; + u32 cnt = 0; + + switch (hdev->reset_type) { + case HNAE3_IMP_RESET: + reg = HCLGE_GLOBAL_RESET_REG; + reg_bit = HCLGE_IMP_RESET_BIT; + break; + case HNAE3_GLOBAL_RESET: + reg = HCLGE_GLOBAL_RESET_REG; + reg_bit = HCLGE_GLOBAL_RESET_BIT; + break; + case HNAE3_FUNC_RESET: + reg = HCLGE_FUN_RST_ING; + reg_bit = HCLGE_FUN_RST_ING_B; + break; + default: + dev_err(&hdev->pdev->dev, + "Wait for 
unsupported reset type: %d\n", + hdev->reset_type); + return -EINVAL; + } + + val = hclge_read_dev(&hdev->hw, reg); + while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) { + msleep(HCLGE_RESET_WATI_MS); + val = hclge_read_dev(&hdev->hw, reg); + cnt++; + } + + if (cnt >= HCLGE_RESET_WAIT_CNT) { + dev_warn(&hdev->pdev->dev, + "Wait for reset timeout: %d\n", hdev->reset_type); + return -EBUSY; + } + + return 0; +} + +static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset) +{ + struct hclge_vf_rst_cmd *req; + struct hclge_desc desc; + + req = (struct hclge_vf_rst_cmd *)desc.data; + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false); + req->dest_vfid = func_id; + + if (reset) + req->vf_rst = 0x1; + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset) +{ + int i; + + for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++) { + struct hclge_vport *vport = &hdev->vport[i]; + int ret; + + /* Send cmd to set/clear VF's FUNC_RST_ING */ + ret = hclge_set_vf_rst(hdev, vport->vport_id, reset); + if (ret) { + dev_err(&hdev->pdev->dev, + "set vf(%u) rst failed %d!\n", + vport->vport_id - HCLGE_VF_VPORT_START_NUM, + ret); + return ret; + } + + if (!reset || + !test_bit(HCLGE_VPORT_STATE_INITED, &vport->state)) + continue; + + if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state) && + hdev->reset_type == HNAE3_FUNC_RESET) { + set_bit(HCLGE_VPORT_NEED_NOTIFY_RESET, + &vport->need_notify); + continue; + } + + /* Inform VF to process the reset. + * hclge_inform_reset_assert_to_vf may fail if VF + * driver is not loaded. + */ + ret = hclge_inform_reset_assert_to_vf(vport); + if (ret) + dev_warn(&hdev->pdev->dev, + "inform reset to vf(%u) failed %d!\n", + vport->vport_id - HCLGE_VF_VPORT_START_NUM, + ret); + } + + return 0; +} + +static void hclge_mailbox_service_task(struct hclge_dev *hdev) +{ + if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) || + test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state) || + test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state)) + return; + + if (time_is_before_jiffies(hdev->last_mbx_scheduled + + HCLGE_MBX_SCHED_TIMEOUT)) + dev_warn(&hdev->pdev->dev, + "mbx service task is scheduled after %ums on cpu%u!\n", + jiffies_to_msecs(jiffies - hdev->last_mbx_scheduled), + smp_processor_id()); + + hclge_mbx_handler(hdev); + + clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state); +} + +static void hclge_func_reset_sync_vf(struct hclge_dev *hdev) +{ + struct hclge_pf_rst_sync_cmd *req; + struct hclge_desc desc; + int cnt = 0; + int ret; + + req = (struct hclge_pf_rst_sync_cmd *)desc.data; + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true); + + do { + /* vf need to down netdev by mbx during PF or FLR reset */ + hclge_mailbox_service_task(hdev); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + /* for compatible with old firmware, wait + * 100 ms for VF to stop IO + */ + if (ret == -EOPNOTSUPP) { + msleep(HCLGE_RESET_SYNC_TIME); + return; + } else if (ret) { + dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n", + ret); + return; + } else if (req->all_vf_ready) { + return; + } + msleep(HCLGE_PF_RESET_SYNC_TIME); + hclge_comm_cmd_reuse_desc(&desc, true); + } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT); + + dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n"); +} + +void hclge_report_hw_error(struct hclge_dev *hdev, + enum hnae3_hw_error_type type) +{ + struct hnae3_client *client = hdev->nic_client; + + if (!client || 
!client->ops->process_hw_error || + !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state)) + return; + + client->ops->process_hw_error(&hdev->vport[0].nic, type); +} + +static void hclge_handle_imp_error(struct hclge_dev *hdev) +{ + u32 reg_val; + + reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG); + if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) { + hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR); + reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B); + hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val); + } + + if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) { + hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR); + reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B); + hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val); + } +} + +int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id) +{ + struct hclge_desc desc; + struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false); + hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1); + req->fun_reset_vfid = func_id; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "send function reset cmd fail, status =%d\n", ret); + + return ret; +} + +static void hclge_do_reset(struct hclge_dev *hdev) +{ + struct hnae3_handle *handle = &hdev->vport[0].nic; + struct pci_dev *pdev = hdev->pdev; + u32 val; + + if (hclge_get_hw_reset_stat(handle)) { + dev_info(&pdev->dev, "hardware reset not finish\n"); + dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n", + hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING), + hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG)); + return; + } + + switch (hdev->reset_type) { + case HNAE3_IMP_RESET: + dev_info(&pdev->dev, "IMP reset requested\n"); + val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG); + hnae3_set_bit(val, HCLGE_TRIGGER_IMP_RESET_B, 1); + hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, val); + break; + case HNAE3_GLOBAL_RESET: + dev_info(&pdev->dev, "global reset requested\n"); + val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG); + hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1); + hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val); + break; + case HNAE3_FUNC_RESET: + dev_info(&pdev->dev, "PF reset requested\n"); + /* schedule again to check later */ + set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending); + hclge_reset_task_schedule(hdev); + break; + default: + dev_warn(&pdev->dev, + "unsupported reset type: %d\n", hdev->reset_type); + break; + } +} + +static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev, + unsigned long *addr) +{ + enum hnae3_reset_type rst_level = HNAE3_NONE_RESET; + struct hclge_dev *hdev = ae_dev->priv; + + /* return the highest priority reset level amongst all */ + if (test_bit(HNAE3_IMP_RESET, addr)) { + rst_level = HNAE3_IMP_RESET; + clear_bit(HNAE3_IMP_RESET, addr); + clear_bit(HNAE3_GLOBAL_RESET, addr); + clear_bit(HNAE3_FUNC_RESET, addr); + } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) { + rst_level = HNAE3_GLOBAL_RESET; + clear_bit(HNAE3_GLOBAL_RESET, addr); + clear_bit(HNAE3_FUNC_RESET, addr); + } else if (test_bit(HNAE3_FUNC_RESET, addr)) { + rst_level = HNAE3_FUNC_RESET; + clear_bit(HNAE3_FUNC_RESET, addr); + } else if (test_bit(HNAE3_FLR_RESET, addr)) { + rst_level = HNAE3_FLR_RESET; + clear_bit(HNAE3_FLR_RESET, addr); + } + + if (hdev->reset_type != HNAE3_NONE_RESET && + rst_level < hdev->reset_type) + return HNAE3_NONE_RESET; + + return rst_level; +} + +static void 
hclge_clear_reset_cause(struct hclge_dev *hdev) +{ + u32 clearval = 0; + + switch (hdev->reset_type) { + case HNAE3_IMP_RESET: + clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B); + break; + case HNAE3_GLOBAL_RESET: + clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B); + break; + default: + break; + } + + if (!clearval) + return; + + /* For revision 0x20, the reset interrupt source + * can only be cleared after the hardware reset is done + */ + if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) + hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, + clearval); + + hclge_enable_vector(&hdev->misc_vector, true); +} + +static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable) +{ + u32 reg_val; + + reg_val = hclge_read_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG); + if (enable) + reg_val |= HCLGE_COMM_NIC_SW_RST_RDY; + else + reg_val &= ~HCLGE_COMM_NIC_SW_RST_RDY; + + hclge_write_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG, reg_val); +} + +static int hclge_func_reset_notify_vf(struct hclge_dev *hdev) +{ + int ret; + + ret = hclge_set_all_vf_rst(hdev, true); + if (ret) + return ret; + + hclge_func_reset_sync_vf(hdev); + + return 0; +} + +static int hclge_reset_prepare_wait(struct hclge_dev *hdev) +{ + u32 reg_val; + int ret = 0; + + switch (hdev->reset_type) { + case HNAE3_FUNC_RESET: + ret = hclge_func_reset_notify_vf(hdev); + if (ret) + return ret; + + ret = hclge_func_reset_cmd(hdev, 0); + if (ret) { + dev_err(&hdev->pdev->dev, + "asserting function reset fail %d!\n", ret); + return ret; + } + + /* After performing PF reset, it is not necessary to do the + * mailbox handling or send any command to firmware, because + * any mailbox handling or command to firmware is only valid + * after hclge_comm_cmd_init is called. + */ + set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); + hdev->rst_stats.pf_rst_cnt++; + break; + case HNAE3_FLR_RESET: + ret = hclge_func_reset_notify_vf(hdev); + if (ret) + return ret; + break; + case HNAE3_IMP_RESET: + hclge_handle_imp_error(hdev); + reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG); + hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, + BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val); + break; + default: + break; + } + + /* inform hardware that preparatory work is done */ + msleep(HCLGE_RESET_SYNC_TIME); + hclge_reset_handshake(hdev, true); + dev_info(&hdev->pdev->dev, "prepare wait ok\n"); + + return ret; +} + +static void hclge_show_rst_info(struct hclge_dev *hdev) +{ + char *buf; + + buf = kzalloc(HCLGE_DBG_RESET_INFO_LEN, GFP_KERNEL); + if (!buf) + return; + + hclge_dbg_dump_rst_info(hdev, buf, HCLGE_DBG_RESET_INFO_LEN); + + dev_info(&hdev->pdev->dev, "dump reset info:\n%s", buf); + + kfree(buf); +} + +static bool hclge_reset_err_handle(struct hclge_dev *hdev) +{ +#define MAX_RESET_FAIL_CNT 5 + + if (hdev->reset_pending) { + dev_info(&hdev->pdev->dev, "Reset pending %lu\n", + hdev->reset_pending); + return true; + } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) & + HCLGE_RESET_INT_M) { + dev_info(&hdev->pdev->dev, + "reset failed because new reset interrupt\n"); + hclge_clear_reset_cause(hdev); + return false; + } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) { + hdev->rst_stats.reset_fail_cnt++; + set_bit(hdev->reset_type, &hdev->reset_pending); + dev_info(&hdev->pdev->dev, + "re-schedule reset task(%u)\n", + hdev->rst_stats.reset_fail_cnt); + return true; + } + + hclge_clear_reset_cause(hdev); + + /* recover the handshake status when reset fails */ + hclge_reset_handshake(hdev, true); + + 
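/* retries exhausted: log the failure and mark the reset as failed */ +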
dev_err(&hdev->pdev->dev, "Reset fail!\n"); + + hclge_show_rst_info(hdev); + + set_bit(HCLGE_STATE_RST_FAIL, &hdev->state); + + return false; +} + +static void hclge_update_reset_level(struct hclge_dev *hdev) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); + enum hnae3_reset_type reset_level; + + /* reset request will not be set during reset, so clear + * pending reset request to avoid unnecessary reset + * caused by the same reason. + */ + hclge_get_reset_level(ae_dev, &hdev->reset_request); + + /* if default_reset_request has a higher level reset request, + * it should be handled as soon as possible. since some errors + * need this kind of reset to fix. + */ + reset_level = hclge_get_reset_level(ae_dev, + &hdev->default_reset_request); + if (reset_level != HNAE3_NONE_RESET) + set_bit(reset_level, &hdev->reset_request); +} + +static int hclge_set_rst_done(struct hclge_dev *hdev) +{ + struct hclge_pf_rst_done_cmd *req; + struct hclge_desc desc; + int ret; + + req = (struct hclge_pf_rst_done_cmd *)desc.data; + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false); + req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + /* To be compatible with the old firmware, which does not support + * command HCLGE_OPC_PF_RST_DONE, just print a warning and + * return success + */ + if (ret == -EOPNOTSUPP) { + dev_warn(&hdev->pdev->dev, + "current firmware does not support command(0x%x)!\n", + HCLGE_OPC_PF_RST_DONE); + return 0; + } else if (ret) { + dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n", + ret); + } + + return ret; +} + +static int hclge_reset_prepare_up(struct hclge_dev *hdev) +{ + int ret = 0; + + switch (hdev->reset_type) { + case HNAE3_FUNC_RESET: + case HNAE3_FLR_RESET: + ret = hclge_set_all_vf_rst(hdev, false); + break; + case HNAE3_GLOBAL_RESET: + case HNAE3_IMP_RESET: + ret = hclge_set_rst_done(hdev); + break; + default: + break; + } + + /* clear up the handshake status after re-initialize done */ + hclge_reset_handshake(hdev, false); + + return ret; +} + +static int hclge_reset_stack(struct hclge_dev *hdev) +{ + int ret; + + ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT); + if (ret) + return ret; + + ret = hclge_reset_ae_dev(hdev->ae_dev); + if (ret) + return ret; + + return hclge_notify_client(hdev, HNAE3_INIT_CLIENT); +} + +static int hclge_reset_prepare(struct hclge_dev *hdev) +{ + int ret; + + hdev->rst_stats.reset_cnt++; + /* perform reset of the stack & ae device for a client */ + ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT); + if (ret) + return ret; + + rtnl_lock(); + ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); + rtnl_unlock(); + if (ret) + return ret; + + return hclge_reset_prepare_wait(hdev); +} + +static int hclge_reset_rebuild(struct hclge_dev *hdev) +{ + int ret; + + hdev->rst_stats.hw_reset_done_cnt++; + + ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT); + if (ret) + return ret; + + rtnl_lock(); + ret = hclge_reset_stack(hdev); + rtnl_unlock(); + if (ret) + return ret; + + hclge_clear_reset_cause(hdev); + + ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT); + /* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1 + * times + */ + if (ret && + hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1) + return ret; + + ret = hclge_reset_prepare_up(hdev); + if (ret) + return ret; + + rtnl_lock(); + ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT); + rtnl_unlock(); + if (ret) + return ret; + + ret = hclge_notify_roce_client(hdev, 
HNAE3_UP_CLIENT); + if (ret) + return ret; + + hdev->last_reset_time = jiffies; + hdev->rst_stats.reset_fail_cnt = 0; + hdev->rst_stats.reset_done_cnt++; + clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state); + + hclge_update_reset_level(hdev); + + return 0; +} + +static void hclge_reset(struct hclge_dev *hdev) +{ + if (hclge_reset_prepare(hdev)) + goto err_reset; + + if (hclge_reset_wait(hdev)) + goto err_reset; + + if (hclge_reset_rebuild(hdev)) + goto err_reset; + + return; + +err_reset: + if (hclge_reset_err_handle(hdev)) + hclge_reset_task_schedule(hdev); +} + +static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); + struct hclge_dev *hdev = ae_dev->priv; + + /* We might end up getting called broadly because of the 2 cases below: + * 1. Recoverable error was conveyed through APEI and the only way to + * bring normalcy is to reset. + * 2. A new reset request from the stack due to timeout + * + * Check if this is a new reset request and we are not here just because + * the last reset attempt did not succeed and the watchdog hit us again. + * We will know this if the last reset request did not occur very + * recently (watchdog timer = 5*HZ, let us check after a sufficiently + * large time, say 4*5*HZ). + * In case of a new request we reset the "reset level" to PF reset. + * And if it is a repeat reset request of the most recent one then we + * want to make sure we throttle the reset request. Therefore, we will + * not allow it again before 3*HZ times. + */ + + if (time_before(jiffies, (hdev->last_reset_time + + HCLGE_RESET_INTERVAL))) { + mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL); + return; + } + + if (hdev->default_reset_request) { + hdev->reset_level = + hclge_get_reset_level(ae_dev, + &hdev->default_reset_request); + } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) { + hdev->reset_level = HNAE3_FUNC_RESET; + } + + dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n", + hdev->reset_level); + + /* request reset & schedule reset task */ + set_bit(hdev->reset_level, &hdev->reset_request); + hclge_reset_task_schedule(hdev); + + if (hdev->reset_level < HNAE3_GLOBAL_RESET) + hdev->reset_level++; +} + +static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev, + enum hnae3_reset_type rst_type) +{ + struct hclge_dev *hdev = ae_dev->priv; + + set_bit(rst_type, &hdev->default_reset_request); +} + +static void hclge_reset_timer(struct timer_list *t) +{ + struct hclge_dev *hdev = from_timer(hdev, t, reset_timer); + + /* if default_reset_request has no value, it means that this reset + * request has already been handled, so just return here + */ + if (!hdev->default_reset_request) + return; + + dev_info(&hdev->pdev->dev, + "triggering reset in reset timer\n"); + hclge_reset_event(hdev->pdev, NULL); +} + +static void hclge_reset_subtask(struct hclge_dev *hdev) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); + + /* check if there is any ongoing reset in the hardware. This status can + * be checked from reset_pending. If there is, then we need to wait for + * the hardware to complete the reset. + * a. If we are able to figure out in reasonable time that the hardware + * has fully reset, then we can proceed with the driver and client + * reset. + * b. else, we can come back later to check this status so re-schedule + * now. 
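+ * Any new reset request found afterwards is kicked off via hclge_do_reset().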
+ */ + hdev->last_reset_time = jiffies; + hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending); + if (hdev->reset_type != HNAE3_NONE_RESET) + hclge_reset(hdev); + + /* check if we got any *new* reset requests to be honored */ + hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request); + if (hdev->reset_type != HNAE3_NONE_RESET) + hclge_do_reset(hdev); + + hdev->reset_type = HNAE3_NONE_RESET; +} + +static void hclge_handle_err_reset_request(struct hclge_dev *hdev) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); + enum hnae3_reset_type reset_type; + + if (ae_dev->hw_err_reset_req) { + reset_type = hclge_get_reset_level(ae_dev, + &ae_dev->hw_err_reset_req); + hclge_set_def_reset_request(ae_dev, reset_type); + } + + if (hdev->default_reset_request && ae_dev->ops->reset_event) + ae_dev->ops->reset_event(hdev->pdev, NULL); + + /* enable interrupt after error handling complete */ + hclge_enable_vector(&hdev->misc_vector, true); +} + +static void hclge_handle_err_recovery(struct hclge_dev *hdev) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); + + ae_dev->hw_err_reset_req = 0; + + if (hclge_find_error_source(hdev)) { + hclge_handle_error_info_log(ae_dev); + hclge_handle_mac_tnl(hdev); + } + + hclge_handle_err_reset_request(hdev); +} + +static void hclge_misc_err_recovery(struct hclge_dev *hdev) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); + struct device *dev = &hdev->pdev->dev; + u32 msix_sts_reg; + + msix_sts_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS); + if (msix_sts_reg & HCLGE_VECTOR0_REG_MSIX_MASK) { + if (hclge_handle_hw_msix_error + (hdev, &hdev->default_reset_request)) + dev_info(dev, "received msix interrupt 0x%x\n", + msix_sts_reg); + } + + hclge_handle_hw_ras_error(ae_dev); + + hclge_handle_err_reset_request(hdev); +} + +static void hclge_errhand_service_task(struct hclge_dev *hdev) +{ + if (!test_and_clear_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state)) + return; + + if (hnae3_dev_ras_imp_supported(hdev)) + hclge_handle_err_recovery(hdev); + else + hclge_misc_err_recovery(hdev); +} + +static void hclge_reset_service_task(struct hclge_dev *hdev) +{ + if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state)) + return; + + if (time_is_before_jiffies(hdev->last_rst_scheduled + + HCLGE_RESET_SCHED_TIMEOUT)) + dev_warn(&hdev->pdev->dev, + "reset service task is scheduled after %ums on cpu%u!\n", + jiffies_to_msecs(jiffies - hdev->last_rst_scheduled), + smp_processor_id()); + + down(&hdev->reset_sem); + set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); + + hclge_reset_subtask(hdev); + + clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); + up(&hdev->reset_sem); +} + +static void hclge_update_vport_alive(struct hclge_dev *hdev) +{ +#define HCLGE_ALIVE_SECONDS_NORMAL 8 + + unsigned long alive_time = HCLGE_ALIVE_SECONDS_NORMAL * HZ; + int i; + + /* start from vport 1 for PF is always alive */ + for (i = 1; i < hdev->num_alloc_vport; i++) { + struct hclge_vport *vport = &hdev->vport[i]; + + if (!test_bit(HCLGE_VPORT_STATE_INITED, &vport->state) || + !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) + continue; + if (time_after(jiffies, vport->last_active_jiffies + + alive_time)) { + clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); + dev_warn(&hdev->pdev->dev, + "VF %u heartbeat timeout\n", + i - HCLGE_VF_VPORT_START_NUM); + } + } +} + +static void hclge_periodic_service_task(struct hclge_dev *hdev) +{ + unsigned long delta = round_jiffies_relative(HZ); + + if 
(test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) + return; + + /* Always handle the link updating to make sure link state is + * updated when it is triggered by mbx. + */ + hclge_update_link_status(hdev); + hclge_sync_mac_table(hdev); + hclge_sync_promisc_mode(hdev); + hclge_sync_fd_table(hdev); + + if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) { + delta = jiffies - hdev->last_serv_processed; + + if (delta < round_jiffies_relative(HZ)) { + delta = round_jiffies_relative(HZ) - delta; + goto out; + } + } + + hdev->serv_processed_cnt++; + hclge_update_vport_alive(hdev); + + if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) { + hdev->last_serv_processed = jiffies; + goto out; + } + + if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL)) + hclge_update_stats_for_all(hdev); + + hclge_update_port_info(hdev); + hclge_sync_vlan_filter(hdev); + + if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL)) + hclge_rfs_filter_expire(hdev); + + hdev->last_serv_processed = jiffies; + +out: + hclge_task_schedule(hdev, delta); +} + +static void hclge_ptp_service_task(struct hclge_dev *hdev) +{ + unsigned long flags; + + if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state) || + !test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state) || + !time_is_before_jiffies(hdev->ptp->tx_start + HZ)) + return; + + /* to prevent concurrence with the irq handler */ + spin_lock_irqsave(&hdev->ptp->lock, flags); + + /* check HCLGE_STATE_PTP_TX_HANDLING here again, since the irq + * handler may handle it just before spin_lock_irqsave(). + */ + if (test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state)) + hclge_ptp_clean_tx_hwts(hdev); + + spin_unlock_irqrestore(&hdev->ptp->lock, flags); +} + +static void hclge_service_task(struct work_struct *work) +{ + struct hclge_dev *hdev = + container_of(work, struct hclge_dev, service_task.work); + + hclge_errhand_service_task(hdev); + hclge_reset_service_task(hdev); + hclge_ptp_service_task(hdev); + hclge_mailbox_service_task(hdev); + hclge_periodic_service_task(hdev); + + /* Handle error recovery, reset and mbx again in case periodical task + * delays the handling by calling hclge_task_schedule() in + * hclge_periodic_service_task(). 
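+ * The extra pass is cheap: each sub-task bails out early via its
+ * test_and_clear_bit() check when its service-scheduled state bit has
+ * not been set again in the meantime.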
+ */ + hclge_errhand_service_task(hdev); + hclge_reset_service_task(hdev); + hclge_mailbox_service_task(hdev); +} + +struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle) +{ + /* VF handle has no client */ + if (!handle->client) + return container_of(handle, struct hclge_vport, nic); + else if (handle->client->type == HNAE3_CLIENT_ROCE) + return container_of(handle, struct hclge_vport, roce); + else + return container_of(handle, struct hclge_vport, nic); +} + +static void hclge_get_vector_info(struct hclge_dev *hdev, u16 idx, + struct hnae3_vector_info *vector_info) +{ +#define HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 64 + + vector_info->vector = pci_irq_vector(hdev->pdev, idx); + + /* need an extend offset to config vector >= 64 */ + if (idx - 1 < HCLGE_PF_MAX_VECTOR_NUM_DEV_V2) + vector_info->io_addr = hdev->hw.hw.io_base + + HCLGE_VECTOR_REG_BASE + + (idx - 1) * HCLGE_VECTOR_REG_OFFSET; + else + vector_info->io_addr = hdev->hw.hw.io_base + + HCLGE_VECTOR_EXT_REG_BASE + + (idx - 1) / HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 * + HCLGE_VECTOR_REG_OFFSET_H + + (idx - 1) % HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 * + HCLGE_VECTOR_REG_OFFSET; + + hdev->vector_status[idx] = hdev->vport[0].vport_id; + hdev->vector_irq[idx] = vector_info->vector; +} + +static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num, + struct hnae3_vector_info *vector_info) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hnae3_vector_info *vector = vector_info; + struct hclge_dev *hdev = vport->back; + int alloc = 0; + u16 i = 0; + u16 j; + + vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num); + vector_num = min(hdev->num_msi_left, vector_num); + + for (j = 0; j < vector_num; j++) { + while (++i < hdev->num_nic_msi) { + if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) { + hclge_get_vector_info(hdev, i, vector); + vector++; + alloc++; + + break; + } + } + } + hdev->num_msi_left -= alloc; + hdev->num_msi_used += alloc; + + return alloc; +} + +static int hclge_get_vector_index(struct hclge_dev *hdev, int vector) +{ + int i; + + for (i = 0; i < hdev->num_msi; i++) + if (vector == hdev->vector_irq[i]) + return i; + + return -EINVAL; +} + +static int hclge_put_vector(struct hnae3_handle *handle, int vector) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + int vector_id; + + vector_id = hclge_get_vector_index(hdev, vector); + if (vector_id < 0) { + dev_err(&hdev->pdev->dev, + "Get vector index fail. 
vector = %d\n", vector); + return vector_id; + } + + hclge_free_vector(hdev, vector_id); + + return 0; +} + +static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir, + u8 *key, u8 *hfunc) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev); + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_comm_rss_cfg *rss_cfg = &vport->back->rss_cfg; + + hclge_comm_get_rss_hash_info(rss_cfg, key, hfunc); + + hclge_comm_get_rss_indir_tbl(rss_cfg, indir, + ae_dev->dev_specs.rss_ind_tbl_size); + + return 0; +} + +static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev); + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg; + int ret, i; + + ret = hclge_comm_set_rss_hash_key(rss_cfg, &hdev->hw.hw, key, hfunc); + if (ret) { + dev_err(&hdev->pdev->dev, "invalid hfunc type %u\n", hfunc); + return ret; + } + + /* Update the shadow RSS table with user specified qids */ + for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++) + rss_cfg->rss_indirection_tbl[i] = indir[i]; + + /* Update the hardware */ + return hclge_comm_set_rss_indir_table(ae_dev, &hdev->hw.hw, + rss_cfg->rss_indirection_tbl); +} + +static int hclge_set_rss_tuple(struct hnae3_handle *handle, + struct ethtool_rxnfc *nfc) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + int ret; + + ret = hclge_comm_set_rss_tuple(hdev->ae_dev, &hdev->hw.hw, + &hdev->rss_cfg, nfc); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to set rss tuple, ret = %d.\n", ret); + return ret; + } + + return 0; +} + +static int hclge_get_rss_tuple(struct hnae3_handle *handle, + struct ethtool_rxnfc *nfc) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + u8 tuple_sets; + int ret; + + nfc->data = 0; + + ret = hclge_comm_get_rss_tuple(&vport->back->rss_cfg, nfc->flow_type, + &tuple_sets); + if (ret || !tuple_sets) + return ret; + + nfc->data = hclge_comm_convert_rss_tuple(tuple_sets); + + return 0; +} + +static int hclge_get_tc_size(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + return hdev->pf_rss_size_max; +} + +static int hclge_init_rss_tc_mode(struct hclge_dev *hdev) +{ + struct hnae3_ae_dev *ae_dev = hdev->ae_dev; + struct hclge_vport *vport = hdev->vport; + u16 tc_offset[HCLGE_MAX_TC_NUM] = {0}; + u16 tc_valid[HCLGE_MAX_TC_NUM] = {0}; + u16 tc_size[HCLGE_MAX_TC_NUM] = {0}; + struct hnae3_tc_info *tc_info; + u16 roundup_size; + u16 rss_size; + int i; + + tc_info = &vport->nic.kinfo.tc_info; + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { + rss_size = tc_info->tqp_count[i]; + tc_valid[i] = 0; + + if (!(hdev->hw_tc_map & BIT(i))) + continue; + + /* tc_size set to hardware is the log2 of roundup power of two + * of rss_size, the acutal queue size is limited by indirection + * table. 
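+ * For example, rss_size = 24 is rounded up to 32 and programmed as
+ * tc_size = ilog2(32) = 5.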
+ */ + if (rss_size > ae_dev->dev_specs.rss_ind_tbl_size || + rss_size == 0) { + dev_err(&hdev->pdev->dev, + "Configure rss tc size failed, invalid TC_SIZE = %u\n", + rss_size); + return -EINVAL; + } + + roundup_size = roundup_pow_of_two(rss_size); + roundup_size = ilog2(roundup_size); + + tc_valid[i] = 1; + tc_size[i] = roundup_size; + tc_offset[i] = tc_info->tqp_offset[i]; + } + + return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset, tc_valid, + tc_size); +} + +int hclge_rss_init_hw(struct hclge_dev *hdev) +{ + u16 *rss_indir = hdev->rss_cfg.rss_indirection_tbl; + u8 *key = hdev->rss_cfg.rss_hash_key; + u8 hfunc = hdev->rss_cfg.rss_algo; + int ret; + + ret = hclge_comm_set_rss_indir_table(hdev->ae_dev, &hdev->hw.hw, + rss_indir); + if (ret) + return ret; + + ret = hclge_comm_set_rss_algo_key(&hdev->hw.hw, hfunc, key); + if (ret) + return ret; + + ret = hclge_comm_set_rss_input_tuple(&hdev->vport[0].nic, + &hdev->hw.hw, true, + &hdev->rss_cfg); + if (ret) + return ret; + + return hclge_init_rss_tc_mode(hdev); +} + +int hclge_bind_ring_with_vector(struct hclge_vport *vport, + int vector_id, bool en, + struct hnae3_ring_chain_node *ring_chain) +{ + struct hclge_dev *hdev = vport->back; + struct hnae3_ring_chain_node *node; + struct hclge_desc desc; + struct hclge_ctrl_vector_chain_cmd *req = + (struct hclge_ctrl_vector_chain_cmd *)desc.data; + enum hclge_comm_cmd_status status; + enum hclge_opcode_type op; + u16 tqp_type_and_id; + int i; + + op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR; + hclge_cmd_setup_basic_desc(&desc, op, false); + req->int_vector_id_l = hnae3_get_field(vector_id, + HCLGE_VECTOR_ID_L_M, + HCLGE_VECTOR_ID_L_S); + req->int_vector_id_h = hnae3_get_field(vector_id, + HCLGE_VECTOR_ID_H_M, + HCLGE_VECTOR_ID_H_S); + + i = 0; + for (node = ring_chain; node; node = node->next) { + tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]); + hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M, + HCLGE_INT_TYPE_S, + hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B)); + hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M, + HCLGE_TQP_ID_S, node->tqp_index); + hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M, + HCLGE_INT_GL_IDX_S, + hnae3_get_field(node->int_gl_idx, + HNAE3_RING_GL_IDX_M, + HNAE3_RING_GL_IDX_S)); + req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id); + if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) { + req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD; + req->vfid = vport->vport_id; + + status = hclge_cmd_send(&hdev->hw, &desc, 1); + if (status) { + dev_err(&hdev->pdev->dev, + "Map TQP fail, status is %d.\n", + status); + return -EIO; + } + i = 0; + + hclge_cmd_setup_basic_desc(&desc, + op, + false); + req->int_vector_id_l = + hnae3_get_field(vector_id, + HCLGE_VECTOR_ID_L_M, + HCLGE_VECTOR_ID_L_S); + req->int_vector_id_h = + hnae3_get_field(vector_id, + HCLGE_VECTOR_ID_H_M, + HCLGE_VECTOR_ID_H_S); + } + } + + if (i > 0) { + req->int_cause_num = i; + req->vfid = vport->vport_id; + status = hclge_cmd_send(&hdev->hw, &desc, 1); + if (status) { + dev_err(&hdev->pdev->dev, + "Map TQP fail, status is %d.\n", status); + return -EIO; + } + } + + return 0; +} + +static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector, + struct hnae3_ring_chain_node *ring_chain) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + int vector_id; + + vector_id = hclge_get_vector_index(hdev, vector); + if (vector_id < 0) { + dev_err(&hdev->pdev->dev, + "failed to get vector index. 
vector=%d\n", vector); + return vector_id; + } + + return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain); +} + +static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector, + struct hnae3_ring_chain_node *ring_chain) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + int vector_id, ret; + + if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) + return 0; + + vector_id = hclge_get_vector_index(hdev, vector); + if (vector_id < 0) { + dev_err(&handle->pdev->dev, + "Get vector index fail. ret =%d\n", vector_id); + return vector_id; + } + + ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain); + if (ret) + dev_err(&handle->pdev->dev, + "Unmap ring from vector fail. vectorid=%d, ret =%d\n", + vector_id, ret); + + return ret; +} + +static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, u8 vf_id, + bool en_uc, bool en_mc, bool en_bc) +{ + struct hclge_vport *vport = &hdev->vport[vf_id]; + struct hnae3_handle *handle = &vport->nic; + struct hclge_promisc_cfg_cmd *req; + struct hclge_desc desc; + bool uc_tx_en = en_uc; + u8 promisc_cfg = 0; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false); + + req = (struct hclge_promisc_cfg_cmd *)desc.data; + req->vf_id = vf_id; + + if (test_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags)) + uc_tx_en = false; + + hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_RX_EN, en_uc ? 1 : 0); + hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_RX_EN, en_mc ? 1 : 0); + hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_RX_EN, en_bc ? 1 : 0); + hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_TX_EN, uc_tx_en ? 1 : 0); + hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_TX_EN, en_mc ? 1 : 0); + hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_TX_EN, en_bc ? 1 : 0); + req->extend_promisc = promisc_cfg; + + /* to be compatible with DEVICE_VERSION_V1/2 */ + promisc_cfg = 0; + hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_UC, en_uc ? 1 : 0); + hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_MC, en_mc ? 1 : 0); + hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_BC, en_bc ? 1 : 0); + hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_TX_EN, 1); + hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_RX_EN, 1); + req->promisc = promisc_cfg; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "failed to set vport %u promisc mode, ret = %d.\n", + vf_id, ret); + + return ret; +} + +int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc, + bool en_mc_pmc, bool en_bc_pmc) +{ + return hclge_cmd_set_promisc_mode(vport->back, vport->vport_id, + en_uc_pmc, en_mc_pmc, en_bc_pmc); +} + +static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc, + bool en_mc_pmc) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + bool en_bc_pmc = true; + + /* For device whose version below V2, if broadcast promisc enabled, + * vlan filter is always bypassed. So broadcast promisc should be + * disabled until user enable promisc mode + */ + if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) + en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? 
true : false; + + return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc, + en_bc_pmc); +} + +static void hclge_request_update_promisc_mode(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + + set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state); +} + +static void hclge_sync_fd_state(struct hclge_dev *hdev) +{ + if (hlist_empty(&hdev->fd_rule_list)) + hdev->fd_active_type = HCLGE_FD_RULE_NONE; +} + +static void hclge_fd_inc_rule_cnt(struct hclge_dev *hdev, u16 location) +{ + if (!test_bit(location, hdev->fd_bmap)) { + set_bit(location, hdev->fd_bmap); + hdev->hclge_fd_rule_num++; + } +} + +static void hclge_fd_dec_rule_cnt(struct hclge_dev *hdev, u16 location) +{ + if (test_bit(location, hdev->fd_bmap)) { + clear_bit(location, hdev->fd_bmap); + hdev->hclge_fd_rule_num--; + } +} + +static void hclge_fd_free_node(struct hclge_dev *hdev, + struct hclge_fd_rule *rule) +{ + hlist_del(&rule->rule_node); + kfree(rule); + hclge_sync_fd_state(hdev); +} + +static void hclge_update_fd_rule_node(struct hclge_dev *hdev, + struct hclge_fd_rule *old_rule, + struct hclge_fd_rule *new_rule, + enum HCLGE_FD_NODE_STATE state) +{ + switch (state) { + case HCLGE_FD_TO_ADD: + case HCLGE_FD_ACTIVE: + /* 1) if the new state is TO_ADD, just replace the old rule + * with the same location, no matter its state, because the + * new rule will be configured to the hardware. + * 2) if the new state is ACTIVE, it means the new rule + * has been configured to the hardware, so just replace + * the old rule node with the same location. + * 3) for it doesn't add a new node to the list, so it's + * unnecessary to update the rule number and fd_bmap. + */ + new_rule->rule_node.next = old_rule->rule_node.next; + new_rule->rule_node.pprev = old_rule->rule_node.pprev; + memcpy(old_rule, new_rule, sizeof(*old_rule)); + kfree(new_rule); + break; + case HCLGE_FD_DELETED: + hclge_fd_dec_rule_cnt(hdev, old_rule->location); + hclge_fd_free_node(hdev, old_rule); + break; + case HCLGE_FD_TO_DEL: + /* if new request is TO_DEL, and old rule is existent + * 1) the state of old rule is TO_DEL, we need do nothing, + * because we delete rule by location, other rule content + * is unncessary. + * 2) the state of old rule is ACTIVE, we need to change its + * state to TO_DEL, so the rule will be deleted when periodic + * task being scheduled. + * 3) the state of old rule is TO_ADD, it means the rule hasn't + * been added to hardware, so we just delete the rule node from + * fd_rule_list directly. + */ + if (old_rule->state == HCLGE_FD_TO_ADD) { + hclge_fd_dec_rule_cnt(hdev, old_rule->location); + hclge_fd_free_node(hdev, old_rule); + return; + } + old_rule->state = HCLGE_FD_TO_DEL; + break; + } +} + +static struct hclge_fd_rule *hclge_find_fd_rule(struct hlist_head *hlist, + u16 location, + struct hclge_fd_rule **parent) +{ + struct hclge_fd_rule *rule; + struct hlist_node *node; + + hlist_for_each_entry_safe(rule, node, hlist, rule_node) { + if (rule->location == location) + return rule; + else if (rule->location > location) + return NULL; + /* record the parent node, use to keep the nodes in fd_rule_list + * in ascend order. 
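+ * Whenever this lookup returns NULL, *parent is left pointing at the
+ * last node whose location is smaller than the requested one (or stays
+ * NULL for an empty list), which is exactly where
+ * hclge_fd_insert_rule_node() links the new rule to keep the list
+ * sorted.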
+ */ + *parent = rule; + } + + return NULL; +} + +/* insert fd rule node in ascend order according to rule->location */ +static void hclge_fd_insert_rule_node(struct hlist_head *hlist, + struct hclge_fd_rule *rule, + struct hclge_fd_rule *parent) +{ + INIT_HLIST_NODE(&rule->rule_node); + + if (parent) + hlist_add_behind(&rule->rule_node, &parent->rule_node); + else + hlist_add_head(&rule->rule_node, hlist); +} + +static int hclge_fd_set_user_def_cmd(struct hclge_dev *hdev, + struct hclge_fd_user_def_cfg *cfg) +{ + struct hclge_fd_user_def_cfg_cmd *req; + struct hclge_desc desc; + u16 data = 0; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_USER_DEF_OP, false); + + req = (struct hclge_fd_user_def_cfg_cmd *)desc.data; + + hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[0].ref_cnt > 0); + hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M, + HCLGE_FD_USER_DEF_OFT_S, cfg[0].offset); + req->ol2_cfg = cpu_to_le16(data); + + data = 0; + hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[1].ref_cnt > 0); + hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M, + HCLGE_FD_USER_DEF_OFT_S, cfg[1].offset); + req->ol3_cfg = cpu_to_le16(data); + + data = 0; + hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[2].ref_cnt > 0); + hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M, + HCLGE_FD_USER_DEF_OFT_S, cfg[2].offset); + req->ol4_cfg = cpu_to_le16(data); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "failed to set fd user def data, ret= %d\n", ret); + return ret; +} + +static void hclge_sync_fd_user_def_cfg(struct hclge_dev *hdev, bool locked) +{ + int ret; + + if (!test_and_clear_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state)) + return; + + if (!locked) + spin_lock_bh(&hdev->fd_rule_lock); + + ret = hclge_fd_set_user_def_cmd(hdev, hdev->fd_cfg.user_def_cfg); + if (ret) + set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state); + + if (!locked) + spin_unlock_bh(&hdev->fd_rule_lock); +} + +static int hclge_fd_check_user_def_refcnt(struct hclge_dev *hdev, + struct hclge_fd_rule *rule) +{ + struct hlist_head *hlist = &hdev->fd_rule_list; + struct hclge_fd_rule *fd_rule, *parent = NULL; + struct hclge_fd_user_def_info *info, *old_info; + struct hclge_fd_user_def_cfg *cfg; + + if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE || + rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE) + return 0; + + /* for valid layer is start from 1, so need minus 1 to get the cfg */ + cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1]; + info = &rule->ep.user_def; + + if (!cfg->ref_cnt || cfg->offset == info->offset) + return 0; + + if (cfg->ref_cnt > 1) + goto error; + + fd_rule = hclge_find_fd_rule(hlist, rule->location, &parent); + if (fd_rule) { + old_info = &fd_rule->ep.user_def; + if (info->layer == old_info->layer) + return 0; + } + +error: + dev_err(&hdev->pdev->dev, + "No available offset for layer%d fd rule, each layer only support one user def offset.\n", + info->layer + 1); + return -ENOSPC; +} + +static void hclge_fd_inc_user_def_refcnt(struct hclge_dev *hdev, + struct hclge_fd_rule *rule) +{ + struct hclge_fd_user_def_cfg *cfg; + + if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE || + rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE) + return; + + cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1]; + if (!cfg->ref_cnt) { + cfg->offset = rule->ep.user_def.offset; + set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state); + } + cfg->ref_cnt++; +} + +static void hclge_fd_dec_user_def_refcnt(struct hclge_dev *hdev, + struct hclge_fd_rule *rule) +{ + 
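+ /* Counterpart of hclge_fd_inc_user_def_refcnt(): drop one reference
+ * on the layer's user-def offset and, once the last user is gone,
+ * clear the offset and mark the configuration as changed so it is
+ * re-synced to hardware later.
+ */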
struct hclge_fd_user_def_cfg *cfg; + + if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE || + rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE) + return; + + cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1]; + if (!cfg->ref_cnt) + return; + + cfg->ref_cnt--; + if (!cfg->ref_cnt) { + cfg->offset = 0; + set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state); + } +} + +static void hclge_update_fd_list(struct hclge_dev *hdev, + enum HCLGE_FD_NODE_STATE state, u16 location, + struct hclge_fd_rule *new_rule) +{ + struct hlist_head *hlist = &hdev->fd_rule_list; + struct hclge_fd_rule *fd_rule, *parent = NULL; + + fd_rule = hclge_find_fd_rule(hlist, location, &parent); + if (fd_rule) { + hclge_fd_dec_user_def_refcnt(hdev, fd_rule); + if (state == HCLGE_FD_ACTIVE) + hclge_fd_inc_user_def_refcnt(hdev, new_rule); + hclge_sync_fd_user_def_cfg(hdev, true); + + hclge_update_fd_rule_node(hdev, fd_rule, new_rule, state); + return; + } + + /* it's unlikely to fail here, because we have checked the rule + * exist before. + */ + if (unlikely(state == HCLGE_FD_TO_DEL || state == HCLGE_FD_DELETED)) { + dev_warn(&hdev->pdev->dev, + "failed to delete fd rule %u, it's inexistent\n", + location); + return; + } + + hclge_fd_inc_user_def_refcnt(hdev, new_rule); + hclge_sync_fd_user_def_cfg(hdev, true); + + hclge_fd_insert_rule_node(hlist, new_rule, parent); + hclge_fd_inc_rule_cnt(hdev, new_rule->location); + + if (state == HCLGE_FD_TO_ADD) { + set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state); + hclge_task_schedule(hdev, 0); + } +} + +static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode) +{ + struct hclge_get_fd_mode_cmd *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true); + + req = (struct hclge_get_fd_mode_cmd *)desc.data; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret); + return ret; + } + + *fd_mode = req->mode; + + return ret; +} + +static int hclge_get_fd_allocation(struct hclge_dev *hdev, + u32 *stage1_entry_num, + u32 *stage2_entry_num, + u16 *stage1_counter_num, + u16 *stage2_counter_num) +{ + struct hclge_get_fd_allocation_cmd *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true); + + req = (struct hclge_get_fd_allocation_cmd *)desc.data; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n", + ret); + return ret; + } + + *stage1_entry_num = le32_to_cpu(req->stage1_entry_num); + *stage2_entry_num = le32_to_cpu(req->stage2_entry_num); + *stage1_counter_num = le16_to_cpu(req->stage1_counter_num); + *stage2_counter_num = le16_to_cpu(req->stage2_counter_num); + + return ret; +} + +static int hclge_set_fd_key_config(struct hclge_dev *hdev, + enum HCLGE_FD_STAGE stage_num) +{ + struct hclge_set_fd_key_config_cmd *req; + struct hclge_fd_key_cfg *stage; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false); + + req = (struct hclge_set_fd_key_config_cmd *)desc.data; + stage = &hdev->fd_cfg.key_cfg[stage_num]; + req->stage = stage_num; + req->key_select = stage->key_sel; + req->inner_sipv6_word_en = stage->inner_sipv6_word_en; + req->inner_dipv6_word_en = stage->inner_dipv6_word_en; + req->outer_sipv6_word_en = stage->outer_sipv6_word_en; + req->outer_dipv6_word_en = stage->outer_dipv6_word_en; + req->tuple_mask = cpu_to_le32(~stage->tuple_active); + req->meta_data_mask = 
cpu_to_le32(~stage->meta_data_active); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret); + + return ret; +} + +static void hclge_fd_disable_user_def(struct hclge_dev *hdev) +{ + struct hclge_fd_user_def_cfg *cfg = hdev->fd_cfg.user_def_cfg; + + spin_lock_bh(&hdev->fd_rule_lock); + memset(cfg, 0, sizeof(hdev->fd_cfg.user_def_cfg)); + spin_unlock_bh(&hdev->fd_rule_lock); + + hclge_fd_set_user_def_cmd(hdev, cfg); +} + +static int hclge_init_fd_config(struct hclge_dev *hdev) +{ +#define LOW_2_WORDS 0x03 + struct hclge_fd_key_cfg *key_cfg; + int ret; + + if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) + return 0; + + ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode); + if (ret) + return ret; + + switch (hdev->fd_cfg.fd_mode) { + case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1: + hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH; + break; + case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1: + hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2; + break; + default: + dev_err(&hdev->pdev->dev, + "Unsupported flow director mode %u\n", + hdev->fd_cfg.fd_mode); + return -EOPNOTSUPP; + } + + key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1]; + key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE; + key_cfg->inner_sipv6_word_en = LOW_2_WORDS; + key_cfg->inner_dipv6_word_en = LOW_2_WORDS; + key_cfg->outer_sipv6_word_en = 0; + key_cfg->outer_dipv6_word_en = 0; + + key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) | + BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) | + BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) | + BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT); + + /* If use max 400bit key, we can support tuples for ether type */ + if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) { + key_cfg->tuple_active |= + BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC); + if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) + key_cfg->tuple_active |= HCLGE_FD_TUPLE_USER_DEF_TUPLES; + } + + /* roce_type is used to filter roce frames + * dst_vport is used to specify the rule + */ + key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT); + + ret = hclge_get_fd_allocation(hdev, + &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1], + &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2], + &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1], + &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]); + if (ret) + return ret; + + return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1); +} + +static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x, + int loc, u8 *key, bool is_add) +{ + struct hclge_fd_tcam_config_1_cmd *req1; + struct hclge_fd_tcam_config_2_cmd *req2; + struct hclge_fd_tcam_config_3_cmd *req3; + struct hclge_desc desc[3]; + int ret; + + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false); + desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false); + desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false); + + req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data; + req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data; + req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data; + + req1->stage = stage; + req1->xy_sel = sel_x ? 1 : 0; + hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0); + req1->index = cpu_to_le32(loc); + req1->entry_vld = sel_x ? 
is_add : 0; + + if (key) { + memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data)); + memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)], + sizeof(req2->tcam_data)); + memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) + + sizeof(req2->tcam_data)], sizeof(req3->tcam_data)); + } + + ret = hclge_cmd_send(&hdev->hw, desc, 3); + if (ret) + dev_err(&hdev->pdev->dev, + "config tcam key fail, ret=%d\n", + ret); + + return ret; +} + +static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc, + struct hclge_fd_ad_data *action) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); + struct hclge_fd_ad_config_cmd *req; + struct hclge_desc desc; + u64 ad_data = 0; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false); + + req = (struct hclge_fd_ad_config_cmd *)desc.data; + req->index = cpu_to_le32(loc); + req->stage = stage; + + hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B, + action->write_rule_id_to_bd); + hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S, + action->rule_id); + if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) { + hnae3_set_bit(ad_data, HCLGE_FD_AD_TC_OVRD_B, + action->override_tc); + hnae3_set_field(ad_data, HCLGE_FD_AD_TC_SIZE_M, + HCLGE_FD_AD_TC_SIZE_S, (u32)action->tc_size); + } + ad_data <<= 32; + hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet); + hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B, + action->forward_to_direct_queue); + hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S, + action->queue_id); + hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter); + hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M, + HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id); + hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage); + hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S, + action->counter_id); + + req->ad_data = cpu_to_le64(ad_data); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret); + + return ret; +} + +static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y, + struct hclge_fd_rule *rule) +{ + int offset, moffset, ip_offset; + enum HCLGE_FD_KEY_OPT key_opt; + u16 tmp_x_s, tmp_y_s; + u32 tmp_x_l, tmp_y_l; + u8 *p = (u8 *)rule; + int i; + + if (rule->unused_tuple & BIT(tuple_bit)) + return true; + + key_opt = tuple_key_info[tuple_bit].key_opt; + offset = tuple_key_info[tuple_bit].offset; + moffset = tuple_key_info[tuple_bit].moffset; + + switch (key_opt) { + case KEY_OPT_U8: + calc_x(*key_x, p[offset], p[moffset]); + calc_y(*key_y, p[offset], p[moffset]); + + return true; + case KEY_OPT_LE16: + calc_x(tmp_x_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset])); + calc_y(tmp_y_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset])); + *(__le16 *)key_x = cpu_to_le16(tmp_x_s); + *(__le16 *)key_y = cpu_to_le16(tmp_y_s); + + return true; + case KEY_OPT_LE32: + calc_x(tmp_x_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset])); + calc_y(tmp_y_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset])); + *(__le32 *)key_x = cpu_to_le32(tmp_x_l); + *(__le32 *)key_y = cpu_to_le32(tmp_y_l); + + return true; + case KEY_OPT_MAC: + for (i = 0; i < ETH_ALEN; i++) { + calc_x(key_x[ETH_ALEN - 1 - i], p[offset + i], + p[moffset + i]); + calc_y(key_y[ETH_ALEN - 1 - i], p[offset + i], + p[moffset + i]); + } + + return true; + case KEY_OPT_IP: + ip_offset = IPV4_INDEX * sizeof(u32); + calc_x(tmp_x_l, *(u32 *)(&p[offset + ip_offset]), + *(u32 *)(&p[moffset + ip_offset])); + 
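+ /* calc_y() below builds the companion Y pattern from the same
+ * value/mask pair; hclge_config_key() later programs key_x with sel_x
+ * set and key_y with sel_x cleared.
+ */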
calc_y(tmp_y_l, *(u32 *)(&p[offset + ip_offset]), + *(u32 *)(&p[moffset + ip_offset])); + *(__le32 *)key_x = cpu_to_le32(tmp_x_l); + *(__le32 *)key_y = cpu_to_le32(tmp_y_l); + + return true; + default: + return false; + } +} + +static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id, + u8 vf_id, u8 network_port_id) +{ + u32 port_number = 0; + + if (port_type == HOST_PORT) { + hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S, + pf_id); + hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S, + vf_id); + hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT); + } else { + hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M, + HCLGE_NETWORK_PORT_ID_S, network_port_id); + hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT); + } + + return port_number; +} + +static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg, + __le32 *key_x, __le32 *key_y, + struct hclge_fd_rule *rule) +{ + u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number; + u8 cur_pos = 0, tuple_size, shift_bits; + unsigned int i; + + for (i = 0; i < MAX_META_DATA; i++) { + tuple_size = meta_data_key_info[i].key_length; + tuple_bit = key_cfg->meta_data_active & BIT(i); + + switch (tuple_bit) { + case BIT(ROCE_TYPE): + hnae3_set_bit(meta_data, cur_pos, NIC_PACKET); + cur_pos += tuple_size; + break; + case BIT(DST_VPORT): + port_number = hclge_get_port_number(HOST_PORT, 0, + rule->vf_id, 0); + hnae3_set_field(meta_data, + GENMASK(cur_pos + tuple_size, cur_pos), + cur_pos, port_number); + cur_pos += tuple_size; + break; + default: + break; + } + } + + calc_x(tmp_x, meta_data, 0xFFFFFFFF); + calc_y(tmp_y, meta_data, 0xFFFFFFFF); + shift_bits = sizeof(meta_data) * 8 - cur_pos; + + *key_x = cpu_to_le32(tmp_x << shift_bits); + *key_y = cpu_to_le32(tmp_y << shift_bits); +} + +/* A complete key is combined with meta data key and tuple key. + * Meta data key is stored at the MSB region, and tuple key is stored at + * the LSB region, unused bits will be filled 0. 
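+ * The meta data region therefore starts at
+ * max_key_length / 8 - MAX_META_DATA_LENGTH / 8 bytes into the key
+ * buffer, as computed below before hclge_fd_convert_meta_data().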
+ */ +static int hclge_config_key(struct hclge_dev *hdev, u8 stage, + struct hclge_fd_rule *rule) +{ + struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage]; + u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES]; + u8 *cur_key_x, *cur_key_y; + u8 meta_data_region; + u8 tuple_size; + int ret; + u32 i; + + memset(key_x, 0, sizeof(key_x)); + memset(key_y, 0, sizeof(key_y)); + cur_key_x = key_x; + cur_key_y = key_y; + + for (i = 0; i < MAX_TUPLE; i++) { + bool tuple_valid; + + tuple_size = tuple_key_info[i].key_length / 8; + if (!(key_cfg->tuple_active & BIT(i))) + continue; + + tuple_valid = hclge_fd_convert_tuple(i, cur_key_x, + cur_key_y, rule); + if (tuple_valid) { + cur_key_x += tuple_size; + cur_key_y += tuple_size; + } + } + + meta_data_region = hdev->fd_cfg.max_key_length / 8 - + MAX_META_DATA_LENGTH / 8; + + hclge_fd_convert_meta_data(key_cfg, + (__le32 *)(key_x + meta_data_region), + (__le32 *)(key_y + meta_data_region), + rule); + + ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y, + true); + if (ret) { + dev_err(&hdev->pdev->dev, + "fd key_y config fail, loc=%u, ret=%d\n", + rule->queue_id, ret); + return ret; + } + + ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x, + true); + if (ret) + dev_err(&hdev->pdev->dev, + "fd key_x config fail, loc=%u, ret=%d\n", + rule->queue_id, ret); + return ret; +} + +static int hclge_config_action(struct hclge_dev *hdev, u8 stage, + struct hclge_fd_rule *rule) +{ + struct hclge_vport *vport = hdev->vport; + struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; + struct hclge_fd_ad_data ad_data; + + memset(&ad_data, 0, sizeof(struct hclge_fd_ad_data)); + ad_data.ad_id = rule->location; + + if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) { + ad_data.drop_packet = true; + } else if (rule->action == HCLGE_FD_ACTION_SELECT_TC) { + ad_data.override_tc = true; + ad_data.queue_id = + kinfo->tc_info.tqp_offset[rule->cls_flower.tc]; + ad_data.tc_size = + ilog2(kinfo->tc_info.tqp_count[rule->cls_flower.tc]); + } else { + ad_data.forward_to_direct_queue = true; + ad_data.queue_id = rule->queue_id; + } + + if (hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1]) { + ad_data.use_counter = true; + ad_data.counter_id = rule->vf_id % + hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1]; + } else { + ad_data.use_counter = false; + ad_data.counter_id = 0; + } + + ad_data.use_next_stage = false; + ad_data.next_input_key = 0; + + ad_data.write_rule_id_to_bd = true; + ad_data.rule_id = rule->location; + + return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data); +} + +static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec, + u32 *unused_tuple) +{ + if (!spec || !unused_tuple) + return -EINVAL; + + *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC); + + if (!spec->ip4src) + *unused_tuple |= BIT(INNER_SRC_IP); + + if (!spec->ip4dst) + *unused_tuple |= BIT(INNER_DST_IP); + + if (!spec->psrc) + *unused_tuple |= BIT(INNER_SRC_PORT); + + if (!spec->pdst) + *unused_tuple |= BIT(INNER_DST_PORT); + + if (!spec->tos) + *unused_tuple |= BIT(INNER_IP_TOS); + + return 0; +} + +static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec, + u32 *unused_tuple) +{ + if (!spec || !unused_tuple) + return -EINVAL; + + *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) | + BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT); + + if (!spec->ip4src) + *unused_tuple |= BIT(INNER_SRC_IP); + + if (!spec->ip4dst) + *unused_tuple |= BIT(INNER_DST_IP); + + if (!spec->tos) + *unused_tuple |= BIT(INNER_IP_TOS); + + if (!spec->proto) + 
*unused_tuple |= BIT(INNER_IP_PROTO); + + if (spec->l4_4_bytes) + return -EOPNOTSUPP; + + if (spec->ip_ver != ETH_RX_NFC_IP4) + return -EOPNOTSUPP; + + return 0; +} + +static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec, + u32 *unused_tuple) +{ + if (!spec || !unused_tuple) + return -EINVAL; + + *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC); + + /* check whether src/dst ip address used */ + if (ipv6_addr_any((struct in6_addr *)spec->ip6src)) + *unused_tuple |= BIT(INNER_SRC_IP); + + if (ipv6_addr_any((struct in6_addr *)spec->ip6dst)) + *unused_tuple |= BIT(INNER_DST_IP); + + if (!spec->psrc) + *unused_tuple |= BIT(INNER_SRC_PORT); + + if (!spec->pdst) + *unused_tuple |= BIT(INNER_DST_PORT); + + if (!spec->tclass) + *unused_tuple |= BIT(INNER_IP_TOS); + + return 0; +} + +static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec, + u32 *unused_tuple) +{ + if (!spec || !unused_tuple) + return -EINVAL; + + *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) | + BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT); + + /* check whether src/dst ip address used */ + if (ipv6_addr_any((struct in6_addr *)spec->ip6src)) + *unused_tuple |= BIT(INNER_SRC_IP); + + if (ipv6_addr_any((struct in6_addr *)spec->ip6dst)) + *unused_tuple |= BIT(INNER_DST_IP); + + if (!spec->l4_proto) + *unused_tuple |= BIT(INNER_IP_PROTO); + + if (!spec->tclass) + *unused_tuple |= BIT(INNER_IP_TOS); + + if (spec->l4_4_bytes) + return -EOPNOTSUPP; + + return 0; +} + +static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple) +{ + if (!spec || !unused_tuple) + return -EINVAL; + + *unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) | + BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) | + BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO); + + if (is_zero_ether_addr(spec->h_source)) + *unused_tuple |= BIT(INNER_SRC_MAC); + + if (is_zero_ether_addr(spec->h_dest)) + *unused_tuple |= BIT(INNER_DST_MAC); + + if (!spec->h_proto) + *unused_tuple |= BIT(INNER_ETH_TYPE); + + return 0; +} + +static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev, + struct ethtool_rx_flow_spec *fs, + u32 *unused_tuple) +{ + if (fs->flow_type & FLOW_EXT) { + if (fs->h_ext.vlan_etype) { + dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n"); + return -EOPNOTSUPP; + } + + if (!fs->h_ext.vlan_tci) + *unused_tuple |= BIT(INNER_VLAN_TAG_FST); + + if (fs->m_ext.vlan_tci && + be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) { + dev_err(&hdev->pdev->dev, + "failed to config vlan_tci, invalid vlan_tci: %u, max is %d.\n", + ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1); + return -EINVAL; + } + } else { + *unused_tuple |= BIT(INNER_VLAN_TAG_FST); + } + + if (fs->flow_type & FLOW_MAC_EXT) { + if (hdev->fd_cfg.fd_mode != + HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) { + dev_err(&hdev->pdev->dev, + "FLOW_MAC_EXT is not supported in current fd mode!\n"); + return -EOPNOTSUPP; + } + + if (is_zero_ether_addr(fs->h_ext.h_dest)) + *unused_tuple |= BIT(INNER_DST_MAC); + else + *unused_tuple &= ~BIT(INNER_DST_MAC); + } + + return 0; +} + +static int hclge_fd_get_user_def_layer(u32 flow_type, u32 *unused_tuple, + struct hclge_fd_user_def_info *info) +{ + switch (flow_type) { + case ETHER_FLOW: + info->layer = HCLGE_FD_USER_DEF_L2; + *unused_tuple &= ~BIT(INNER_L2_RSV); + break; + case IP_USER_FLOW: + case IPV6_USER_FLOW: + info->layer = HCLGE_FD_USER_DEF_L3; + *unused_tuple &= ~BIT(INNER_L3_RSV); + break; + case TCP_V4_FLOW: + case UDP_V4_FLOW: + case TCP_V6_FLOW: + case UDP_V6_FLOW: + info->layer = HCLGE_FD_USER_DEF_L4; + 
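+ /* clearing the reserved-tuple bit lets the user-def bytes be encoded
+ * into the lookup key instead of being left zeroed as an unused tuple
+ */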
*unused_tuple &= ~BIT(INNER_L4_RSV); + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static bool hclge_fd_is_user_def_all_masked(struct ethtool_rx_flow_spec *fs) +{ + return be32_to_cpu(fs->m_ext.data[1] | fs->m_ext.data[0]) == 0; +} + +static int hclge_fd_parse_user_def_field(struct hclge_dev *hdev, + struct ethtool_rx_flow_spec *fs, + u32 *unused_tuple, + struct hclge_fd_user_def_info *info) +{ + u32 tuple_active = hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1].tuple_active; + u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT); + u16 data, offset, data_mask, offset_mask; + int ret; + + info->layer = HCLGE_FD_USER_DEF_NONE; + *unused_tuple |= HCLGE_FD_TUPLE_USER_DEF_TUPLES; + + if (!(fs->flow_type & FLOW_EXT) || hclge_fd_is_user_def_all_masked(fs)) + return 0; + + /* user-def data from ethtool is 64 bit value, the bit0~15 is used + * for data, and bit32~47 is used for offset. + */ + data = be32_to_cpu(fs->h_ext.data[1]) & HCLGE_FD_USER_DEF_DATA; + data_mask = be32_to_cpu(fs->m_ext.data[1]) & HCLGE_FD_USER_DEF_DATA; + offset = be32_to_cpu(fs->h_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET; + offset_mask = be32_to_cpu(fs->m_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET; + + if (!(tuple_active & HCLGE_FD_TUPLE_USER_DEF_TUPLES)) { + dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n"); + return -EOPNOTSUPP; + } + + if (offset > HCLGE_FD_MAX_USER_DEF_OFFSET) { + dev_err(&hdev->pdev->dev, + "user-def offset[%u] should be no more than %u\n", + offset, HCLGE_FD_MAX_USER_DEF_OFFSET); + return -EINVAL; + } + + if (offset_mask != HCLGE_FD_USER_DEF_OFFSET_UNMASK) { + dev_err(&hdev->pdev->dev, "user-def offset can't be masked\n"); + return -EINVAL; + } + + ret = hclge_fd_get_user_def_layer(flow_type, unused_tuple, info); + if (ret) { + dev_err(&hdev->pdev->dev, + "unsupported flow type for user-def bytes, ret = %d\n", + ret); + return ret; + } + + info->data = data; + info->data_mask = data_mask; + info->offset = offset; + + return 0; +} + +static int hclge_fd_check_spec(struct hclge_dev *hdev, + struct ethtool_rx_flow_spec *fs, + u32 *unused_tuple, + struct hclge_fd_user_def_info *info) +{ + u32 flow_type; + int ret; + + if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) { + dev_err(&hdev->pdev->dev, + "failed to config fd rules, invalid rule location: %u, max is %u\n.", + fs->location, + hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1); + return -EINVAL; + } + + ret = hclge_fd_parse_user_def_field(hdev, fs, unused_tuple, info); + if (ret) + return ret; + + flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT); + switch (flow_type) { + case SCTP_V4_FLOW: + case TCP_V4_FLOW: + case UDP_V4_FLOW: + ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec, + unused_tuple); + break; + case IP_USER_FLOW: + ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec, + unused_tuple); + break; + case SCTP_V6_FLOW: + case TCP_V6_FLOW: + case UDP_V6_FLOW: + ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec, + unused_tuple); + break; + case IPV6_USER_FLOW: + ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec, + unused_tuple); + break; + case ETHER_FLOW: + if (hdev->fd_cfg.fd_mode != + HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) { + dev_err(&hdev->pdev->dev, + "ETHER_FLOW is not supported in current fd mode!\n"); + return -EOPNOTSUPP; + } + + ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec, + unused_tuple); + break; + default: + dev_err(&hdev->pdev->dev, + "unsupported protocol type, protocol type = %#x\n", + flow_type); + return -EOPNOTSUPP; + } + + if (ret) { + 
dev_err(&hdev->pdev->dev, + "failed to check flow union tuple, ret = %d\n", + ret); + return ret; + } + + return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple); +} + +static void hclge_fd_get_tcpip4_tuple(struct hclge_dev *hdev, + struct ethtool_rx_flow_spec *fs, + struct hclge_fd_rule *rule, u8 ip_proto) +{ + rule->tuples.src_ip[IPV4_INDEX] = + be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src); + rule->tuples_mask.src_ip[IPV4_INDEX] = + be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src); + + rule->tuples.dst_ip[IPV4_INDEX] = + be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst); + rule->tuples_mask.dst_ip[IPV4_INDEX] = + be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst); + + rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc); + rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc); + + rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst); + rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst); + + rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos; + rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos; + + rule->tuples.ether_proto = ETH_P_IP; + rule->tuples_mask.ether_proto = 0xFFFF; + + rule->tuples.ip_proto = ip_proto; + rule->tuples_mask.ip_proto = 0xFF; +} + +static void hclge_fd_get_ip4_tuple(struct hclge_dev *hdev, + struct ethtool_rx_flow_spec *fs, + struct hclge_fd_rule *rule) +{ + rule->tuples.src_ip[IPV4_INDEX] = + be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src); + rule->tuples_mask.src_ip[IPV4_INDEX] = + be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src); + + rule->tuples.dst_ip[IPV4_INDEX] = + be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst); + rule->tuples_mask.dst_ip[IPV4_INDEX] = + be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst); + + rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos; + rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos; + + rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto; + rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto; + + rule->tuples.ether_proto = ETH_P_IP; + rule->tuples_mask.ether_proto = 0xFFFF; +} + +static void hclge_fd_get_tcpip6_tuple(struct hclge_dev *hdev, + struct ethtool_rx_flow_spec *fs, + struct hclge_fd_rule *rule, u8 ip_proto) +{ + be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.tcp_ip6_spec.ip6src, + IPV6_SIZE); + be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.tcp_ip6_spec.ip6src, + IPV6_SIZE); + + be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.tcp_ip6_spec.ip6dst, + IPV6_SIZE); + be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.tcp_ip6_spec.ip6dst, + IPV6_SIZE); + + rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc); + rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc); + + rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst); + rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst); + + rule->tuples.ether_proto = ETH_P_IPV6; + rule->tuples_mask.ether_proto = 0xFFFF; + + rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass; + rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass; + + rule->tuples.ip_proto = ip_proto; + rule->tuples_mask.ip_proto = 0xFF; +} + +static void hclge_fd_get_ip6_tuple(struct hclge_dev *hdev, + struct ethtool_rx_flow_spec *fs, + struct hclge_fd_rule *rule) +{ + be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.usr_ip6_spec.ip6src, + IPV6_SIZE); + be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.usr_ip6_spec.ip6src, + IPV6_SIZE); + + be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.usr_ip6_spec.ip6dst, + IPV6_SIZE); + be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.usr_ip6_spec.ip6dst, + IPV6_SIZE); + + rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto; + 
rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto; + + rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass; + rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass; + + rule->tuples.ether_proto = ETH_P_IPV6; + rule->tuples_mask.ether_proto = 0xFFFF; +} + +static void hclge_fd_get_ether_tuple(struct hclge_dev *hdev, + struct ethtool_rx_flow_spec *fs, + struct hclge_fd_rule *rule) +{ + ether_addr_copy(rule->tuples.src_mac, fs->h_u.ether_spec.h_source); + ether_addr_copy(rule->tuples_mask.src_mac, fs->m_u.ether_spec.h_source); + + ether_addr_copy(rule->tuples.dst_mac, fs->h_u.ether_spec.h_dest); + ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_u.ether_spec.h_dest); + + rule->tuples.ether_proto = be16_to_cpu(fs->h_u.ether_spec.h_proto); + rule->tuples_mask.ether_proto = be16_to_cpu(fs->m_u.ether_spec.h_proto); +} + +static void hclge_fd_get_user_def_tuple(struct hclge_fd_user_def_info *info, + struct hclge_fd_rule *rule) +{ + switch (info->layer) { + case HCLGE_FD_USER_DEF_L2: + rule->tuples.l2_user_def = info->data; + rule->tuples_mask.l2_user_def = info->data_mask; + break; + case HCLGE_FD_USER_DEF_L3: + rule->tuples.l3_user_def = info->data; + rule->tuples_mask.l3_user_def = info->data_mask; + break; + case HCLGE_FD_USER_DEF_L4: + rule->tuples.l4_user_def = (u32)info->data << 16; + rule->tuples_mask.l4_user_def = (u32)info->data_mask << 16; + break; + default: + break; + } + + rule->ep.user_def = *info; +} + +static int hclge_fd_get_tuple(struct hclge_dev *hdev, + struct ethtool_rx_flow_spec *fs, + struct hclge_fd_rule *rule, + struct hclge_fd_user_def_info *info) +{ + u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT); + + switch (flow_type) { + case SCTP_V4_FLOW: + hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_SCTP); + break; + case TCP_V4_FLOW: + hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_TCP); + break; + case UDP_V4_FLOW: + hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_UDP); + break; + case IP_USER_FLOW: + hclge_fd_get_ip4_tuple(hdev, fs, rule); + break; + case SCTP_V6_FLOW: + hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_SCTP); + break; + case TCP_V6_FLOW: + hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_TCP); + break; + case UDP_V6_FLOW: + hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_UDP); + break; + case IPV6_USER_FLOW: + hclge_fd_get_ip6_tuple(hdev, fs, rule); + break; + case ETHER_FLOW: + hclge_fd_get_ether_tuple(hdev, fs, rule); + break; + default: + return -EOPNOTSUPP; + } + + if (fs->flow_type & FLOW_EXT) { + rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci); + rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci); + hclge_fd_get_user_def_tuple(info, rule); + } + + if (fs->flow_type & FLOW_MAC_EXT) { + ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest); + ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest); + } + + return 0; +} + +static int hclge_fd_config_rule(struct hclge_dev *hdev, + struct hclge_fd_rule *rule) +{ + int ret; + + ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule); + if (ret) + return ret; + + return hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule); +} + +static int hclge_add_fd_entry_common(struct hclge_dev *hdev, + struct hclge_fd_rule *rule) +{ + int ret; + + spin_lock_bh(&hdev->fd_rule_lock); + + if (hdev->fd_active_type != rule->rule_type && + (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE || + hdev->fd_active_type == HCLGE_FD_EP_ACTIVE)) { + dev_err(&hdev->pdev->dev, + "mode conflict(new type %d, active type %d), please delete existent rules first\n", + rule->rule_type, 
hdev->fd_active_type); + spin_unlock_bh(&hdev->fd_rule_lock); + return -EINVAL; + } + + ret = hclge_fd_check_user_def_refcnt(hdev, rule); + if (ret) + goto out; + + ret = hclge_clear_arfs_rules(hdev); + if (ret) + goto out; + + ret = hclge_fd_config_rule(hdev, rule); + if (ret) + goto out; + + rule->state = HCLGE_FD_ACTIVE; + hdev->fd_active_type = rule->rule_type; + hclge_update_fd_list(hdev, rule->state, rule->location, rule); + +out: + spin_unlock_bh(&hdev->fd_rule_lock); + return ret; +} + +static bool hclge_is_cls_flower_active(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE; +} + +static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie, + u16 *vport_id, u8 *action, u16 *queue_id) +{ + struct hclge_vport *vport = hdev->vport; + + if (ring_cookie == RX_CLS_FLOW_DISC) { + *action = HCLGE_FD_ACTION_DROP_PACKET; + } else { + u32 ring = ethtool_get_flow_spec_ring(ring_cookie); + u8 vf = ethtool_get_flow_spec_ring_vf(ring_cookie); + u16 tqps; + + /* To keep consistent with user's configuration, minus 1 when + * printing 'vf', because vf id from ethtool is added 1 for vf. + */ + if (vf > hdev->num_req_vfs) { + dev_err(&hdev->pdev->dev, + "Error: vf id (%u) should be less than %u\n", + vf - 1U, hdev->num_req_vfs); + return -EINVAL; + } + + *vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id; + tqps = hdev->vport[vf].nic.kinfo.num_tqps; + + if (ring >= tqps) { + dev_err(&hdev->pdev->dev, + "Error: queue id (%u) > max tqp num (%u)\n", + ring, tqps - 1U); + return -EINVAL; + } + + *action = HCLGE_FD_ACTION_SELECT_QUEUE; + *queue_id = ring; + } + + return 0; +} + +static int hclge_add_fd_entry(struct hnae3_handle *handle, + struct ethtool_rxnfc *cmd) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct hclge_fd_user_def_info info; + u16 dst_vport_id = 0, q_index = 0; + struct ethtool_rx_flow_spec *fs; + struct hclge_fd_rule *rule; + u32 unused = 0; + u8 action; + int ret; + + if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) { + dev_err(&hdev->pdev->dev, + "flow table director is not supported\n"); + return -EOPNOTSUPP; + } + + if (!hdev->fd_en) { + dev_err(&hdev->pdev->dev, + "please enable flow director first\n"); + return -EOPNOTSUPP; + } + + fs = (struct ethtool_rx_flow_spec *)&cmd->fs; + + ret = hclge_fd_check_spec(hdev, fs, &unused, &info); + if (ret) + return ret; + + ret = hclge_fd_parse_ring_cookie(hdev, fs->ring_cookie, &dst_vport_id, + &action, &q_index); + if (ret) + return ret; + + rule = kzalloc(sizeof(*rule), GFP_KERNEL); + if (!rule) + return -ENOMEM; + + ret = hclge_fd_get_tuple(hdev, fs, rule, &info); + if (ret) { + kfree(rule); + return ret; + } + + rule->flow_type = fs->flow_type; + rule->location = fs->location; + rule->unused_tuple = unused; + rule->vf_id = dst_vport_id; + rule->queue_id = q_index; + rule->action = action; + rule->rule_type = HCLGE_FD_EP_ACTIVE; + + ret = hclge_add_fd_entry_common(hdev, rule); + if (ret) + kfree(rule); + + return ret; +} + +static int hclge_del_fd_entry(struct hnae3_handle *handle, + struct ethtool_rxnfc *cmd) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct ethtool_rx_flow_spec *fs; + int ret; + + if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) + return -EOPNOTSUPP; + + fs = (struct ethtool_rx_flow_spec *)&cmd->fs; + + if (fs->location >= 
hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) + return -EINVAL; + + spin_lock_bh(&hdev->fd_rule_lock); + if (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE || + !test_bit(fs->location, hdev->fd_bmap)) { + dev_err(&hdev->pdev->dev, + "Delete fail, rule %u is inexistent\n", fs->location); + spin_unlock_bh(&hdev->fd_rule_lock); + return -ENOENT; + } + + ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location, + NULL, false); + if (ret) + goto out; + + hclge_update_fd_list(hdev, HCLGE_FD_DELETED, fs->location, NULL); + +out: + spin_unlock_bh(&hdev->fd_rule_lock); + return ret; +} + +static void hclge_clear_fd_rules_in_list(struct hclge_dev *hdev, + bool clear_list) +{ + struct hclge_fd_rule *rule; + struct hlist_node *node; + u16 location; + + spin_lock_bh(&hdev->fd_rule_lock); + + for_each_set_bit(location, hdev->fd_bmap, + hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) + hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location, + NULL, false); + + if (clear_list) { + hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, + rule_node) { + hlist_del(&rule->rule_node); + kfree(rule); + } + hdev->fd_active_type = HCLGE_FD_RULE_NONE; + hdev->hclge_fd_rule_num = 0; + bitmap_zero(hdev->fd_bmap, + hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]); + } + + spin_unlock_bh(&hdev->fd_rule_lock); +} + +static void hclge_del_all_fd_entries(struct hclge_dev *hdev) +{ + if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) + return; + + hclge_clear_fd_rules_in_list(hdev, true); + hclge_fd_disable_user_def(hdev); +} + +static int hclge_restore_fd_entries(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct hclge_fd_rule *rule; + struct hlist_node *node; + + /* Return ok here, because reset error handling will check this + * return value. If error is returned here, the reset process will + * fail. + */ + if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) + return 0; + + /* if fd is disabled, should not restore it when reset */ + if (!hdev->fd_en) + return 0; + + spin_lock_bh(&hdev->fd_rule_lock); + hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { + if (rule->state == HCLGE_FD_ACTIVE) + rule->state = HCLGE_FD_TO_ADD; + } + spin_unlock_bh(&hdev->fd_rule_lock); + set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state); + + return 0; +} + +static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle, + struct ethtool_rxnfc *cmd) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + if (!hnae3_ae_dev_fd_supported(hdev->ae_dev) || hclge_is_cls_flower_active(handle)) + return -EOPNOTSUPP; + + cmd->rule_cnt = hdev->hclge_fd_rule_num; + cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]; + + return 0; +} + +static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule, + struct ethtool_tcpip4_spec *spec, + struct ethtool_tcpip4_spec *spec_mask) +{ + spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]); + spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ? + 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]); + + spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]); + spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ? + 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]); + + spec->psrc = cpu_to_be16(rule->tuples.src_port); + spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ? 
+ 0 : cpu_to_be16(rule->tuples_mask.src_port); + + spec->pdst = cpu_to_be16(rule->tuples.dst_port); + spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ? + 0 : cpu_to_be16(rule->tuples_mask.dst_port); + + spec->tos = rule->tuples.ip_tos; + spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ? + 0 : rule->tuples_mask.ip_tos; +} + +static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule, + struct ethtool_usrip4_spec *spec, + struct ethtool_usrip4_spec *spec_mask) +{ + spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]); + spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ? + 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]); + + spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]); + spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ? + 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]); + + spec->tos = rule->tuples.ip_tos; + spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ? + 0 : rule->tuples_mask.ip_tos; + + spec->proto = rule->tuples.ip_proto; + spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ? + 0 : rule->tuples_mask.ip_proto; + + spec->ip_ver = ETH_RX_NFC_IP4; +} + +static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule, + struct ethtool_tcpip6_spec *spec, + struct ethtool_tcpip6_spec *spec_mask) +{ + cpu_to_be32_array(spec->ip6src, + rule->tuples.src_ip, IPV6_SIZE); + cpu_to_be32_array(spec->ip6dst, + rule->tuples.dst_ip, IPV6_SIZE); + if (rule->unused_tuple & BIT(INNER_SRC_IP)) + memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src)); + else + cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip, + IPV6_SIZE); + + if (rule->unused_tuple & BIT(INNER_DST_IP)) + memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst)); + else + cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip, + IPV6_SIZE); + + spec->tclass = rule->tuples.ip_tos; + spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ? + 0 : rule->tuples_mask.ip_tos; + + spec->psrc = cpu_to_be16(rule->tuples.src_port); + spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ? + 0 : cpu_to_be16(rule->tuples_mask.src_port); + + spec->pdst = cpu_to_be16(rule->tuples.dst_port); + spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ? + 0 : cpu_to_be16(rule->tuples_mask.dst_port); +} + +static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule, + struct ethtool_usrip6_spec *spec, + struct ethtool_usrip6_spec *spec_mask) +{ + cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE); + cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE); + if (rule->unused_tuple & BIT(INNER_SRC_IP)) + memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src)); + else + cpu_to_be32_array(spec_mask->ip6src, + rule->tuples_mask.src_ip, IPV6_SIZE); + + if (rule->unused_tuple & BIT(INNER_DST_IP)) + memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst)); + else + cpu_to_be32_array(spec_mask->ip6dst, + rule->tuples_mask.dst_ip, IPV6_SIZE); + + spec->tclass = rule->tuples.ip_tos; + spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ? + 0 : rule->tuples_mask.ip_tos; + + spec->l4_proto = rule->tuples.ip_proto; + spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ? 
+ 0 : rule->tuples_mask.ip_proto; +} + +static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule, + struct ethhdr *spec, + struct ethhdr *spec_mask) +{ + ether_addr_copy(spec->h_source, rule->tuples.src_mac); + ether_addr_copy(spec->h_dest, rule->tuples.dst_mac); + + if (rule->unused_tuple & BIT(INNER_SRC_MAC)) + eth_zero_addr(spec_mask->h_source); + else + ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac); + + if (rule->unused_tuple & BIT(INNER_DST_MAC)) + eth_zero_addr(spec_mask->h_dest); + else + ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac); + + spec->h_proto = cpu_to_be16(rule->tuples.ether_proto); + spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ? + 0 : cpu_to_be16(rule->tuples_mask.ether_proto); +} + +static void hclge_fd_get_user_def_info(struct ethtool_rx_flow_spec *fs, + struct hclge_fd_rule *rule) +{ + if ((rule->unused_tuple & HCLGE_FD_TUPLE_USER_DEF_TUPLES) == + HCLGE_FD_TUPLE_USER_DEF_TUPLES) { + fs->h_ext.data[0] = 0; + fs->h_ext.data[1] = 0; + fs->m_ext.data[0] = 0; + fs->m_ext.data[1] = 0; + } else { + fs->h_ext.data[0] = cpu_to_be32(rule->ep.user_def.offset); + fs->h_ext.data[1] = cpu_to_be32(rule->ep.user_def.data); + fs->m_ext.data[0] = + cpu_to_be32(HCLGE_FD_USER_DEF_OFFSET_UNMASK); + fs->m_ext.data[1] = cpu_to_be32(rule->ep.user_def.data_mask); + } +} + +static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs, + struct hclge_fd_rule *rule) +{ + if (fs->flow_type & FLOW_EXT) { + fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1); + fs->m_ext.vlan_tci = + rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ? + 0 : cpu_to_be16(rule->tuples_mask.vlan_tag1); + + hclge_fd_get_user_def_info(fs, rule); + } + + if (fs->flow_type & FLOW_MAC_EXT) { + ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac); + if (rule->unused_tuple & BIT(INNER_DST_MAC)) + eth_zero_addr(fs->m_u.ether_spec.h_dest); + else + ether_addr_copy(fs->m_u.ether_spec.h_dest, + rule->tuples_mask.dst_mac); + } +} + +static struct hclge_fd_rule *hclge_get_fd_rule(struct hclge_dev *hdev, + u16 location) +{ + struct hclge_fd_rule *rule = NULL; + struct hlist_node *node2; + + hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) { + if (rule->location == location) + return rule; + else if (rule->location > location) + return NULL; + } + + return NULL; +} + +static void hclge_fd_get_ring_cookie(struct ethtool_rx_flow_spec *fs, + struct hclge_fd_rule *rule) +{ + if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) { + fs->ring_cookie = RX_CLS_FLOW_DISC; + } else { + u64 vf_id; + + fs->ring_cookie = rule->queue_id; + vf_id = rule->vf_id; + vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF; + fs->ring_cookie |= vf_id; + } +} + +static int hclge_get_fd_rule_info(struct hnae3_handle *handle, + struct ethtool_rxnfc *cmd) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_fd_rule *rule = NULL; + struct hclge_dev *hdev = vport->back; + struct ethtool_rx_flow_spec *fs; + + if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) + return -EOPNOTSUPP; + + fs = (struct ethtool_rx_flow_spec *)&cmd->fs; + + spin_lock_bh(&hdev->fd_rule_lock); + + rule = hclge_get_fd_rule(hdev, fs->location); + if (!rule) { + spin_unlock_bh(&hdev->fd_rule_lock); + return -ENOENT; + } + + fs->flow_type = rule->flow_type; + switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { + case SCTP_V4_FLOW: + case TCP_V4_FLOW: + case UDP_V4_FLOW: + hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec, + &fs->m_u.tcp_ip4_spec); + break; + case IP_USER_FLOW: + 
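/* for a user-defined IPv4 flow, addresses, tos and l4 proto are reported */ + 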
hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec, + &fs->m_u.usr_ip4_spec); + break; + case SCTP_V6_FLOW: + case TCP_V6_FLOW: + case UDP_V6_FLOW: + hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec, + &fs->m_u.tcp_ip6_spec); + break; + case IPV6_USER_FLOW: + hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec, + &fs->m_u.usr_ip6_spec); + break; + /* The flow type of fd rule has been checked before adding in to rule + * list. As other flow types have been handled, it must be ETHER_FLOW + * for the default case + */ + default: + hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec, + &fs->m_u.ether_spec); + break; + } + + hclge_fd_get_ext_info(fs, rule); + + hclge_fd_get_ring_cookie(fs, rule); + + spin_unlock_bh(&hdev->fd_rule_lock); + + return 0; +} + +static int hclge_get_all_rules(struct hnae3_handle *handle, + struct ethtool_rxnfc *cmd, u32 *rule_locs) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct hclge_fd_rule *rule; + struct hlist_node *node2; + int cnt = 0; + + if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) + return -EOPNOTSUPP; + + cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]; + + spin_lock_bh(&hdev->fd_rule_lock); + hlist_for_each_entry_safe(rule, node2, + &hdev->fd_rule_list, rule_node) { + if (cnt == cmd->rule_cnt) { + spin_unlock_bh(&hdev->fd_rule_lock); + return -EMSGSIZE; + } + + if (rule->state == HCLGE_FD_TO_DEL) + continue; + + rule_locs[cnt] = rule->location; + cnt++; + } + + spin_unlock_bh(&hdev->fd_rule_lock); + + cmd->rule_cnt = cnt; + + return 0; +} + +static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys, + struct hclge_fd_rule_tuples *tuples) +{ +#define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32 +#define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32 + + tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto); + tuples->ip_proto = fkeys->basic.ip_proto; + tuples->dst_port = be16_to_cpu(fkeys->ports.dst); + + if (fkeys->basic.n_proto == htons(ETH_P_IP)) { + tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src); + tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst); + } else { + int i; + + for (i = 0; i < IPV6_SIZE; i++) { + tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]); + tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]); + } + } +} + +/* traverse all rules, check whether an existed rule has the same tuples */ +static struct hclge_fd_rule * +hclge_fd_search_flow_keys(struct hclge_dev *hdev, + const struct hclge_fd_rule_tuples *tuples) +{ + struct hclge_fd_rule *rule = NULL; + struct hlist_node *node; + + hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { + if (!memcmp(tuples, &rule->tuples, sizeof(*tuples))) + return rule; + } + + return NULL; +} + +static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples, + struct hclge_fd_rule *rule) +{ + rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) | + BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) | + BIT(INNER_SRC_PORT); + rule->action = 0; + rule->vf_id = 0; + rule->rule_type = HCLGE_FD_ARFS_ACTIVE; + rule->state = HCLGE_FD_TO_ADD; + if (tuples->ether_proto == ETH_P_IP) { + if (tuples->ip_proto == IPPROTO_TCP) + rule->flow_type = TCP_V4_FLOW; + else + rule->flow_type = UDP_V4_FLOW; + } else { + if (tuples->ip_proto == IPPROTO_TCP) + rule->flow_type = TCP_V6_FLOW; + else + rule->flow_type = UDP_V6_FLOW; + } + memcpy(&rule->tuples, tuples, sizeof(rule->tuples)); + memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask)); +} + +static int 
hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id, + u16 flow_id, struct flow_keys *fkeys) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_fd_rule_tuples new_tuples = {}; + struct hclge_dev *hdev = vport->back; + struct hclge_fd_rule *rule; + u16 bit_id; + + if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) + return -EOPNOTSUPP; + + /* when there is already fd rule existed add by user, + * arfs should not work + */ + spin_lock_bh(&hdev->fd_rule_lock); + if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE && + hdev->fd_active_type != HCLGE_FD_RULE_NONE) { + spin_unlock_bh(&hdev->fd_rule_lock); + return -EOPNOTSUPP; + } + + hclge_fd_get_flow_tuples(fkeys, &new_tuples); + + /* check is there flow director filter existed for this flow, + * if not, create a new filter for it; + * if filter exist with different queue id, modify the filter; + * if filter exist with same queue id, do nothing + */ + rule = hclge_fd_search_flow_keys(hdev, &new_tuples); + if (!rule) { + bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM); + if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) { + spin_unlock_bh(&hdev->fd_rule_lock); + return -ENOSPC; + } + + rule = kzalloc(sizeof(*rule), GFP_ATOMIC); + if (!rule) { + spin_unlock_bh(&hdev->fd_rule_lock); + return -ENOMEM; + } + + rule->location = bit_id; + rule->arfs.flow_id = flow_id; + rule->queue_id = queue_id; + hclge_fd_build_arfs_rule(&new_tuples, rule); + hclge_update_fd_list(hdev, rule->state, rule->location, rule); + hdev->fd_active_type = HCLGE_FD_ARFS_ACTIVE; + } else if (rule->queue_id != queue_id) { + rule->queue_id = queue_id; + rule->state = HCLGE_FD_TO_ADD; + set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state); + hclge_task_schedule(hdev, 0); + } + spin_unlock_bh(&hdev->fd_rule_lock); + return rule->location; +} + +static void hclge_rfs_filter_expire(struct hclge_dev *hdev) +{ +#ifdef CONFIG_RFS_ACCEL + struct hnae3_handle *handle = &hdev->vport[0].nic; + struct hclge_fd_rule *rule; + struct hlist_node *node; + + spin_lock_bh(&hdev->fd_rule_lock); + if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) { + spin_unlock_bh(&hdev->fd_rule_lock); + return; + } + hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { + if (rule->state != HCLGE_FD_ACTIVE) + continue; + if (rps_may_expire_flow(handle->netdev, rule->queue_id, + rule->arfs.flow_id, rule->location)) { + rule->state = HCLGE_FD_TO_DEL; + set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state); + } + } + spin_unlock_bh(&hdev->fd_rule_lock); +#endif +} + +/* make sure being called after lock up with fd_rule_lock */ +static int hclge_clear_arfs_rules(struct hclge_dev *hdev) +{ +#ifdef CONFIG_RFS_ACCEL + struct hclge_fd_rule *rule; + struct hlist_node *node; + int ret; + + if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) + return 0; + + hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { + switch (rule->state) { + case HCLGE_FD_TO_DEL: + case HCLGE_FD_ACTIVE: + ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, + rule->location, NULL, false); + if (ret) + return ret; + fallthrough; + case HCLGE_FD_TO_ADD: + hclge_fd_dec_rule_cnt(hdev, rule->location); + hlist_del(&rule->rule_node); + kfree(rule); + break; + default: + break; + } + } + hclge_sync_fd_state(hdev); + +#endif + return 0; +} + +static void hclge_get_cls_key_basic(const struct flow_rule *flow, + struct hclge_fd_rule *rule) +{ + if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_match_basic match; + u16 ethtype_key, ethtype_mask; + + 
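/* ETH_P_ALL from tc flower means "match any ethertype", so both the + * key and the mask are cleared below. + */ + 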
flow_rule_match_basic(flow, &match); + ethtype_key = ntohs(match.key->n_proto); + ethtype_mask = ntohs(match.mask->n_proto); + + if (ethtype_key == ETH_P_ALL) { + ethtype_key = 0; + ethtype_mask = 0; + } + rule->tuples.ether_proto = ethtype_key; + rule->tuples_mask.ether_proto = ethtype_mask; + rule->tuples.ip_proto = match.key->ip_proto; + rule->tuples_mask.ip_proto = match.mask->ip_proto; + } else { + rule->unused_tuple |= BIT(INNER_IP_PROTO); + rule->unused_tuple |= BIT(INNER_ETH_TYPE); + } +} + +static void hclge_get_cls_key_mac(const struct flow_rule *flow, + struct hclge_fd_rule *rule) +{ + if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { + struct flow_match_eth_addrs match; + + flow_rule_match_eth_addrs(flow, &match); + ether_addr_copy(rule->tuples.dst_mac, match.key->dst); + ether_addr_copy(rule->tuples_mask.dst_mac, match.mask->dst); + ether_addr_copy(rule->tuples.src_mac, match.key->src); + ether_addr_copy(rule->tuples_mask.src_mac, match.mask->src); + } else { + rule->unused_tuple |= BIT(INNER_DST_MAC); + rule->unused_tuple |= BIT(INNER_SRC_MAC); + } +} + +static void hclge_get_cls_key_vlan(const struct flow_rule *flow, + struct hclge_fd_rule *rule) +{ + if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) { + struct flow_match_vlan match; + + flow_rule_match_vlan(flow, &match); + rule->tuples.vlan_tag1 = match.key->vlan_id | + (match.key->vlan_priority << VLAN_PRIO_SHIFT); + rule->tuples_mask.vlan_tag1 = match.mask->vlan_id | + (match.mask->vlan_priority << VLAN_PRIO_SHIFT); + } else { + rule->unused_tuple |= BIT(INNER_VLAN_TAG_FST); + } +} + +static void hclge_get_cls_key_ip(const struct flow_rule *flow, + struct hclge_fd_rule *rule) +{ + u16 addr_type = 0; + + if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_CONTROL)) { + struct flow_match_control match; + + flow_rule_match_control(flow, &match); + addr_type = match.key->addr_type; + } + + if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { + struct flow_match_ipv4_addrs match; + + flow_rule_match_ipv4_addrs(flow, &match); + rule->tuples.src_ip[IPV4_INDEX] = be32_to_cpu(match.key->src); + rule->tuples_mask.src_ip[IPV4_INDEX] = + be32_to_cpu(match.mask->src); + rule->tuples.dst_ip[IPV4_INDEX] = be32_to_cpu(match.key->dst); + rule->tuples_mask.dst_ip[IPV4_INDEX] = + be32_to_cpu(match.mask->dst); + } else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { + struct flow_match_ipv6_addrs match; + + flow_rule_match_ipv6_addrs(flow, &match); + be32_to_cpu_array(rule->tuples.src_ip, match.key->src.s6_addr32, + IPV6_SIZE); + be32_to_cpu_array(rule->tuples_mask.src_ip, + match.mask->src.s6_addr32, IPV6_SIZE); + be32_to_cpu_array(rule->tuples.dst_ip, match.key->dst.s6_addr32, + IPV6_SIZE); + be32_to_cpu_array(rule->tuples_mask.dst_ip, + match.mask->dst.s6_addr32, IPV6_SIZE); + } else { + rule->unused_tuple |= BIT(INNER_SRC_IP); + rule->unused_tuple |= BIT(INNER_DST_IP); + } +} + +static void hclge_get_cls_key_port(const struct flow_rule *flow, + struct hclge_fd_rule *rule) +{ + if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) { + struct flow_match_ports match; + + flow_rule_match_ports(flow, &match); + + rule->tuples.src_port = be16_to_cpu(match.key->src); + rule->tuples_mask.src_port = be16_to_cpu(match.mask->src); + rule->tuples.dst_port = be16_to_cpu(match.key->dst); + rule->tuples_mask.dst_port = be16_to_cpu(match.mask->dst); + } else { + rule->unused_tuple |= BIT(INNER_SRC_PORT); + rule->unused_tuple |= BIT(INNER_DST_PORT); + } +} + +static int hclge_parse_cls_flower(struct hclge_dev *hdev, + struct 
flow_cls_offload *cls_flower, + struct hclge_fd_rule *rule) +{ + struct flow_rule *flow = flow_cls_offload_flow_rule(cls_flower); + struct flow_dissector *dissector = flow->match.dissector; + + if (dissector->used_keys & + ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | + BIT(FLOW_DISSECTOR_KEY_BASIC) | + BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_VLAN) | + BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_PORTS))) { + dev_err(&hdev->pdev->dev, "unsupported key set: %#x\n", + dissector->used_keys); + return -EOPNOTSUPP; + } + + hclge_get_cls_key_basic(flow, rule); + hclge_get_cls_key_mac(flow, rule); + hclge_get_cls_key_vlan(flow, rule); + hclge_get_cls_key_ip(flow, rule); + hclge_get_cls_key_port(flow, rule); + + return 0; +} + +static int hclge_check_cls_flower(struct hclge_dev *hdev, + struct flow_cls_offload *cls_flower, int tc) +{ + u32 prio = cls_flower->common.prio; + + if (tc < 0 || tc > hdev->tc_max) { + dev_err(&hdev->pdev->dev, "invalid traffic class\n"); + return -EINVAL; + } + + if (prio == 0 || + prio > hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) { + dev_err(&hdev->pdev->dev, + "prio %u should be in range[1, %u]\n", + prio, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]); + return -EINVAL; + } + + if (test_bit(prio - 1, hdev->fd_bmap)) { + dev_err(&hdev->pdev->dev, "prio %u is already used\n", prio); + return -EINVAL; + } + return 0; +} + +static int hclge_add_cls_flower(struct hnae3_handle *handle, + struct flow_cls_offload *cls_flower, + int tc) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct hclge_fd_rule *rule; + int ret; + + if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) { + dev_err(&hdev->pdev->dev, + "cls flower is not supported\n"); + return -EOPNOTSUPP; + } + + ret = hclge_check_cls_flower(hdev, cls_flower, tc); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to check cls flower params, ret = %d\n", ret); + return ret; + } + + rule = kzalloc(sizeof(*rule), GFP_KERNEL); + if (!rule) + return -ENOMEM; + + ret = hclge_parse_cls_flower(hdev, cls_flower, rule); + if (ret) { + kfree(rule); + return ret; + } + + rule->action = HCLGE_FD_ACTION_SELECT_TC; + rule->cls_flower.tc = tc; + rule->location = cls_flower->common.prio - 1; + rule->vf_id = 0; + rule->cls_flower.cookie = cls_flower->cookie; + rule->rule_type = HCLGE_FD_TC_FLOWER_ACTIVE; + + ret = hclge_add_fd_entry_common(hdev, rule); + if (ret) + kfree(rule); + + return ret; +} + +static struct hclge_fd_rule *hclge_find_cls_flower(struct hclge_dev *hdev, + unsigned long cookie) +{ + struct hclge_fd_rule *rule; + struct hlist_node *node; + + hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { + if (rule->cls_flower.cookie == cookie) + return rule; + } + + return NULL; +} + +static int hclge_del_cls_flower(struct hnae3_handle *handle, + struct flow_cls_offload *cls_flower) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct hclge_fd_rule *rule; + int ret; + + if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) + return -EOPNOTSUPP; + + spin_lock_bh(&hdev->fd_rule_lock); + + rule = hclge_find_cls_flower(hdev, cls_flower->cookie); + if (!rule) { + spin_unlock_bh(&hdev->fd_rule_lock); + return -EINVAL; + } + + ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location, + NULL, false); + if (ret) { + /* if tcam config fail, set rule state to TO_DEL, + * so the rule will be deleted when periodic + * task being scheduled. 
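+ * hclge_sync_fd_list() will then retry the TCAM delete and free the + * rule node when the service task runs. 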
+ */ + hclge_update_fd_list(hdev, HCLGE_FD_TO_DEL, rule->location, NULL); + set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state); + spin_unlock_bh(&hdev->fd_rule_lock); + return ret; + } + + hclge_update_fd_list(hdev, HCLGE_FD_DELETED, rule->location, NULL); + spin_unlock_bh(&hdev->fd_rule_lock); + + return 0; +} + +static void hclge_sync_fd_list(struct hclge_dev *hdev, struct hlist_head *hlist) +{ + struct hclge_fd_rule *rule; + struct hlist_node *node; + int ret = 0; + + if (!test_and_clear_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state)) + return; + + spin_lock_bh(&hdev->fd_rule_lock); + + hlist_for_each_entry_safe(rule, node, hlist, rule_node) { + switch (rule->state) { + case HCLGE_FD_TO_ADD: + ret = hclge_fd_config_rule(hdev, rule); + if (ret) + goto out; + rule->state = HCLGE_FD_ACTIVE; + break; + case HCLGE_FD_TO_DEL: + ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, + rule->location, NULL, false); + if (ret) + goto out; + hclge_fd_dec_rule_cnt(hdev, rule->location); + hclge_fd_free_node(hdev, rule); + break; + default: + break; + } + } + +out: + if (ret) + set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state); + + spin_unlock_bh(&hdev->fd_rule_lock); +} + +static void hclge_sync_fd_table(struct hclge_dev *hdev) +{ + if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) + return; + + if (test_and_clear_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state)) { + bool clear_list = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE; + + hclge_clear_fd_rules_in_list(hdev, clear_list); + } + + hclge_sync_fd_user_def_cfg(hdev, false); + + hclge_sync_fd_list(hdev, &hdev->fd_rule_list); +} + +static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) || + hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING); +} + +static bool hclge_get_cmdq_stat(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + return test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); +} + +static bool hclge_ae_dev_resetting(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); +} + +static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + return hdev->rst_stats.hw_reset_done_cnt; +} + +static void hclge_enable_fd(struct hnae3_handle *handle, bool enable) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + hdev->fd_en = enable; + + if (!enable) + set_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state); + else + hclge_restore_fd_entries(handle); + + hclge_task_schedule(hdev, 0); +} + +static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable) +{ +#define HCLGE_LINK_STATUS_WAIT_CNT 3 + + struct hclge_desc desc; + struct hclge_config_mac_mode_cmd *req = + (struct hclge_config_mac_mode_cmd *)desc.data; + u32 loop_en = 0; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false); + + if (enable) { + hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U); + hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U); + hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U); + hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U); + hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U); + hnae3_set_bit(loop_en, 
HCLGE_MAC_RX_FCS_B, 1U); + hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U); + hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U); + hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U); + hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U); + } + + req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "mac enable fail, ret =%d.\n", ret); + return; + } + + if (!enable) + hclge_mac_link_status_wait(hdev, HCLGE_LINK_STATUS_DOWN, + HCLGE_LINK_STATUS_WAIT_CNT); +} + +static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid, + u8 switch_param, u8 param_mask) +{ + struct hclge_mac_vlan_switch_cmd *req; + struct hclge_desc desc; + u32 func_id; + int ret; + + func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0); + req = (struct hclge_mac_vlan_switch_cmd *)desc.data; + + /* read current config parameter */ + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM, + true); + req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL; + req->func_id = cpu_to_le32(func_id); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "read mac vlan switch parameter fail, ret = %d\n", ret); + return ret; + } + + /* modify and write new config parameter */ + hclge_comm_cmd_reuse_desc(&desc, false); + req->switch_param = (req->switch_param & param_mask) | switch_param; + req->param_mask = param_mask; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "set mac vlan switch parameter fail, ret = %d\n", ret); + return ret; +} + +static void hclge_phy_link_status_wait(struct hclge_dev *hdev, + int link_ret) +{ +#define HCLGE_PHY_LINK_STATUS_NUM 200 + + struct phy_device *phydev = hdev->hw.mac.phydev; + int i = 0; + int ret; + + do { + ret = phy_read_status(phydev); + if (ret) { + dev_err(&hdev->pdev->dev, + "phy update link status fail, ret = %d\n", ret); + return; + } + + if (phydev->link == link_ret) + break; + + msleep(HCLGE_LINK_STATUS_MS); + } while (++i < HCLGE_PHY_LINK_STATUS_NUM); +} + +static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret, + int wait_cnt) +{ + int link_status; + int i = 0; + int ret; + + do { + ret = hclge_get_mac_link_status(hdev, &link_status); + if (ret) + return ret; + if (link_status == link_ret) + return 0; + + msleep(HCLGE_LINK_STATUS_MS); + } while (++i < wait_cnt); + return -EBUSY; +} + +static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en, + bool is_phy) +{ +#define HCLGE_MAC_LINK_STATUS_NUM 100 + + int link_ret; + + link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN; + + if (is_phy) + hclge_phy_link_status_wait(hdev, link_ret); + + return hclge_mac_link_status_wait(hdev, link_ret, + HCLGE_MAC_LINK_STATUS_NUM); +} + +static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en) +{ + struct hclge_config_mac_mode_cmd *req; + struct hclge_desc desc; + u32 loop_en; + int ret; + + req = (struct hclge_config_mac_mode_cmd *)&desc.data[0]; + /* 1 Read out the MAC mode config at first */ + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "mac loopback get fail, ret =%d.\n", ret); + return ret; + } + + /* 2 Then setup the loopback flag */ + loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en); + hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 
1 : 0); + + req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en); + + /* 3 Config mac work mode with loopback flag + * and its original configure parameters + */ + hclge_comm_cmd_reuse_desc(&desc, false); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "mac loopback set fail, ret =%d.\n", ret); + return ret; +} + +static int hclge_cfg_common_loopback_cmd_send(struct hclge_dev *hdev, bool en, + enum hnae3_loop loop_mode) +{ + struct hclge_common_lb_cmd *req; + struct hclge_desc desc; + u8 loop_mode_b; + int ret; + + req = (struct hclge_common_lb_cmd *)desc.data; + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, false); + + switch (loop_mode) { + case HNAE3_LOOP_SERIAL_SERDES: + loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B; + break; + case HNAE3_LOOP_PARALLEL_SERDES: + loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B; + break; + case HNAE3_LOOP_PHY: + loop_mode_b = HCLGE_CMD_GE_PHY_INNER_LOOP_B; + break; + default: + dev_err(&hdev->pdev->dev, + "unsupported loopback mode %d\n", loop_mode); + return -ENOTSUPP; + } + + req->mask = loop_mode_b; + if (en) + req->enable = loop_mode_b; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "failed to send loopback cmd, loop_mode = %d, ret = %d\n", + loop_mode, ret); + + return ret; +} + +static int hclge_cfg_common_loopback_wait(struct hclge_dev *hdev) +{ +#define HCLGE_COMMON_LB_RETRY_MS 10 +#define HCLGE_COMMON_LB_RETRY_NUM 100 + + struct hclge_common_lb_cmd *req; + struct hclge_desc desc; + u32 i = 0; + int ret; + + req = (struct hclge_common_lb_cmd *)desc.data; + + do { + msleep(HCLGE_COMMON_LB_RETRY_MS); + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, + true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get loopback done status, ret = %d\n", + ret); + return ret; + } + } while (++i < HCLGE_COMMON_LB_RETRY_NUM && + !(req->result & HCLGE_CMD_COMMON_LB_DONE_B)); + + if (!(req->result & HCLGE_CMD_COMMON_LB_DONE_B)) { + dev_err(&hdev->pdev->dev, "wait loopback timeout\n"); + return -EBUSY; + } else if (!(req->result & HCLGE_CMD_COMMON_LB_SUCCESS_B)) { + dev_err(&hdev->pdev->dev, "failed to do loopback test\n"); + return -EIO; + } + + return 0; +} + +static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en, + enum hnae3_loop loop_mode) +{ + int ret; + + ret = hclge_cfg_common_loopback_cmd_send(hdev, en, loop_mode); + if (ret) + return ret; + + return hclge_cfg_common_loopback_wait(hdev); +} + +static int hclge_set_common_loopback(struct hclge_dev *hdev, bool en, + enum hnae3_loop loop_mode) +{ + int ret; + + ret = hclge_cfg_common_loopback(hdev, en, loop_mode); + if (ret) + return ret; + + hclge_cfg_mac_mode(hdev, en); + + ret = hclge_mac_phy_link_status_wait(hdev, en, false); + if (ret) + dev_err(&hdev->pdev->dev, + "serdes loopback config mac mode timeout\n"); + + return ret; +} + +static int hclge_enable_phy_loopback(struct hclge_dev *hdev, + struct phy_device *phydev) +{ + int ret; + + if (!phydev->suspended) { + ret = phy_suspend(phydev); + if (ret) + return ret; + } + + ret = phy_resume(phydev); + if (ret) + return ret; + + return phy_loopback(phydev, true); +} + +static int hclge_disable_phy_loopback(struct hclge_dev *hdev, + struct phy_device *phydev) +{ + int ret; + + ret = phy_loopback(phydev, false); + if (ret) + return ret; + + return phy_suspend(phydev); +} + +static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en) +{ + struct phy_device *phydev = 
hdev->hw.mac.phydev; + int ret; + + if (!phydev) { + if (hnae3_dev_phy_imp_supported(hdev)) + return hclge_set_common_loopback(hdev, en, + HNAE3_LOOP_PHY); + return -ENOTSUPP; + } + + if (en) + ret = hclge_enable_phy_loopback(hdev, phydev); + else + ret = hclge_disable_phy_loopback(hdev, phydev); + if (ret) { + dev_err(&hdev->pdev->dev, + "set phy loopback fail, ret = %d\n", ret); + return ret; + } + + hclge_cfg_mac_mode(hdev, en); + + ret = hclge_mac_phy_link_status_wait(hdev, en, true); + if (ret) + dev_err(&hdev->pdev->dev, + "phy loopback config mac mode timeout\n"); + + return ret; +} + +static int hclge_tqp_enable_cmd_send(struct hclge_dev *hdev, u16 tqp_id, + u16 stream_id, bool enable) +{ + struct hclge_desc desc; + struct hclge_cfg_com_tqp_queue_cmd *req = + (struct hclge_cfg_com_tqp_queue_cmd *)desc.data; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false); + req->tqp_id = cpu_to_le16(tqp_id); + req->stream_id = cpu_to_le16(stream_id); + if (enable) + req->enable |= 1U << HCLGE_TQP_ENABLE_B; + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int hclge_tqp_enable(struct hnae3_handle *handle, bool enable) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + int ret; + u16 i; + + for (i = 0; i < handle->kinfo.num_tqps; i++) { + ret = hclge_tqp_enable_cmd_send(hdev, i, 0, enable); + if (ret) + return ret; + } + return 0; +} + +static int hclge_set_loopback(struct hnae3_handle *handle, + enum hnae3_loop loop_mode, bool en) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + int ret = 0; + + /* Loopback can be enabled in three places: SSU, MAC, and serdes. By + * default, SSU loopback is enabled, so if the SMAC and the DMAC are + * the same, the packets are looped back in the SSU. If SSU loopback + * is disabled, packets can reach MAC even if SMAC is the same as DMAC. + */ + if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { + u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B); + + ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param, + HCLGE_SWITCH_ALW_LPBK_MASK); + if (ret) + return ret; + } + + switch (loop_mode) { + case HNAE3_LOOP_APP: + ret = hclge_set_app_loopback(hdev, en); + break; + case HNAE3_LOOP_SERIAL_SERDES: + case HNAE3_LOOP_PARALLEL_SERDES: + ret = hclge_set_common_loopback(hdev, en, loop_mode); + break; + case HNAE3_LOOP_PHY: + ret = hclge_set_phy_loopback(hdev, en); + break; + case HNAE3_LOOP_EXTERNAL: + break; + default: + ret = -ENOTSUPP; + dev_err(&hdev->pdev->dev, + "loop_mode %d is not supported\n", loop_mode); + break; + } + + if (ret) + return ret; + + ret = hclge_tqp_enable(handle, en); + if (ret) + dev_err(&hdev->pdev->dev, "failed to %s tqp in loopback, ret = %d\n", + en ? 
"enable" : "disable", ret); + + return ret; +} + +static int hclge_set_default_loopback(struct hclge_dev *hdev) +{ + int ret; + + ret = hclge_set_app_loopback(hdev, false); + if (ret) + return ret; + + ret = hclge_cfg_common_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES); + if (ret) + return ret; + + return hclge_cfg_common_loopback(hdev, false, + HNAE3_LOOP_PARALLEL_SERDES); +} + +static void hclge_flush_link_update(struct hclge_dev *hdev) +{ +#define HCLGE_FLUSH_LINK_TIMEOUT 100000 + + unsigned long last = hdev->serv_processed_cnt; + int i = 0; + + while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) && + i++ < HCLGE_FLUSH_LINK_TIMEOUT && + last == hdev->serv_processed_cnt) + usleep_range(1, 1); +} + +static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + if (enable) { + hclge_task_schedule(hdev, 0); + } else { + /* Set the DOWN flag here to disable link updating */ + set_bit(HCLGE_STATE_DOWN, &hdev->state); + + /* flush memory to make sure DOWN is seen by service task */ + smp_mb__before_atomic(); + hclge_flush_link_update(hdev); + } +} + +static int hclge_ae_start(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + /* mac enable */ + hclge_cfg_mac_mode(hdev, true); + clear_bit(HCLGE_STATE_DOWN, &hdev->state); + hdev->hw.mac.link = 0; + + /* reset tqp stats */ + hclge_comm_reset_tqp_stats(handle); + + hclge_mac_start_phy(hdev); + + return 0; +} + +static void hclge_ae_stop(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + set_bit(HCLGE_STATE_DOWN, &hdev->state); + spin_lock_bh(&hdev->fd_rule_lock); + hclge_clear_arfs_rules(hdev); + spin_unlock_bh(&hdev->fd_rule_lock); + + /* If it is not PF reset or FLR, the firmware will disable the MAC, + * so it only need to stop phy here. 
+ */ + if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) { + hclge_pfc_pause_en_cfg(hdev, HCLGE_PFC_TX_RX_DISABLE, + HCLGE_PFC_DISABLE); + if (hdev->reset_type != HNAE3_FUNC_RESET && + hdev->reset_type != HNAE3_FLR_RESET) { + hclge_mac_stop_phy(hdev); + hclge_update_link_status(hdev); + return; + } + } + + hclge_reset_tqp(handle); + + hclge_config_mac_tnl_int(hdev, false); + + /* Mac disable */ + hclge_cfg_mac_mode(hdev, false); + + hclge_mac_stop_phy(hdev); + + /* reset tqp stats */ + hclge_comm_reset_tqp_stats(handle); + hclge_update_link_status(hdev); +} + +int hclge_vport_start(struct hclge_vport *vport) +{ + struct hclge_dev *hdev = vport->back; + + set_bit(HCLGE_VPORT_STATE_INITED, &vport->state); + set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); + set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state); + vport->last_active_jiffies = jiffies; + vport->need_notify = 0; + + if (test_bit(vport->vport_id, hdev->vport_config_block)) { + if (vport->vport_id) { + hclge_restore_mac_table_common(vport); + hclge_restore_vport_vlan_table(vport); + } else { + hclge_restore_hw_table(hdev); + } + } + + clear_bit(vport->vport_id, hdev->vport_config_block); + + return 0; +} + +void hclge_vport_stop(struct hclge_vport *vport) +{ + clear_bit(HCLGE_VPORT_STATE_INITED, &vport->state); + clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); + vport->need_notify = 0; +} + +static int hclge_client_start(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + + return hclge_vport_start(vport); +} + +static void hclge_client_stop(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + + hclge_vport_stop(vport); +} + +static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport, + u16 cmdq_resp, u8 resp_code, + enum hclge_mac_vlan_tbl_opcode op) +{ + struct hclge_dev *hdev = vport->back; + + if (cmdq_resp) { + dev_err(&hdev->pdev->dev, + "cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n", + cmdq_resp); + return -EIO; + } + + if (op == HCLGE_MAC_VLAN_ADD) { + if (!resp_code || resp_code == 1) + return 0; + else if (resp_code == HCLGE_ADD_UC_OVERFLOW || + resp_code == HCLGE_ADD_MC_OVERFLOW) + return -ENOSPC; + + dev_err(&hdev->pdev->dev, + "add mac addr failed for undefined, code=%u.\n", + resp_code); + return -EIO; + } else if (op == HCLGE_MAC_VLAN_REMOVE) { + if (!resp_code) { + return 0; + } else if (resp_code == 1) { + dev_dbg(&hdev->pdev->dev, + "remove mac addr failed for miss.\n"); + return -ENOENT; + } + + dev_err(&hdev->pdev->dev, + "remove mac addr failed for undefined, code=%u.\n", + resp_code); + return -EIO; + } else if (op == HCLGE_MAC_VLAN_LKUP) { + if (!resp_code) { + return 0; + } else if (resp_code == 1) { + dev_dbg(&hdev->pdev->dev, + "lookup mac addr failed for miss.\n"); + return -ENOENT; + } + + dev_err(&hdev->pdev->dev, + "lookup mac addr failed for undefined, code=%u.\n", + resp_code); + return -EIO; + } + + dev_err(&hdev->pdev->dev, + "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op); + + return -EINVAL; +} + +static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr) +{ +#define HCLGE_VF_NUM_IN_FIRST_DESC 192 + + unsigned int word_num; + unsigned int bit_num; + + if (vfid > 255 || vfid < 0) + return -EIO; + + if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) { + word_num = vfid / 32; + bit_num = vfid % 32; + if (clr) + desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num)); + else + desc[1].data[word_num] |= cpu_to_le32(1 << bit_num); + } else { + word_num = (vfid 
- HCLGE_VF_NUM_IN_FIRST_DESC) / 32; + bit_num = vfid % 32; + if (clr) + desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num)); + else + desc[2].data[word_num] |= cpu_to_le32(1 << bit_num); + } + + return 0; +} + +static bool hclge_is_all_function_id_zero(struct hclge_desc *desc) +{ +#define HCLGE_DESC_NUMBER 3 +#define HCLGE_FUNC_NUMBER_PER_DESC 6 + int i, j; + + for (i = 1; i < HCLGE_DESC_NUMBER; i++) + for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++) + if (desc[i].data[j]) + return false; + + return true; +} + +static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req, + const u8 *addr, bool is_mc) +{ + const unsigned char *mac_addr = addr; + u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) | + (mac_addr[0]) | (mac_addr[1] << 8); + u32 low_val = mac_addr[4] | (mac_addr[5] << 8); + + hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); + if (is_mc) { + hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); + hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1); + } + + new_req->mac_addr_hi32 = cpu_to_le32(high_val); + new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff); +} + +static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport, + struct hclge_mac_vlan_tbl_entry_cmd *req) +{ + struct hclge_dev *hdev = vport->back; + struct hclge_desc desc; + u8 resp_code; + u16 retval; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false); + + memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "del mac addr failed for cmd_send, ret =%d.\n", + ret); + return ret; + } + resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff; + retval = le16_to_cpu(desc.retval); + + return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code, + HCLGE_MAC_VLAN_REMOVE); +} + +static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport, + struct hclge_mac_vlan_tbl_entry_cmd *req, + struct hclge_desc *desc, + bool is_mc) +{ + struct hclge_dev *hdev = vport->back; + u8 resp_code; + u16 retval; + int ret; + + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true); + if (is_mc) { + desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); + memcpy(desc[0].data, + req, + sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); + hclge_cmd_setup_basic_desc(&desc[1], + HCLGE_OPC_MAC_VLAN_ADD, + true); + desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[2], + HCLGE_OPC_MAC_VLAN_ADD, + true); + ret = hclge_cmd_send(&hdev->hw, desc, 3); + } else { + memcpy(desc[0].data, + req, + sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); + ret = hclge_cmd_send(&hdev->hw, desc, 1); + } + if (ret) { + dev_err(&hdev->pdev->dev, + "lookup mac addr failed for cmd_send, ret =%d.\n", + ret); + return ret; + } + resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff; + retval = le16_to_cpu(desc[0].retval); + + return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code, + HCLGE_MAC_VLAN_LKUP); +} + +static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport, + struct hclge_mac_vlan_tbl_entry_cmd *req, + struct hclge_desc *mc_desc) +{ + struct hclge_dev *hdev = vport->back; + int cfg_status; + u8 resp_code; + u16 retval; + int ret; + + if (!mc_desc) { + struct hclge_desc desc; + + hclge_cmd_setup_basic_desc(&desc, + HCLGE_OPC_MAC_VLAN_ADD, + false); + memcpy(desc.data, req, + sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 
0xff; + retval = le16_to_cpu(desc.retval); + + cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval, + resp_code, + HCLGE_MAC_VLAN_ADD); + } else { + hclge_comm_cmd_reuse_desc(&mc_desc[0], false); + mc_desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); + hclge_comm_cmd_reuse_desc(&mc_desc[1], false); + mc_desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); + hclge_comm_cmd_reuse_desc(&mc_desc[2], false); + mc_desc[2].flag &= cpu_to_le16(~HCLGE_COMM_CMD_FLAG_NEXT); + memcpy(mc_desc[0].data, req, + sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); + ret = hclge_cmd_send(&hdev->hw, mc_desc, 3); + resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff; + retval = le16_to_cpu(mc_desc[0].retval); + + cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval, + resp_code, + HCLGE_MAC_VLAN_ADD); + } + + if (ret) { + dev_err(&hdev->pdev->dev, + "add mac addr failed for cmd_send, ret =%d.\n", + ret); + return ret; + } + + return cfg_status; +} + +static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size, + u16 *allocated_size) +{ + struct hclge_umv_spc_alc_cmd *req; + struct hclge_desc desc; + int ret; + + req = (struct hclge_umv_spc_alc_cmd *)desc.data; + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false); + + req->space_size = cpu_to_le32(space_size); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n", + ret); + return ret; + } + + *allocated_size = le32_to_cpu(desc.data[1]); + + return 0; +} + +static int hclge_init_umv_space(struct hclge_dev *hdev) +{ + u16 allocated_size = 0; + int ret; + + ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size); + if (ret) + return ret; + + if (allocated_size < hdev->wanted_umv_size) + dev_warn(&hdev->pdev->dev, + "failed to alloc umv space, want %u, get %u\n", + hdev->wanted_umv_size, allocated_size); + + hdev->max_umv_size = allocated_size; + hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1); + hdev->share_umv_size = hdev->priv_umv_size + + hdev->max_umv_size % (hdev->num_alloc_vport + 1); + + if (hdev->ae_dev->dev_specs.mc_mac_size) + set_bit(HNAE3_DEV_SUPPORT_MC_MAC_MNG_B, hdev->ae_dev->caps); + + return 0; +} + +static void hclge_reset_umv_space(struct hclge_dev *hdev) +{ + struct hclge_vport *vport; + int i; + + for (i = 0; i < hdev->num_alloc_vport; i++) { + vport = &hdev->vport[i]; + vport->used_umv_num = 0; + } + + mutex_lock(&hdev->vport_lock); + hdev->share_umv_size = hdev->priv_umv_size + + hdev->max_umv_size % (hdev->num_alloc_vport + 1); + mutex_unlock(&hdev->vport_lock); + + hdev->used_mc_mac_num = 0; +} + +static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock) +{ + struct hclge_dev *hdev = vport->back; + bool is_full; + + if (need_lock) + mutex_lock(&hdev->vport_lock); + + is_full = (vport->used_umv_num >= hdev->priv_umv_size && + hdev->share_umv_size == 0); + + if (need_lock) + mutex_unlock(&hdev->vport_lock); + + return is_full; +} + +static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free) +{ + struct hclge_dev *hdev = vport->back; + + if (is_free) { + if (vport->used_umv_num > hdev->priv_umv_size) + hdev->share_umv_size++; + + if (vport->used_umv_num > 0) + vport->used_umv_num--; + } else { + if (vport->used_umv_num >= hdev->priv_umv_size && + hdev->share_umv_size > 0) + hdev->share_umv_size--; + vport->used_umv_num++; + } +} + +static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list, + const u8 *mac_addr) +{ + struct hclge_mac_node 
*mac_node, *tmp; + + list_for_each_entry_safe(mac_node, tmp, list, node) + if (ether_addr_equal(mac_addr, mac_node->mac_addr)) + return mac_node; + + return NULL; +} + +static void hclge_update_mac_node(struct hclge_mac_node *mac_node, + enum HCLGE_MAC_NODE_STATE state) +{ + switch (state) { + /* from set_rx_mode or tmp_add_list */ + case HCLGE_MAC_TO_ADD: + if (mac_node->state == HCLGE_MAC_TO_DEL) + mac_node->state = HCLGE_MAC_ACTIVE; + break; + /* only from set_rx_mode */ + case HCLGE_MAC_TO_DEL: + if (mac_node->state == HCLGE_MAC_TO_ADD) { + list_del(&mac_node->node); + kfree(mac_node); + } else { + mac_node->state = HCLGE_MAC_TO_DEL; + } + break; + /* only from tmp_add_list, the mac_node->state won't be + * ACTIVE. + */ + case HCLGE_MAC_ACTIVE: + if (mac_node->state == HCLGE_MAC_TO_ADD) + mac_node->state = HCLGE_MAC_ACTIVE; + + break; + } +} + +int hclge_update_mac_list(struct hclge_vport *vport, + enum HCLGE_MAC_NODE_STATE state, + enum HCLGE_MAC_ADDR_TYPE mac_type, + const unsigned char *addr) +{ + char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; + struct hclge_dev *hdev = vport->back; + struct hclge_mac_node *mac_node; + struct list_head *list; + + list = (mac_type == HCLGE_MAC_ADDR_UC) ? + &vport->uc_mac_list : &vport->mc_mac_list; + + spin_lock_bh(&vport->mac_list_lock); + + /* if the mac addr is already in the mac list, no need to add a new + * one into it, just check the mac addr state, convert it to a new + * state, or just remove it, or do nothing. + */ + mac_node = hclge_find_mac_node(list, addr); + if (mac_node) { + hclge_update_mac_node(mac_node, state); + spin_unlock_bh(&vport->mac_list_lock); + set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state); + return 0; + } + + /* if this address is never added, unnecessary to delete */ + if (state == HCLGE_MAC_TO_DEL) { + spin_unlock_bh(&vport->mac_list_lock); + hnae3_format_mac_addr(format_mac_addr, addr); + dev_err(&hdev->pdev->dev, + "failed to delete address %s from mac list\n", + format_mac_addr); + return -ENOENT; + } + + mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC); + if (!mac_node) { + spin_unlock_bh(&vport->mac_list_lock); + return -ENOMEM; + } + + set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state); + + mac_node->state = state; + ether_addr_copy(mac_node->mac_addr, addr); + list_add_tail(&mac_node->node, list); + + spin_unlock_bh(&vport->mac_list_lock); + + return 0; +} + +static int hclge_add_uc_addr(struct hnae3_handle *handle, + const unsigned char *addr) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + + return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC, + addr); +} + +int hclge_add_uc_addr_common(struct hclge_vport *vport, + const unsigned char *addr) +{ + char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; + struct hclge_dev *hdev = vport->back; + struct hclge_mac_vlan_tbl_entry_cmd req; + struct hclge_desc desc; + u16 egress_port = 0; + int ret; + + /* mac addr check */ + if (is_zero_ether_addr(addr) || + is_broadcast_ether_addr(addr) || + is_multicast_ether_addr(addr)) { + hnae3_format_mac_addr(format_mac_addr, addr); + dev_err(&hdev->pdev->dev, + "Set_uc mac err! invalid mac:%s. 
is_zero:%d,is_br=%d,is_mul=%d\n", + format_mac_addr, is_zero_ether_addr(addr), + is_broadcast_ether_addr(addr), + is_multicast_ether_addr(addr)); + return -EINVAL; + } + + memset(&req, 0, sizeof(req)); + + hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M, + HCLGE_MAC_EPORT_VFID_S, vport->vport_id); + + req.egress_port = cpu_to_le16(egress_port); + + hclge_prepare_mac_addr(&req, addr, false); + + /* Lookup the mac address in the mac_vlan table, and add + * it if the entry is inexistent. Repeated unicast entry + * is not allowed in the mac vlan table. + */ + ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false); + if (ret == -ENOENT) { + mutex_lock(&hdev->vport_lock); + if (!hclge_is_umv_space_full(vport, false)) { + ret = hclge_add_mac_vlan_tbl(vport, &req, NULL); + if (!ret) + hclge_update_umv_space(vport, false); + mutex_unlock(&hdev->vport_lock); + return ret; + } + mutex_unlock(&hdev->vport_lock); + + if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE)) + dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n", + hdev->priv_umv_size); + + return -ENOSPC; + } + + /* check if we just hit the duplicate */ + if (!ret) + return -EEXIST; + + return ret; +} + +static int hclge_rm_uc_addr(struct hnae3_handle *handle, + const unsigned char *addr) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + + return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC, + addr); +} + +int hclge_rm_uc_addr_common(struct hclge_vport *vport, + const unsigned char *addr) +{ + char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; + struct hclge_dev *hdev = vport->back; + struct hclge_mac_vlan_tbl_entry_cmd req; + int ret; + + /* mac addr check */ + if (is_zero_ether_addr(addr) || + is_broadcast_ether_addr(addr) || + is_multicast_ether_addr(addr)) { + hnae3_format_mac_addr(format_mac_addr, addr); + dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%s.\n", + format_mac_addr); + return -EINVAL; + } + + memset(&req, 0, sizeof(req)); + hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); + hclge_prepare_mac_addr(&req, addr, false); + ret = hclge_remove_mac_vlan_tbl(vport, &req); + if (!ret || ret == -ENOENT) { + mutex_lock(&hdev->vport_lock); + hclge_update_umv_space(vport, true); + mutex_unlock(&hdev->vport_lock); + return 0; + } + + return ret; +} + +static int hclge_add_mc_addr(struct hnae3_handle *handle, + const unsigned char *addr) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + + return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC, + addr); +} + +int hclge_add_mc_addr_common(struct hclge_vport *vport, + const unsigned char *addr) +{ + char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; + struct hclge_dev *hdev = vport->back; + struct hclge_mac_vlan_tbl_entry_cmd req; + struct hclge_desc desc[3]; + bool is_new_addr = false; + int status; + + /* mac addr check */ + if (!is_multicast_ether_addr(addr)) { + hnae3_format_mac_addr(format_mac_addr, addr); + dev_err(&hdev->pdev->dev, + "Add mc mac err! 
invalid mac:%s.\n", + format_mac_addr); + return -EINVAL; + } + memset(&req, 0, sizeof(req)); + hclge_prepare_mac_addr(&req, addr, true); + status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); + if (status) { + if (hnae3_ae_dev_mc_mac_mng_supported(hdev->ae_dev) && + hdev->used_mc_mac_num >= + hdev->ae_dev->dev_specs.mc_mac_size) + goto err_no_space; + + is_new_addr = true; + + /* This mac addr do not exist, add new entry for it */ + memset(desc[0].data, 0, sizeof(desc[0].data)); + memset(desc[1].data, 0, sizeof(desc[0].data)); + memset(desc[2].data, 0, sizeof(desc[0].data)); + } + status = hclge_update_desc_vfid(desc, vport->vport_id, false); + if (status) + return status; + status = hclge_add_mac_vlan_tbl(vport, &req, desc); + if (status == -ENOSPC) + goto err_no_space; + else if (!status && is_new_addr) + hdev->used_mc_mac_num++; + + return status; + +err_no_space: + /* if already overflow, not to print each time */ + if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE)) { + vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE; + dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n"); + } + + return -ENOSPC; +} + +static int hclge_rm_mc_addr(struct hnae3_handle *handle, + const unsigned char *addr) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + + return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC, + addr); +} + +int hclge_rm_mc_addr_common(struct hclge_vport *vport, + const unsigned char *addr) +{ + char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; + struct hclge_dev *hdev = vport->back; + struct hclge_mac_vlan_tbl_entry_cmd req; + enum hclge_comm_cmd_status status; + struct hclge_desc desc[3]; + + /* mac addr check */ + if (!is_multicast_ether_addr(addr)) { + hnae3_format_mac_addr(format_mac_addr, addr); + dev_dbg(&hdev->pdev->dev, + "Remove mc mac err! invalid mac:%s.\n", + format_mac_addr); + return -EINVAL; + } + + memset(&req, 0, sizeof(req)); + hclge_prepare_mac_addr(&req, addr, true); + status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); + if (!status) { + /* This mac addr exist, remove this handle's VFID for it */ + status = hclge_update_desc_vfid(desc, vport->vport_id, true); + if (status) + return status; + + if (hclge_is_all_function_id_zero(desc)) { + /* All the vfid is zero, so need to delete this entry */ + status = hclge_remove_mac_vlan_tbl(vport, &req); + if (!status) + hdev->used_mc_mac_num--; + } else { + /* Not all the vfid is zero, update the vfid */ + status = hclge_add_mac_vlan_tbl(vport, &req, desc); + } + } else if (status == -ENOENT) { + status = 0; + } + + return status; +} + +static void hclge_sync_vport_mac_list(struct hclge_vport *vport, + struct list_head *list, + enum HCLGE_MAC_ADDR_TYPE mac_type) +{ + int (*sync)(struct hclge_vport *vport, const unsigned char *addr); + struct hclge_mac_node *mac_node, *tmp; + int ret; + + if (mac_type == HCLGE_MAC_ADDR_UC) + sync = hclge_add_uc_addr_common; + else + sync = hclge_add_mc_addr_common; + + list_for_each_entry_safe(mac_node, tmp, list, node) { + ret = sync(vport, mac_node->mac_addr); + if (!ret) { + mac_node->state = HCLGE_MAC_ACTIVE; + } else { + set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, + &vport->state); + + /* If one unicast mac address is existing in hardware, + * we need to try whether other unicast mac addresses + * are new addresses that can be added. + * Multicast mac address can be reusable, even though + * there is no space to add new multicast mac address, + * we should check whether other mac addresses are + * existing in hardware for reuse. 
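+ * So only -EEXIST (for UC) and -ENOSPC (for MC) let the walk + * continue; any other error stops syncing the remaining addresses. 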
+ */ + if ((mac_type == HCLGE_MAC_ADDR_UC && ret != -EEXIST) || + (mac_type == HCLGE_MAC_ADDR_MC && ret != -ENOSPC)) + break; + } + } +} + +static void hclge_unsync_vport_mac_list(struct hclge_vport *vport, + struct list_head *list, + enum HCLGE_MAC_ADDR_TYPE mac_type) +{ + int (*unsync)(struct hclge_vport *vport, const unsigned char *addr); + struct hclge_mac_node *mac_node, *tmp; + int ret; + + if (mac_type == HCLGE_MAC_ADDR_UC) + unsync = hclge_rm_uc_addr_common; + else + unsync = hclge_rm_mc_addr_common; + + list_for_each_entry_safe(mac_node, tmp, list, node) { + ret = unsync(vport, mac_node->mac_addr); + if (!ret || ret == -ENOENT) { + list_del(&mac_node->node); + kfree(mac_node); + } else { + set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, + &vport->state); + break; + } + } +} + +static bool hclge_sync_from_add_list(struct list_head *add_list, + struct list_head *mac_list) +{ + struct hclge_mac_node *mac_node, *tmp, *new_node; + bool all_added = true; + + list_for_each_entry_safe(mac_node, tmp, add_list, node) { + if (mac_node->state == HCLGE_MAC_TO_ADD) + all_added = false; + + /* if the mac address from tmp_add_list is not in the + * uc/mc_mac_list, it means have received a TO_DEL request + * during the time window of adding the mac address into mac + * table. if mac_node state is ACTIVE, then change it to TO_DEL, + * then it will be removed at next time. else it must be TO_ADD, + * this address hasn't been added into mac table, + * so just remove the mac node. + */ + new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr); + if (new_node) { + hclge_update_mac_node(new_node, mac_node->state); + list_del(&mac_node->node); + kfree(mac_node); + } else if (mac_node->state == HCLGE_MAC_ACTIVE) { + mac_node->state = HCLGE_MAC_TO_DEL; + list_move_tail(&mac_node->node, mac_list); + } else { + list_del(&mac_node->node); + kfree(mac_node); + } + } + + return all_added; +} + +static void hclge_sync_from_del_list(struct list_head *del_list, + struct list_head *mac_list) +{ + struct hclge_mac_node *mac_node, *tmp, *new_node; + + list_for_each_entry_safe(mac_node, tmp, del_list, node) { + new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr); + if (new_node) { + /* If the mac addr exists in the mac list, it means + * received a new TO_ADD request during the time window + * of configuring the mac address. For the mac node + * state is TO_ADD, and the address is already in the + * in the hardware(due to delete fail), so we just need + * to change the mac node state to ACTIVE. 
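+ * The TO_ADD node therefore does not need to be written to
+ * hardware again; flipping it to ACTIVE keeps the address list
+ * and the hardware table consistent.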
+ */ + new_node->state = HCLGE_MAC_ACTIVE; + list_del(&mac_node->node); + kfree(mac_node); + } else { + list_move_tail(&mac_node->node, mac_list); + } + } +} + +static void hclge_update_overflow_flags(struct hclge_vport *vport, + enum HCLGE_MAC_ADDR_TYPE mac_type, + bool is_all_added) +{ + if (mac_type == HCLGE_MAC_ADDR_UC) { + if (is_all_added) + vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE; + else if (hclge_is_umv_space_full(vport, true)) + vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE; + } else { + if (is_all_added) + vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE; + else + vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE; + } +} + +static void hclge_sync_vport_mac_table(struct hclge_vport *vport, + enum HCLGE_MAC_ADDR_TYPE mac_type) +{ + struct hclge_mac_node *mac_node, *tmp, *new_node; + struct list_head tmp_add_list, tmp_del_list; + struct list_head *list; + bool all_added; + + INIT_LIST_HEAD(&tmp_add_list); + INIT_LIST_HEAD(&tmp_del_list); + + /* move the mac addr to the tmp_add_list and tmp_del_list, then + * we can add/delete these mac addr outside the spin lock + */ + list = (mac_type == HCLGE_MAC_ADDR_UC) ? + &vport->uc_mac_list : &vport->mc_mac_list; + + spin_lock_bh(&vport->mac_list_lock); + + list_for_each_entry_safe(mac_node, tmp, list, node) { + switch (mac_node->state) { + case HCLGE_MAC_TO_DEL: + list_move_tail(&mac_node->node, &tmp_del_list); + break; + case HCLGE_MAC_TO_ADD: + new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC); + if (!new_node) + goto stop_traverse; + ether_addr_copy(new_node->mac_addr, mac_node->mac_addr); + new_node->state = mac_node->state; + list_add_tail(&new_node->node, &tmp_add_list); + break; + default: + break; + } + } + +stop_traverse: + spin_unlock_bh(&vport->mac_list_lock); + + /* delete first, in order to get max mac table space for adding */ + hclge_unsync_vport_mac_list(vport, &tmp_del_list, mac_type); + hclge_sync_vport_mac_list(vport, &tmp_add_list, mac_type); + + /* if some mac addresses were added/deleted fail, move back to the + * mac_list, and retry at next time. 
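+ * hclge_sync_from_add_list() also reports whether every address
+ * made it into hardware, which drives the overflow promisc flag
+ * update below.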
+ */ + spin_lock_bh(&vport->mac_list_lock); + + hclge_sync_from_del_list(&tmp_del_list, list); + all_added = hclge_sync_from_add_list(&tmp_add_list, list); + + spin_unlock_bh(&vport->mac_list_lock); + + hclge_update_overflow_flags(vport, mac_type, all_added); +} + +static bool hclge_need_sync_mac_table(struct hclge_vport *vport) +{ + struct hclge_dev *hdev = vport->back; + + if (test_bit(vport->vport_id, hdev->vport_config_block)) + return false; + + if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state)) + return true; + + return false; +} + +static void hclge_sync_mac_table(struct hclge_dev *hdev) +{ + int i; + + for (i = 0; i < hdev->num_alloc_vport; i++) { + struct hclge_vport *vport = &hdev->vport[i]; + + if (!hclge_need_sync_mac_table(vport)) + continue; + + hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC); + hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC); + } +} + +static void hclge_build_del_list(struct list_head *list, + bool is_del_list, + struct list_head *tmp_del_list) +{ + struct hclge_mac_node *mac_cfg, *tmp; + + list_for_each_entry_safe(mac_cfg, tmp, list, node) { + switch (mac_cfg->state) { + case HCLGE_MAC_TO_DEL: + case HCLGE_MAC_ACTIVE: + list_move_tail(&mac_cfg->node, tmp_del_list); + break; + case HCLGE_MAC_TO_ADD: + if (is_del_list) { + list_del(&mac_cfg->node); + kfree(mac_cfg); + } + break; + } + } +} + +static void hclge_unsync_del_list(struct hclge_vport *vport, + int (*unsync)(struct hclge_vport *vport, + const unsigned char *addr), + bool is_del_list, + struct list_head *tmp_del_list) +{ + struct hclge_mac_node *mac_cfg, *tmp; + int ret; + + list_for_each_entry_safe(mac_cfg, tmp, tmp_del_list, node) { + ret = unsync(vport, mac_cfg->mac_addr); + if (!ret || ret == -ENOENT) { + /* clear all mac addr from hardware, but remain these + * mac addr in the mac list, and restore them after + * vf reset finished. + */ + if (!is_del_list && + mac_cfg->state == HCLGE_MAC_ACTIVE) { + mac_cfg->state = HCLGE_MAC_TO_ADD; + } else { + list_del(&mac_cfg->node); + kfree(mac_cfg); + } + } else if (is_del_list) { + mac_cfg->state = HCLGE_MAC_TO_DEL; + } + } +} + +void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list, + enum HCLGE_MAC_ADDR_TYPE mac_type) +{ + int (*unsync)(struct hclge_vport *vport, const unsigned char *addr); + struct hclge_dev *hdev = vport->back; + struct list_head tmp_del_list, *list; + + if (mac_type == HCLGE_MAC_ADDR_UC) { + list = &vport->uc_mac_list; + unsync = hclge_rm_uc_addr_common; + } else { + list = &vport->mc_mac_list; + unsync = hclge_rm_mc_addr_common; + } + + INIT_LIST_HEAD(&tmp_del_list); + + if (!is_del_list) + set_bit(vport->vport_id, hdev->vport_config_block); + + spin_lock_bh(&vport->mac_list_lock); + + hclge_build_del_list(list, is_del_list, &tmp_del_list); + + spin_unlock_bh(&vport->mac_list_lock); + + hclge_unsync_del_list(vport, unsync, is_del_list, &tmp_del_list); + + spin_lock_bh(&vport->mac_list_lock); + + hclge_sync_from_del_list(&tmp_del_list, list); + + spin_unlock_bh(&vport->mac_list_lock); +} + +/* remove all mac address when uninitailize */ +static void hclge_uninit_vport_mac_list(struct hclge_vport *vport, + enum HCLGE_MAC_ADDR_TYPE mac_type) +{ + struct hclge_mac_node *mac_node, *tmp; + struct hclge_dev *hdev = vport->back; + struct list_head tmp_del_list, *list; + + INIT_LIST_HEAD(&tmp_del_list); + + list = (mac_type == HCLGE_MAC_ADDR_UC) ? 
+ &vport->uc_mac_list : &vport->mc_mac_list; + + spin_lock_bh(&vport->mac_list_lock); + + list_for_each_entry_safe(mac_node, tmp, list, node) { + switch (mac_node->state) { + case HCLGE_MAC_TO_DEL: + case HCLGE_MAC_ACTIVE: + list_move_tail(&mac_node->node, &tmp_del_list); + break; + case HCLGE_MAC_TO_ADD: + list_del(&mac_node->node); + kfree(mac_node); + break; + } + } + + spin_unlock_bh(&vport->mac_list_lock); + + hclge_unsync_vport_mac_list(vport, &tmp_del_list, mac_type); + + if (!list_empty(&tmp_del_list)) + dev_warn(&hdev->pdev->dev, + "uninit %s mac list for vport %u not completely.\n", + mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc", + vport->vport_id); + + list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) { + list_del(&mac_node->node); + kfree(mac_node); + } +} + +static void hclge_uninit_mac_table(struct hclge_dev *hdev) +{ + struct hclge_vport *vport; + int i; + + for (i = 0; i < hdev->num_alloc_vport; i++) { + vport = &hdev->vport[i]; + hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC); + hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC); + } +} + +static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev, + u16 cmdq_resp, u8 resp_code) +{ +#define HCLGE_ETHERTYPE_SUCCESS_ADD 0 +#define HCLGE_ETHERTYPE_ALREADY_ADD 1 +#define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2 +#define HCLGE_ETHERTYPE_KEY_CONFLICT 3 + + int return_status; + + if (cmdq_resp) { + dev_err(&hdev->pdev->dev, + "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n", + cmdq_resp); + return -EIO; + } + + switch (resp_code) { + case HCLGE_ETHERTYPE_SUCCESS_ADD: + case HCLGE_ETHERTYPE_ALREADY_ADD: + return_status = 0; + break; + case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW: + dev_err(&hdev->pdev->dev, + "add mac ethertype failed for manager table overflow.\n"); + return_status = -EIO; + break; + case HCLGE_ETHERTYPE_KEY_CONFLICT: + dev_err(&hdev->pdev->dev, + "add mac ethertype failed for key conflict.\n"); + return_status = -EIO; + break; + default: + dev_err(&hdev->pdev->dev, + "add mac ethertype failed for undefined, code=%u.\n", + resp_code); + return_status = -EIO; + } + + return return_status; +} + +static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf, + u8 *mac_addr) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; + struct hclge_dev *hdev = vport->back; + + vport = hclge_get_vf_vport(hdev, vf); + if (!vport) + return -EINVAL; + + hnae3_format_mac_addr(format_mac_addr, mac_addr); + if (ether_addr_equal(mac_addr, vport->vf_info.mac)) { + dev_info(&hdev->pdev->dev, + "Specified MAC(=%s) is same as before, no change committed!\n", + format_mac_addr); + return 0; + } + + ether_addr_copy(vport->vf_info.mac, mac_addr); + + /* there is a timewindow for PF to know VF unalive, it may + * cause send mailbox fail, but it doesn't matter, VF will + * query it when reinit. 
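+ * Hence the reset-assert notification below is best effort and
+ * its return value is ignored; the new MAC takes effect once
+ * the VF has gone through reset.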
+ */ + if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) { + dev_info(&hdev->pdev->dev, + "MAC of VF %d has been set to %s, and it will be reinitialized!\n", + vf, format_mac_addr); + (void)hclge_inform_reset_assert_to_vf(vport); + return 0; + } + + dev_info(&hdev->pdev->dev, + "MAC of VF %d has been set to %s, will be active after VF reset\n", + vf, format_mac_addr); + return 0; +} + +static int hclge_add_mgr_tbl(struct hclge_dev *hdev, + const struct hclge_mac_mgr_tbl_entry_cmd *req) +{ + struct hclge_desc desc; + u8 resp_code; + u16 retval; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false); + memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd)); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "add mac ethertype failed for cmd_send, ret =%d.\n", + ret); + return ret; + } + + resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff; + retval = le16_to_cpu(desc.retval); + + return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code); +} + +static int init_mgr_tbl(struct hclge_dev *hdev) +{ + int ret; + int i; + + for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) { + ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]); + if (ret) { + dev_err(&hdev->pdev->dev, + "add mac ethertype failed, ret =%d.\n", + ret); + return ret; + } + } + + return 0; +} + +static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + ether_addr_copy(p, hdev->hw.mac.mac_addr); +} + +int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport, + const u8 *old_addr, const u8 *new_addr) +{ + struct list_head *list = &vport->uc_mac_list; + struct hclge_mac_node *old_node, *new_node; + + new_node = hclge_find_mac_node(list, new_addr); + if (!new_node) { + new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC); + if (!new_node) + return -ENOMEM; + + new_node->state = HCLGE_MAC_TO_ADD; + ether_addr_copy(new_node->mac_addr, new_addr); + list_add(&new_node->node, list); + } else { + if (new_node->state == HCLGE_MAC_TO_DEL) + new_node->state = HCLGE_MAC_ACTIVE; + + /* make sure the new addr is in the list head, avoid dev + * addr may be not re-added into mac table for the umv space + * limitation after global/imp reset which will clear mac + * table by hardware. + */ + list_move(&new_node->node, list); + } + + if (old_addr && !ether_addr_equal(old_addr, new_addr)) { + old_node = hclge_find_mac_node(list, old_addr); + if (old_node) { + if (old_node->state == HCLGE_MAC_TO_ADD) { + list_del(&old_node->node); + kfree(old_node); + } else { + old_node->state = HCLGE_MAC_TO_DEL; + } + } + } + + set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state); + + return 0; +} + +static int hclge_set_mac_addr(struct hnae3_handle *handle, const void *p, + bool is_first) +{ + const unsigned char *new_addr = (const unsigned char *)p; + struct hclge_vport *vport = hclge_get_vport(handle); + char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; + struct hclge_dev *hdev = vport->back; + unsigned char *old_addr = NULL; + int ret; + + /* mac addr check */ + if (is_zero_ether_addr(new_addr) || + is_broadcast_ether_addr(new_addr) || + is_multicast_ether_addr(new_addr)) { + hnae3_format_mac_addr(format_mac_addr, new_addr); + dev_err(&hdev->pdev->dev, + "change uc mac err! 
invalid mac: %s.\n", + format_mac_addr); + return -EINVAL; + } + + ret = hclge_pause_addr_cfg(hdev, new_addr); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to configure mac pause address, ret = %d\n", + ret); + return ret; + } + + if (!is_first) + old_addr = hdev->hw.mac.mac_addr; + + spin_lock_bh(&vport->mac_list_lock); + ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr); + if (ret) { + hnae3_format_mac_addr(format_mac_addr, new_addr); + dev_err(&hdev->pdev->dev, + "failed to change the mac addr:%s, ret = %d\n", + format_mac_addr, ret); + spin_unlock_bh(&vport->mac_list_lock); + + if (!is_first) + hclge_pause_addr_cfg(hdev, old_addr); + + return ret; + } + /* we must update dev addr with spin lock protect, preventing dev addr + * being removed by set_rx_mode path. + */ + ether_addr_copy(hdev->hw.mac.mac_addr, new_addr); + spin_unlock_bh(&vport->mac_list_lock); + + hclge_task_schedule(hdev, 0); + + return 0; +} + +static int hclge_mii_ioctl(struct hclge_dev *hdev, struct ifreq *ifr, int cmd) +{ + struct mii_ioctl_data *data = if_mii(ifr); + + if (!hnae3_dev_phy_imp_supported(hdev)) + return -EOPNOTSUPP; + + switch (cmd) { + case SIOCGMIIPHY: + data->phy_id = hdev->hw.mac.phy_addr; + /* this command reads phy id and register at the same time */ + fallthrough; + case SIOCGMIIREG: + data->val_out = hclge_read_phy_reg(hdev, data->reg_num); + return 0; + + case SIOCSMIIREG: + return hclge_write_phy_reg(hdev, data->reg_num, data->val_in); + default: + return -EOPNOTSUPP; + } +} + +static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr, + int cmd) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + switch (cmd) { + case SIOCGHWTSTAMP: + return hclge_ptp_get_cfg(hdev, ifr); + case SIOCSHWTSTAMP: + return hclge_ptp_set_cfg(hdev, ifr); + default: + if (!hdev->hw.mac.phydev) + return hclge_mii_ioctl(hdev, ifr, cmd); + } + + return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd); +} + +static int hclge_set_port_vlan_filter_bypass(struct hclge_dev *hdev, u8 vf_id, + bool bypass_en) +{ + struct hclge_port_vlan_filter_bypass_cmd *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PORT_VLAN_BYPASS, false); + req = (struct hclge_port_vlan_filter_bypass_cmd *)desc.data; + req->vf_id = vf_id; + hnae3_set_bit(req->bypass_state, HCLGE_INGRESS_BYPASS_B, + bypass_en ? 1 : 0); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "failed to set vport%u port vlan filter bypass state, ret = %d.\n", + vf_id, ret); + + return ret; +} + +static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type, + u8 fe_type, bool filter_en, u8 vf_id) +{ + struct hclge_vlan_filter_ctrl_cmd *req; + struct hclge_desc desc; + int ret; + + /* read current vlan filter parameter */ + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true); + req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data; + req->vlan_type = vlan_type; + req->vf_id = vf_id; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, "failed to get vport%u vlan filter config, ret = %d.\n", + vf_id, ret); + return ret; + } + + /* modify and write new config parameter */ + hclge_comm_cmd_reuse_desc(&desc, false); + req->vlan_fe = filter_en ? 
+ (req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, "failed to set vport%u vlan filter, ret = %d.\n", + vf_id, ret); + + return ret; +} + +static int hclge_set_vport_vlan_filter(struct hclge_vport *vport, bool enable) +{ + struct hclge_dev *hdev = vport->back; + struct hnae3_ae_dev *ae_dev = hdev->ae_dev; + int ret; + + if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) + return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, + HCLGE_FILTER_FE_EGRESS_V1_B, + enable, vport->vport_id); + + ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, + HCLGE_FILTER_FE_EGRESS, enable, + vport->vport_id); + if (ret) + return ret; + + if (test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, ae_dev->caps)) { + ret = hclge_set_port_vlan_filter_bypass(hdev, vport->vport_id, + !enable); + } else if (!vport->vport_id) { + if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps)) + enable = false; + + ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, + HCLGE_FILTER_FE_INGRESS, + enable, 0); + } + + return ret; +} + +static bool hclge_need_enable_vport_vlan_filter(struct hclge_vport *vport) +{ + struct hnae3_handle *handle = &vport->nic; + struct hclge_vport_vlan_cfg *vlan, *tmp; + struct hclge_dev *hdev = vport->back; + + if (vport->vport_id) { + if (vport->port_base_vlan_cfg.state != + HNAE3_PORT_BASE_VLAN_DISABLE) + return true; + + if (vport->vf_info.trusted && vport->vf_info.request_uc_en) + return false; + } else if (handle->netdev_flags & HNAE3_USER_UPE) { + return false; + } + + if (!vport->req_vlan_fltr_en) + return false; + + /* compatible with former device, always enable vlan filter */ + if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps)) + return true; + + list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) + if (vlan->vlan_id != 0) + return true; + + return false; +} + +int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en) +{ + struct hclge_dev *hdev = vport->back; + bool need_en; + int ret; + + mutex_lock(&hdev->vport_lock); + + vport->req_vlan_fltr_en = request_en; + + need_en = hclge_need_enable_vport_vlan_filter(vport); + if (need_en == vport->cur_vlan_fltr_en) { + mutex_unlock(&hdev->vport_lock); + return 0; + } + + ret = hclge_set_vport_vlan_filter(vport, need_en); + if (ret) { + mutex_unlock(&hdev->vport_lock); + return ret; + } + + vport->cur_vlan_fltr_en = need_en; + + mutex_unlock(&hdev->vport_lock); + + return 0; +} + +static int hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + + return hclge_enable_vport_vlan_filter(vport, enable); +} + +static int hclge_set_vf_vlan_filter_cmd(struct hclge_dev *hdev, u16 vfid, + bool is_kill, u16 vlan, + struct hclge_desc *desc) +{ + struct hclge_vlan_filter_vf_cfg_cmd *req0; + struct hclge_vlan_filter_vf_cfg_cmd *req1; + u8 vf_byte_val; + u8 vf_byte_off; + int ret; + + hclge_cmd_setup_basic_desc(&desc[0], + HCLGE_OPC_VLAN_FILTER_VF_CFG, false); + hclge_cmd_setup_basic_desc(&desc[1], + HCLGE_OPC_VLAN_FILTER_VF_CFG, false); + + desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); + + vf_byte_off = vfid / 8; + vf_byte_val = 1 << (vfid % 8); + + req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data; + req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data; + + req0->vlan_id = cpu_to_le16(vlan); + req0->vlan_cfg = is_kill; + + if (vf_byte_off < HCLGE_MAX_VF_BYTES) + req0->vf_bitmap[vf_byte_off] = 
vf_byte_val; + else + req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val; + + ret = hclge_cmd_send(&hdev->hw, desc, 2); + if (ret) { + dev_err(&hdev->pdev->dev, + "Send vf vlan command fail, ret =%d.\n", + ret); + return ret; + } + + return 0; +} + +static int hclge_check_vf_vlan_cmd_status(struct hclge_dev *hdev, u16 vfid, + bool is_kill, struct hclge_desc *desc) +{ + struct hclge_vlan_filter_vf_cfg_cmd *req; + + req = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data; + + if (!is_kill) { +#define HCLGE_VF_VLAN_NO_ENTRY 2 + if (!req->resp_code || req->resp_code == 1) + return 0; + + if (req->resp_code == HCLGE_VF_VLAN_NO_ENTRY) { + set_bit(vfid, hdev->vf_vlan_full); + dev_warn(&hdev->pdev->dev, + "vf vlan table is full, vf vlan filter is disabled\n"); + return 0; + } + + dev_err(&hdev->pdev->dev, + "Add vf vlan filter fail, ret =%u.\n", + req->resp_code); + } else { +#define HCLGE_VF_VLAN_DEL_NO_FOUND 1 + if (!req->resp_code) + return 0; + + /* vf vlan filter is disabled when vf vlan table is full, + * then new vlan id will not be added into vf vlan table. + * Just return 0 without warning, avoid massive verbose + * print logs when unload. + */ + if (req->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) + return 0; + + dev_err(&hdev->pdev->dev, + "Kill vf vlan filter fail, ret =%u.\n", + req->resp_code); + } + + return -EIO; +} + +static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid, + bool is_kill, u16 vlan) +{ + struct hclge_vport *vport = &hdev->vport[vfid]; + struct hclge_desc desc[2]; + int ret; + + /* if vf vlan table is full, firmware will close vf vlan filter, it + * is unable and unnecessary to add new vlan id to vf vlan filter. + * If spoof check is enable, and vf vlan is full, it shouldn't add + * new vlan, because tx packets with these vlan id will be dropped. 
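+ * So with spoof check off the request is accepted silently
+ * (return 0), while with spoof check on a non-zero vlan id is
+ * rejected with -EPERM.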
+ */ + if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) { + if (vport->vf_info.spoofchk && vlan) { + dev_err(&hdev->pdev->dev, + "Can't add vlan due to spoof check is on and vf vlan table is full\n"); + return -EPERM; + } + return 0; + } + + ret = hclge_set_vf_vlan_filter_cmd(hdev, vfid, is_kill, vlan, desc); + if (ret) + return ret; + + return hclge_check_vf_vlan_cmd_status(hdev, vfid, is_kill, desc); +} + +static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto, + u16 vlan_id, bool is_kill) +{ + struct hclge_vlan_filter_pf_cfg_cmd *req; + struct hclge_desc desc; + u8 vlan_offset_byte_val; + u8 vlan_offset_byte; + u8 vlan_offset_160; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false); + + vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP; + vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) / + HCLGE_VLAN_BYTE_SIZE; + vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE); + + req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data; + req->vlan_offset = vlan_offset_160; + req->vlan_cfg = is_kill; + req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "port vlan command, send fail, ret =%d.\n", ret); + return ret; +} + +static bool hclge_need_update_port_vlan(struct hclge_dev *hdev, u16 vport_id, + u16 vlan_id, bool is_kill) +{ + /* vlan 0 may be added twice when 8021q module is enabled */ + if (!is_kill && !vlan_id && + test_bit(vport_id, hdev->vlan_table[vlan_id])) + return false; + + if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) { + dev_warn(&hdev->pdev->dev, + "Add port vlan failed, vport %u is already in vlan %u\n", + vport_id, vlan_id); + return false; + } + + if (is_kill && + !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) { + dev_warn(&hdev->pdev->dev, + "Delete port vlan failed, vport %u is not in vlan %u\n", + vport_id, vlan_id); + return false; + } + + return true; +} + +static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto, + u16 vport_id, u16 vlan_id, + bool is_kill) +{ + u16 vport_idx, vport_num = 0; + int ret; + + if (is_kill && !vlan_id) + return 0; + + if (vlan_id >= VLAN_N_VID) + return -EINVAL; + + ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id); + if (ret) { + dev_err(&hdev->pdev->dev, + "Set %u vport vlan filter config fail, ret =%d.\n", + vport_id, ret); + return ret; + } + + if (!hclge_need_update_port_vlan(hdev, vport_id, vlan_id, is_kill)) + return 0; + + for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM) + vport_num++; + + if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1)) + ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id, + is_kill); + + return ret; +} + +static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport) +{ + struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg; + struct hclge_vport_vtag_tx_cfg_cmd *req; + struct hclge_dev *hdev = vport->back; + struct hclge_desc desc; + u16 bmap_index; + int status; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false); + + req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data; + req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1); + req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B, + vcfg->accept_tag1 ? 1 : 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B, + vcfg->accept_untag1 ? 
1 : 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B, + vcfg->accept_tag2 ? 1 : 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B, + vcfg->accept_untag2 ? 1 : 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B, + vcfg->insert_tag1_en ? 1 : 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B, + vcfg->insert_tag2_en ? 1 : 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_TAG_SHIFT_MODE_EN_B, + vcfg->tag_shift_mode_en ? 1 : 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0); + + req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD; + bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD / + HCLGE_VF_NUM_PER_BYTE; + req->vf_bitmap[bmap_index] = + 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE); + + status = hclge_cmd_send(&hdev->hw, &desc, 1); + if (status) + dev_err(&hdev->pdev->dev, + "Send port txvlan cfg command fail, ret =%d\n", + status); + + return status; +} + +static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport) +{ + struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg; + struct hclge_vport_vtag_rx_cfg_cmd *req; + struct hclge_dev *hdev = vport->back; + struct hclge_desc desc; + u16 bmap_index; + int status; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false); + + req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data; + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B, + vcfg->strip_tag1_en ? 1 : 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B, + vcfg->strip_tag2_en ? 1 : 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B, + vcfg->vlan1_vlan_prionly ? 1 : 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B, + vcfg->vlan2_vlan_prionly ? 1 : 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG1_EN_B, + vcfg->strip_tag1_discard_en ? 1 : 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG2_EN_B, + vcfg->strip_tag2_discard_en ? 1 : 0); + + req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD; + bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD / + HCLGE_VF_NUM_PER_BYTE; + req->vf_bitmap[bmap_index] = + 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE); + + status = hclge_cmd_send(&hdev->hw, &desc, 1); + if (status) + dev_err(&hdev->pdev->dev, + "Send port rxvlan cfg command fail, ret =%d\n", + status); + + return status; +} + +static int hclge_vlan_offload_cfg(struct hclge_vport *vport, + u16 port_base_vlan_state, + u16 vlan_tag, u8 qos) +{ + int ret; + + if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) { + vport->txvlan_cfg.accept_tag1 = true; + vport->txvlan_cfg.insert_tag1_en = false; + vport->txvlan_cfg.default_tag1 = 0; + } else { + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(vport->nic.pdev); + + vport->txvlan_cfg.accept_tag1 = + ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3; + vport->txvlan_cfg.insert_tag1_en = true; + vport->txvlan_cfg.default_tag1 = (qos << VLAN_PRIO_SHIFT) | + vlan_tag; + } + + vport->txvlan_cfg.accept_untag1 = true; + + /* accept_tag2 and accept_untag2 are not supported on + * pdev revision(0x20), new revision support them, + * this two fields can not be configured by user. 
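+ * Both fields are therefore always set to true below; only the
+ * tag1 handling above depends on the port based VLAN state.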
+ */ + vport->txvlan_cfg.accept_tag2 = true; + vport->txvlan_cfg.accept_untag2 = true; + vport->txvlan_cfg.insert_tag2_en = false; + vport->txvlan_cfg.default_tag2 = 0; + vport->txvlan_cfg.tag_shift_mode_en = true; + + if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) { + vport->rxvlan_cfg.strip_tag1_en = false; + vport->rxvlan_cfg.strip_tag2_en = + vport->rxvlan_cfg.rx_vlan_offload_en; + vport->rxvlan_cfg.strip_tag2_discard_en = false; + } else { + vport->rxvlan_cfg.strip_tag1_en = + vport->rxvlan_cfg.rx_vlan_offload_en; + vport->rxvlan_cfg.strip_tag2_en = true; + vport->rxvlan_cfg.strip_tag2_discard_en = true; + } + + vport->rxvlan_cfg.strip_tag1_discard_en = false; + vport->rxvlan_cfg.vlan1_vlan_prionly = false; + vport->rxvlan_cfg.vlan2_vlan_prionly = false; + + ret = hclge_set_vlan_tx_offload_cfg(vport); + if (ret) + return ret; + + return hclge_set_vlan_rx_offload_cfg(vport); +} + +static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev) +{ + struct hclge_rx_vlan_type_cfg_cmd *rx_req; + struct hclge_tx_vlan_type_cfg_cmd *tx_req; + struct hclge_desc desc; + int status; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false); + rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data; + rx_req->ot_fst_vlan_type = + cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type); + rx_req->ot_sec_vlan_type = + cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type); + rx_req->in_fst_vlan_type = + cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type); + rx_req->in_sec_vlan_type = + cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type); + + status = hclge_cmd_send(&hdev->hw, &desc, 1); + if (status) { + dev_err(&hdev->pdev->dev, + "Send rxvlan protocol type command fail, ret =%d\n", + status); + return status; + } + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false); + + tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data; + tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type); + tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type); + + status = hclge_cmd_send(&hdev->hw, &desc, 1); + if (status) + dev_err(&hdev->pdev->dev, + "Send txvlan protocol type command fail, ret =%d\n", + status); + + return status; +} + +static int hclge_init_vlan_filter(struct hclge_dev *hdev) +{ + struct hclge_vport *vport; + int ret; + int i; + + if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) + return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, + HCLGE_FILTER_FE_EGRESS_V1_B, + true, 0); + + /* for revision 0x21, vf vlan filter is per function */ + for (i = 0; i < hdev->num_alloc_vport; i++) { + vport = &hdev->vport[i]; + ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, + HCLGE_FILTER_FE_EGRESS, true, + vport->vport_id); + if (ret) + return ret; + vport->cur_vlan_fltr_en = true; + } + + return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, + HCLGE_FILTER_FE_INGRESS, true, 0); +} + +static int hclge_init_vlan_type(struct hclge_dev *hdev) +{ + hdev->vlan_type_cfg.rx_in_fst_vlan_type = ETH_P_8021Q; + hdev->vlan_type_cfg.rx_in_sec_vlan_type = ETH_P_8021Q; + hdev->vlan_type_cfg.rx_ot_fst_vlan_type = ETH_P_8021Q; + hdev->vlan_type_cfg.rx_ot_sec_vlan_type = ETH_P_8021Q; + hdev->vlan_type_cfg.tx_ot_vlan_type = ETH_P_8021Q; + hdev->vlan_type_cfg.tx_in_vlan_type = ETH_P_8021Q; + + return hclge_set_vlan_protocol_type(hdev); +} + +static int hclge_init_vport_vlan_offload(struct hclge_dev *hdev) +{ + struct hclge_port_base_vlan_config *cfg; + struct hclge_vport *vport; + int ret; + int i; + + for (i = 0; i < 
hdev->num_alloc_vport; i++) { + vport = &hdev->vport[i]; + cfg = &vport->port_base_vlan_cfg; + + ret = hclge_vlan_offload_cfg(vport, cfg->state, + cfg->vlan_info.vlan_tag, + cfg->vlan_info.qos); + if (ret) + return ret; + } + return 0; +} + +static int hclge_init_vlan_config(struct hclge_dev *hdev) +{ + struct hnae3_handle *handle = &hdev->vport[0].nic; + int ret; + + ret = hclge_init_vlan_filter(hdev); + if (ret) + return ret; + + ret = hclge_init_vlan_type(hdev); + if (ret) + return ret; + + ret = hclge_init_vport_vlan_offload(hdev); + if (ret) + return ret; + + return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false); +} + +static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id, + bool writen_to_tbl) +{ + struct hclge_vport_vlan_cfg *vlan, *tmp; + struct hclge_dev *hdev = vport->back; + + mutex_lock(&hdev->vport_lock); + + list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { + if (vlan->vlan_id == vlan_id) { + mutex_unlock(&hdev->vport_lock); + return; + } + } + + vlan = kzalloc(sizeof(*vlan), GFP_KERNEL); + if (!vlan) { + mutex_unlock(&hdev->vport_lock); + return; + } + + vlan->hd_tbl_status = writen_to_tbl; + vlan->vlan_id = vlan_id; + + list_add_tail(&vlan->node, &vport->vlan_list); + mutex_unlock(&hdev->vport_lock); +} + +static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport) +{ + struct hclge_vport_vlan_cfg *vlan, *tmp; + struct hclge_dev *hdev = vport->back; + int ret; + + mutex_lock(&hdev->vport_lock); + + list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { + if (!vlan->hd_tbl_status) { + ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), + vport->vport_id, + vlan->vlan_id, false); + if (ret) { + dev_err(&hdev->pdev->dev, + "restore vport vlan list failed, ret=%d\n", + ret); + + mutex_unlock(&hdev->vport_lock); + return ret; + } + } + vlan->hd_tbl_status = true; + } + + mutex_unlock(&hdev->vport_lock); + + return 0; +} + +static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id, + bool is_write_tbl) +{ + struct hclge_vport_vlan_cfg *vlan, *tmp; + struct hclge_dev *hdev = vport->back; + + list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { + if (vlan->vlan_id == vlan_id) { + if (is_write_tbl && vlan->hd_tbl_status) + hclge_set_vlan_filter_hw(hdev, + htons(ETH_P_8021Q), + vport->vport_id, + vlan_id, + true); + + list_del(&vlan->node); + kfree(vlan); + break; + } + } +} + +void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list) +{ + struct hclge_vport_vlan_cfg *vlan, *tmp; + struct hclge_dev *hdev = vport->back; + + mutex_lock(&hdev->vport_lock); + + list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { + if (vlan->hd_tbl_status) + hclge_set_vlan_filter_hw(hdev, + htons(ETH_P_8021Q), + vport->vport_id, + vlan->vlan_id, + true); + + vlan->hd_tbl_status = false; + if (is_del_list) { + list_del(&vlan->node); + kfree(vlan); + } + } + clear_bit(vport->vport_id, hdev->vf_vlan_full); + mutex_unlock(&hdev->vport_lock); +} + +void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev) +{ + struct hclge_vport_vlan_cfg *vlan, *tmp; + struct hclge_vport *vport; + int i; + + mutex_lock(&hdev->vport_lock); + + for (i = 0; i < hdev->num_alloc_vport; i++) { + vport = &hdev->vport[i]; + list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { + list_del(&vlan->node); + kfree(vlan); + } + } + + mutex_unlock(&hdev->vport_lock); +} + +void hclge_restore_vport_port_base_vlan_config(struct hclge_dev *hdev) +{ + struct hclge_vlan_info *vlan_info; + struct 
hclge_vport *vport; + u16 vlan_proto; + u16 vlan_id; + u16 state; + int vf_id; + int ret; + + /* PF should restore all vfs port base vlan */ + for (vf_id = 0; vf_id < hdev->num_alloc_vfs; vf_id++) { + vport = &hdev->vport[vf_id + HCLGE_VF_VPORT_START_NUM]; + vlan_info = vport->port_base_vlan_cfg.tbl_sta ? + &vport->port_base_vlan_cfg.vlan_info : + &vport->port_base_vlan_cfg.old_vlan_info; + + vlan_id = vlan_info->vlan_tag; + vlan_proto = vlan_info->vlan_proto; + state = vport->port_base_vlan_cfg.state; + + if (state != HNAE3_PORT_BASE_VLAN_DISABLE) { + clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]); + ret = hclge_set_vlan_filter_hw(hdev, htons(vlan_proto), + vport->vport_id, + vlan_id, false); + vport->port_base_vlan_cfg.tbl_sta = ret == 0; + } + } +} + +void hclge_restore_vport_vlan_table(struct hclge_vport *vport) +{ + struct hclge_vport_vlan_cfg *vlan, *tmp; + struct hclge_dev *hdev = vport->back; + int ret; + + mutex_lock(&hdev->vport_lock); + + if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) { + list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { + ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), + vport->vport_id, + vlan->vlan_id, false); + if (ret) + break; + vlan->hd_tbl_status = true; + } + } + + mutex_unlock(&hdev->vport_lock); +} + +/* For global reset and imp reset, hardware will clear the mac table, + * so we change the mac address state from ACTIVE to TO_ADD, then they + * can be restored in the service task after reset complete. Furtherly, + * the mac addresses with state TO_DEL or DEL_FAIL are unnecessary to + * be restored after reset, so just remove these mac nodes from mac_list. + */ +static void hclge_mac_node_convert_for_reset(struct list_head *list) +{ + struct hclge_mac_node *mac_node, *tmp; + + list_for_each_entry_safe(mac_node, tmp, list, node) { + if (mac_node->state == HCLGE_MAC_ACTIVE) { + mac_node->state = HCLGE_MAC_TO_ADD; + } else if (mac_node->state == HCLGE_MAC_TO_DEL) { + list_del(&mac_node->node); + kfree(mac_node); + } + } +} + +void hclge_restore_mac_table_common(struct hclge_vport *vport) +{ + spin_lock_bh(&vport->mac_list_lock); + + hclge_mac_node_convert_for_reset(&vport->uc_mac_list); + hclge_mac_node_convert_for_reset(&vport->mc_mac_list); + set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state); + + spin_unlock_bh(&vport->mac_list_lock); +} + +static void hclge_restore_hw_table(struct hclge_dev *hdev) +{ + struct hclge_vport *vport = &hdev->vport[0]; + struct hnae3_handle *handle = &vport->nic; + + hclge_restore_mac_table_common(vport); + hclge_restore_vport_port_base_vlan_config(hdev); + hclge_restore_vport_vlan_table(vport); + set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state); + hclge_restore_fd_entries(handle); +} + +int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + + if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) { + vport->rxvlan_cfg.strip_tag1_en = false; + vport->rxvlan_cfg.strip_tag2_en = enable; + vport->rxvlan_cfg.strip_tag2_discard_en = false; + } else { + vport->rxvlan_cfg.strip_tag1_en = enable; + vport->rxvlan_cfg.strip_tag2_en = true; + vport->rxvlan_cfg.strip_tag2_discard_en = true; + } + + vport->rxvlan_cfg.strip_tag1_discard_en = false; + vport->rxvlan_cfg.vlan1_vlan_prionly = false; + vport->rxvlan_cfg.vlan2_vlan_prionly = false; + vport->rxvlan_cfg.rx_vlan_offload_en = enable; + + return hclge_set_vlan_rx_offload_cfg(vport); +} + +static void 
hclge_set_vport_vlan_fltr_change(struct hclge_vport *vport) +{ + struct hclge_dev *hdev = vport->back; + + if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps)) + set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE, &vport->state); +} + +static int hclge_update_vlan_filter_entries(struct hclge_vport *vport, + u16 port_base_vlan_state, + struct hclge_vlan_info *new_info, + struct hclge_vlan_info *old_info) +{ + struct hclge_dev *hdev = vport->back; + int ret; + + if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) { + hclge_rm_vport_all_vlan_table(vport, false); + /* force clear VLAN 0 */ + ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, true, 0); + if (ret) + return ret; + return hclge_set_vlan_filter_hw(hdev, + htons(new_info->vlan_proto), + vport->vport_id, + new_info->vlan_tag, + false); + } + + vport->port_base_vlan_cfg.tbl_sta = false; + + /* force add VLAN 0 */ + ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, false, 0); + if (ret) + return ret; + + ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto), + vport->vport_id, old_info->vlan_tag, + true); + if (ret) + return ret; + + return hclge_add_vport_all_vlan_table(vport); +} + +static bool hclge_need_update_vlan_filter(const struct hclge_vlan_info *new_cfg, + const struct hclge_vlan_info *old_cfg) +{ + if (new_cfg->vlan_tag != old_cfg->vlan_tag) + return true; + + if (new_cfg->vlan_tag == 0 && (new_cfg->qos == 0 || old_cfg->qos == 0)) + return true; + + return false; +} + +static int hclge_modify_port_base_vlan_tag(struct hclge_vport *vport, + struct hclge_vlan_info *new_info, + struct hclge_vlan_info *old_info) +{ + struct hclge_dev *hdev = vport->back; + int ret; + + /* add new VLAN tag */ + ret = hclge_set_vlan_filter_hw(hdev, htons(new_info->vlan_proto), + vport->vport_id, new_info->vlan_tag, + false); + if (ret) + return ret; + + vport->port_base_vlan_cfg.tbl_sta = false; + /* remove old VLAN tag */ + if (old_info->vlan_tag == 0) + ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, + true, 0); + else + ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), + vport->vport_id, + old_info->vlan_tag, true); + if (ret) + dev_err(&hdev->pdev->dev, + "failed to clear vport%u port base vlan %u, ret = %d.\n", + vport->vport_id, old_info->vlan_tag, ret); + + return ret; +} + +int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state, + struct hclge_vlan_info *vlan_info) +{ + struct hnae3_handle *nic = &vport->nic; + struct hclge_vlan_info *old_vlan_info; + int ret; + + old_vlan_info = &vport->port_base_vlan_cfg.vlan_info; + + ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag, + vlan_info->qos); + if (ret) + return ret; + + if (!hclge_need_update_vlan_filter(vlan_info, old_vlan_info)) + goto out; + + if (state == HNAE3_PORT_BASE_VLAN_MODIFY) + ret = hclge_modify_port_base_vlan_tag(vport, vlan_info, + old_vlan_info); + else + ret = hclge_update_vlan_filter_entries(vport, state, vlan_info, + old_vlan_info); + if (ret) + return ret; + +out: + vport->port_base_vlan_cfg.state = state; + if (state == HNAE3_PORT_BASE_VLAN_DISABLE) + nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE; + else + nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE; + + vport->port_base_vlan_cfg.old_vlan_info = *old_vlan_info; + vport->port_base_vlan_cfg.vlan_info = *vlan_info; + vport->port_base_vlan_cfg.tbl_sta = true; + hclge_set_vport_vlan_fltr_change(vport); + + return 0; +} + +static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport, + enum hnae3_port_base_vlan_state state, 
+ u16 vlan, u8 qos) +{ + if (state == HNAE3_PORT_BASE_VLAN_DISABLE) { + if (!vlan && !qos) + return HNAE3_PORT_BASE_VLAN_NOCHANGE; + + return HNAE3_PORT_BASE_VLAN_ENABLE; + } + + if (!vlan && !qos) + return HNAE3_PORT_BASE_VLAN_DISABLE; + + if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan && + vport->port_base_vlan_cfg.vlan_info.qos == qos) + return HNAE3_PORT_BASE_VLAN_NOCHANGE; + + return HNAE3_PORT_BASE_VLAN_MODIFY; +} + +static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid, + u16 vlan, u8 qos, __be16 proto) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev); + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct hclge_vlan_info vlan_info; + u16 state; + int ret; + + if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) + return -EOPNOTSUPP; + + vport = hclge_get_vf_vport(hdev, vfid); + if (!vport) + return -EINVAL; + + /* qos is a 3 bits value, so can not be bigger than 7 */ + if (vlan > VLAN_N_VID - 1 || qos > 7) + return -EINVAL; + if (proto != htons(ETH_P_8021Q)) + return -EPROTONOSUPPORT; + + state = hclge_get_port_base_vlan_state(vport, + vport->port_base_vlan_cfg.state, + vlan, qos); + if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE) + return 0; + + vlan_info.vlan_tag = vlan; + vlan_info.qos = qos; + vlan_info.vlan_proto = ntohs(proto); + + ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to update port base vlan for vf %d, ret = %d\n", + vfid, ret); + return ret; + } + + /* there is a timewindow for PF to know VF unalive, it may + * cause send mailbox fail, but it doesn't matter, VF will + * query it when reinit. + * for DEVICE_VERSION_V3, vf doesn't need to know about the port based + * VLAN state. + */ + if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) { + if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) + (void)hclge_push_vf_port_base_vlan_info(&hdev->vport[0], + vport->vport_id, + state, + &vlan_info); + else + set_bit(HCLGE_VPORT_NEED_NOTIFY_VF_VLAN, + &vport->need_notify); + } + return 0; +} + +static void hclge_clear_vf_vlan(struct hclge_dev *hdev) +{ + struct hclge_vlan_info *vlan_info; + struct hclge_vport *vport; + int ret; + int vf; + + /* clear port base vlan for all vf */ + for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) { + vport = &hdev->vport[vf]; + vlan_info = &vport->port_base_vlan_cfg.vlan_info; + + ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), + vport->vport_id, + vlan_info->vlan_tag, true); + if (ret) + dev_err(&hdev->pdev->dev, + "failed to clear vf vlan for vf%d, ret = %d\n", + vf - HCLGE_VF_VPORT_START_NUM, ret); + } +} + +int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto, + u16 vlan_id, bool is_kill) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + bool writen_to_tbl = false; + int ret = 0; + + /* When device is resetting or reset failed, firmware is unable to + * handle mailbox. Just record the vlan id, and remove it after + * reset finished. 
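+ * The vlan id is recorded in vlan_del_fail_bmap and -EBUSY is
+ * returned; hclge_sync_vlan_filter() retries the delete once
+ * the reset has finished.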
+ */ + mutex_lock(&hdev->vport_lock); + if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) || + test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) { + set_bit(vlan_id, vport->vlan_del_fail_bmap); + mutex_unlock(&hdev->vport_lock); + return -EBUSY; + } else if (!is_kill && test_bit(vlan_id, vport->vlan_del_fail_bmap)) { + clear_bit(vlan_id, vport->vlan_del_fail_bmap); + } + mutex_unlock(&hdev->vport_lock); + + /* when port base vlan enabled, we use port base vlan as the vlan + * filter entry. In this case, we don't update vlan filter table + * when user add new vlan or remove exist vlan, just update the vport + * vlan list. The vlan id in vlan list will be writen in vlan filter + * table until port base vlan disabled + */ + if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) { + ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id, + vlan_id, is_kill); + writen_to_tbl = true; + } + + if (!ret) { + if (!is_kill) { + hclge_add_vport_vlan_table(vport, vlan_id, + writen_to_tbl); + } else if (is_kill && vlan_id != 0) { + mutex_lock(&hdev->vport_lock); + hclge_rm_vport_vlan_table(vport, vlan_id, false); + mutex_unlock(&hdev->vport_lock); + } + } else if (is_kill) { + /* when remove hw vlan filter failed, record the vlan id, + * and try to remove it from hw later, to be consistence + * with stack + */ + mutex_lock(&hdev->vport_lock); + set_bit(vlan_id, vport->vlan_del_fail_bmap); + mutex_unlock(&hdev->vport_lock); + } + + hclge_set_vport_vlan_fltr_change(vport); + + return ret; +} + +static void hclge_sync_vlan_fltr_state(struct hclge_dev *hdev) +{ + struct hclge_vport *vport; + int ret; + u16 i; + + for (i = 0; i < hdev->num_alloc_vport; i++) { + vport = &hdev->vport[i]; + if (!test_and_clear_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE, + &vport->state)) + continue; + + ret = hclge_enable_vport_vlan_filter(vport, + vport->req_vlan_fltr_en); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to sync vlan filter state for vport%u, ret = %d\n", + vport->vport_id, ret); + set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE, + &vport->state); + return; + } + } +} + +static void hclge_sync_vlan_filter(struct hclge_dev *hdev) +{ +#define HCLGE_MAX_SYNC_COUNT 60 + + int i, ret, sync_cnt = 0; + u16 vlan_id; + + mutex_lock(&hdev->vport_lock); + /* start from vport 1 for PF is always alive */ + for (i = 0; i < hdev->num_alloc_vport; i++) { + struct hclge_vport *vport = &hdev->vport[i]; + + vlan_id = find_first_bit(vport->vlan_del_fail_bmap, + VLAN_N_VID); + while (vlan_id != VLAN_N_VID) { + ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), + vport->vport_id, vlan_id, + true); + if (ret && ret != -EINVAL) { + mutex_unlock(&hdev->vport_lock); + return; + } + + clear_bit(vlan_id, vport->vlan_del_fail_bmap); + hclge_rm_vport_vlan_table(vport, vlan_id, false); + hclge_set_vport_vlan_fltr_change(vport); + + sync_cnt++; + if (sync_cnt >= HCLGE_MAX_SYNC_COUNT) { + mutex_unlock(&hdev->vport_lock); + return; + } + + vlan_id = find_first_bit(vport->vlan_del_fail_bmap, + VLAN_N_VID); + } + } + mutex_unlock(&hdev->vport_lock); + + hclge_sync_vlan_fltr_state(hdev); +} + +static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps) +{ + struct hclge_config_max_frm_size_cmd *req; + struct hclge_desc desc; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false); + + req = (struct hclge_config_max_frm_size_cmd *)desc.data; + req->max_frm_size = cpu_to_le16(new_mps); + req->min_frm_size = HCLGE_MAC_MIN_FRAME; + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int 
hclge_set_mtu(struct hnae3_handle *handle, int new_mtu) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + + return hclge_set_vport_mtu(vport, new_mtu); +} + +int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu) +{ + struct hclge_dev *hdev = vport->back; + int i, max_frm_size, ret; + + /* HW supprt 2 layer vlan */ + max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN; + if (max_frm_size < HCLGE_MAC_MIN_FRAME || + max_frm_size > hdev->ae_dev->dev_specs.max_frm_size) + return -EINVAL; + + max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME); + mutex_lock(&hdev->vport_lock); + /* VF's mps must fit within hdev->mps */ + if (vport->vport_id && max_frm_size > hdev->mps) { + mutex_unlock(&hdev->vport_lock); + return -EINVAL; + } else if (vport->vport_id) { + vport->mps = max_frm_size; + mutex_unlock(&hdev->vport_lock); + return 0; + } + + /* PF's mps must be greater then VF's mps */ + for (i = 1; i < hdev->num_alloc_vport; i++) + if (max_frm_size < hdev->vport[i].mps) { + dev_err(&hdev->pdev->dev, + "failed to set pf mtu for less than vport %d, mps = %u.\n", + i, hdev->vport[i].mps); + mutex_unlock(&hdev->vport_lock); + return -EINVAL; + } + + hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); + + ret = hclge_set_mac_mtu(hdev, max_frm_size); + if (ret) { + dev_err(&hdev->pdev->dev, + "Change mtu fail, ret =%d\n", ret); + goto out; + } + + hdev->mps = max_frm_size; + vport->mps = max_frm_size; + + ret = hclge_buffer_alloc(hdev); + if (ret) + dev_err(&hdev->pdev->dev, + "Allocate buffer fail, ret =%d\n", ret); + +out: + hclge_notify_client(hdev, HNAE3_UP_CLIENT); + mutex_unlock(&hdev->vport_lock); + return ret; +} + +static int hclge_reset_tqp_cmd_send(struct hclge_dev *hdev, u16 queue_id, + bool enable) +{ + struct hclge_reset_tqp_queue_cmd *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false); + + req = (struct hclge_reset_tqp_queue_cmd *)desc.data; + req->tqp_id = cpu_to_le16(queue_id); + if (enable) + hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "Send tqp reset cmd error, status =%d\n", ret); + return ret; + } + + return 0; +} + +static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id, + u8 *reset_status) +{ + struct hclge_reset_tqp_queue_cmd *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true); + + req = (struct hclge_reset_tqp_queue_cmd *)desc.data; + req->tqp_id = cpu_to_le16(queue_id); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "Get reset status error, status =%d\n", ret); + return ret; + } + + *reset_status = hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B); + + return 0; +} + +u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id) +{ + struct hclge_comm_tqp *tqp; + struct hnae3_queue *queue; + + queue = handle->kinfo.tqp[queue_id]; + tqp = container_of(queue, struct hclge_comm_tqp, q); + + return tqp->index; +} + +static int hclge_reset_tqp_cmd(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + u16 reset_try_times = 0; + u8 reset_status; + u16 queue_gid; + int ret; + u16 i; + + for (i = 0; i < handle->kinfo.num_tqps; i++) { + queue_gid = hclge_covert_handle_qid_global(handle, i); + ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, true); + if (ret) { + dev_err(&hdev->pdev->dev, 
+ "failed to send reset tqp cmd, ret = %d\n", + ret); + return ret; + } + + while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) { + ret = hclge_get_reset_status(hdev, queue_gid, + &reset_status); + if (ret) + return ret; + + if (reset_status) + break; + + /* Wait for tqp hw reset */ + usleep_range(1000, 1200); + } + + if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) { + dev_err(&hdev->pdev->dev, + "wait for tqp hw reset timeout\n"); + return -ETIME; + } + + ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, false); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to deassert soft reset, ret = %d\n", + ret); + return ret; + } + reset_try_times = 0; + } + return 0; +} + +static int hclge_reset_rcb(struct hnae3_handle *handle) +{ +#define HCLGE_RESET_RCB_NOT_SUPPORT 0U +#define HCLGE_RESET_RCB_SUCCESS 1U + + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct hclge_reset_cmd *req; + struct hclge_desc desc; + u8 return_status; + u16 queue_gid; + int ret; + + queue_gid = hclge_covert_handle_qid_global(handle, 0); + + req = (struct hclge_reset_cmd *)desc.data; + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false); + hnae3_set_bit(req->fun_reset_rcb, HCLGE_CFG_RESET_RCB_B, 1); + req->fun_reset_rcb_vqid_start = cpu_to_le16(queue_gid); + req->fun_reset_rcb_vqid_num = cpu_to_le16(handle->kinfo.num_tqps); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to send rcb reset cmd, ret = %d\n", ret); + return ret; + } + + return_status = req->fun_reset_rcb_return_status; + if (return_status == HCLGE_RESET_RCB_SUCCESS) + return 0; + + if (return_status != HCLGE_RESET_RCB_NOT_SUPPORT) { + dev_err(&hdev->pdev->dev, "failed to reset rcb, ret = %u\n", + return_status); + return -EIO; + } + + /* if reset rcb cmd is unsupported, we need to send reset tqp cmd + * again to reset all tqps + */ + return hclge_reset_tqp_cmd(handle); +} + +int hclge_reset_tqp(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + int ret; + + /* only need to disable PF's tqp */ + if (!vport->vport_id) { + ret = hclge_tqp_enable(handle, false); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to disable tqp, ret = %d\n", ret); + return ret; + } + } + + return hclge_reset_rcb(handle); +} + +static u32 hclge_get_fw_version(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + return hdev->fw_version; +} + +static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en) +{ + struct phy_device *phydev = hdev->hw.mac.phydev; + + if (!phydev) + return; + + phy_set_asym_pause(phydev, rx_en, tx_en); +} + +static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en) +{ + int ret; + + if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) + return 0; + + ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en); + if (ret) + dev_err(&hdev->pdev->dev, + "configure pauseparam error, ret = %d.\n", ret); + + return ret; +} + +int hclge_cfg_flowctrl(struct hclge_dev *hdev) +{ + struct phy_device *phydev = hdev->hw.mac.phydev; + u16 remote_advertising = 0; + u16 local_advertising; + u32 rx_pause, tx_pause; + u8 flowctl; + + if (!phydev->link) + return 0; + + if (!phydev->autoneg) + return hclge_mac_pause_setup_hw(hdev); + + local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising); + + if (phydev->pause) + remote_advertising = LPA_PAUSE_CAP; + + if 
(phydev->asym_pause) + remote_advertising |= LPA_PAUSE_ASYM; + + flowctl = mii_resolve_flowctrl_fdx(local_advertising, + remote_advertising); + tx_pause = flowctl & FLOW_CTRL_TX; + rx_pause = flowctl & FLOW_CTRL_RX; + + if (phydev->duplex == HCLGE_MAC_HALF) { + tx_pause = 0; + rx_pause = 0; + } + + return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause); +} + +static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg, + u32 *rx_en, u32 *tx_en) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + u8 media_type = hdev->hw.mac.media_type; + + *auto_neg = (media_type == HNAE3_MEDIA_TYPE_COPPER) ? + hclge_get_autoneg(handle) : 0; + + if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) { + *rx_en = 0; + *tx_en = 0; + return; + } + + if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) { + *rx_en = 1; + *tx_en = 0; + } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) { + *tx_en = 1; + *rx_en = 0; + } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) { + *rx_en = 1; + *tx_en = 1; + } else { + *rx_en = 0; + *tx_en = 0; + } +} + +static void hclge_record_user_pauseparam(struct hclge_dev *hdev, + u32 rx_en, u32 tx_en) +{ + if (rx_en && tx_en) + hdev->fc_mode_last_time = HCLGE_FC_FULL; + else if (rx_en && !tx_en) + hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE; + else if (!rx_en && tx_en) + hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE; + else + hdev->fc_mode_last_time = HCLGE_FC_NONE; + + hdev->tm_info.fc_mode = hdev->fc_mode_last_time; +} + +static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg, + u32 rx_en, u32 tx_en) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct phy_device *phydev = hdev->hw.mac.phydev; + u32 fc_autoneg; + + if (phydev || hnae3_dev_phy_imp_supported(hdev)) { + fc_autoneg = hclge_get_autoneg(handle); + if (auto_neg != fc_autoneg) { + dev_info(&hdev->pdev->dev, + "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n"); + return -EOPNOTSUPP; + } + } + + if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) { + dev_info(&hdev->pdev->dev, + "Priority flow control enabled. Cannot set link flow control.\n"); + return -EOPNOTSUPP; + } + + hclge_set_flowctrl_adv(hdev, rx_en, tx_en); + + hclge_record_user_pauseparam(hdev, rx_en, tx_en); + + if (!auto_neg || hnae3_dev_phy_imp_supported(hdev)) + return hclge_cfg_pauseparam(hdev, rx_en, tx_en); + + if (phydev) + return phy_start_aneg(phydev); + + return -EOPNOTSUPP; +} + +static void hclge_get_ksettings_an_result(struct hnae3_handle *handle, + u8 *auto_neg, u32 *speed, u8 *duplex, u32 *lane_num) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + if (speed) + *speed = hdev->hw.mac.speed; + if (duplex) + *duplex = hdev->hw.mac.duplex; + if (auto_neg) + *auto_neg = hdev->hw.mac.autoneg; + if (lane_num) + *lane_num = hdev->hw.mac.lane_num; +} + +static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type, + u8 *module_type) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + /* When nic is down, the service task is not running, doesn't update + * the port information per second. Query the port information before + * return the media type, ensure getting the correct media information. 
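+ * Calling hclge_update_port_info() below refreshes the cached + * hdev->hw.mac fields reported here, so the media and module type + * returned reflect the current hardware state even while the periodic + * service task is stopped.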
+ */ + hclge_update_port_info(hdev); + + if (media_type) + *media_type = hdev->hw.mac.media_type; + + if (module_type) + *module_type = hdev->hw.mac.module_type; +} + +static void hclge_get_mdix_mode(struct hnae3_handle *handle, + u8 *tp_mdix_ctrl, u8 *tp_mdix) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct phy_device *phydev = hdev->hw.mac.phydev; + int mdix_ctrl, mdix, is_resolved; + unsigned int retval; + + if (!phydev) { + *tp_mdix_ctrl = ETH_TP_MDI_INVALID; + *tp_mdix = ETH_TP_MDI_INVALID; + return; + } + + phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX); + + retval = phy_read(phydev, HCLGE_PHY_CSC_REG); + mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M, + HCLGE_PHY_MDIX_CTRL_S); + + retval = phy_read(phydev, HCLGE_PHY_CSS_REG); + mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B); + is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B); + + phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER); + + switch (mdix_ctrl) { + case 0x0: + *tp_mdix_ctrl = ETH_TP_MDI; + break; + case 0x1: + *tp_mdix_ctrl = ETH_TP_MDI_X; + break; + case 0x3: + *tp_mdix_ctrl = ETH_TP_MDI_AUTO; + break; + default: + *tp_mdix_ctrl = ETH_TP_MDI_INVALID; + break; + } + + if (!is_resolved) + *tp_mdix = ETH_TP_MDI_INVALID; + else if (mdix) + *tp_mdix = ETH_TP_MDI_X; + else + *tp_mdix = ETH_TP_MDI; +} + +static void hclge_info_show(struct hclge_dev *hdev) +{ + struct hnae3_handle *handle = &hdev->vport->nic; + struct device *dev = &hdev->pdev->dev; + + dev_info(dev, "PF info begin:\n"); + + dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps); + dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc); + dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc); + dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport); + dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs); + dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map); + dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size); + dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size); + dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size); + dev_info(dev, "This is %s PF\n", + hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main"); + dev_info(dev, "DCB %s\n", + handle->kinfo.tc_info.dcb_ets_active ? "enable" : "disable"); + dev_info(dev, "MQPRIO %s\n", + handle->kinfo.tc_info.mqprio_active ? 
"enable" : "disable"); + dev_info(dev, "Default tx spare buffer size: %u\n", + hdev->tx_spare_buf_size); + + dev_info(dev, "PF info end.\n"); +} + +static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev, + struct hclge_vport *vport) +{ + struct hnae3_client *client = vport->nic.client; + struct hclge_dev *hdev = ae_dev->priv; + int rst_cnt = hdev->rst_stats.reset_cnt; + int ret; + + ret = client->ops->init_instance(&vport->nic); + if (ret) + return ret; + + set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state); + if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) || + rst_cnt != hdev->rst_stats.reset_cnt) { + ret = -EBUSY; + goto init_nic_err; + } + + /* Enable nic hw error interrupts */ + ret = hclge_config_nic_hw_error(hdev, true); + if (ret) { + dev_err(&ae_dev->pdev->dev, + "fail(%d) to enable hw error interrupts\n", ret); + goto init_nic_err; + } + + hnae3_set_client_init_flag(client, ae_dev, 1); + + if (netif_msg_drv(&hdev->vport->nic)) + hclge_info_show(hdev); + + return ret; + +init_nic_err: + clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state); + while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) + msleep(HCLGE_WAIT_RESET_DONE); + + client->ops->uninit_instance(&vport->nic, 0); + + return ret; +} + +static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev, + struct hclge_vport *vport) +{ + struct hclge_dev *hdev = ae_dev->priv; + struct hnae3_client *client; + int rst_cnt; + int ret; + + if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client || + !hdev->nic_client) + return 0; + + client = hdev->roce_client; + ret = hclge_init_roce_base_info(vport); + if (ret) + return ret; + + rst_cnt = hdev->rst_stats.reset_cnt; + ret = client->ops->init_instance(&vport->roce); + if (ret) + return ret; + + set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state); + if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) || + rst_cnt != hdev->rst_stats.reset_cnt) { + ret = -EBUSY; + goto init_roce_err; + } + + /* Enable roce ras interrupts */ + ret = hclge_config_rocee_ras_interrupt(hdev, true); + if (ret) { + dev_err(&ae_dev->pdev->dev, + "fail(%d) to enable roce ras interrupts\n", ret); + goto init_roce_err; + } + + hnae3_set_client_init_flag(client, ae_dev, 1); + + return 0; + +init_roce_err: + clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state); + while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) + msleep(HCLGE_WAIT_RESET_DONE); + + hdev->roce_client->ops->uninit_instance(&vport->roce, 0); + + return ret; +} + +static int hclge_init_client_instance(struct hnae3_client *client, + struct hnae3_ae_dev *ae_dev) +{ + struct hclge_dev *hdev = ae_dev->priv; + struct hclge_vport *vport = &hdev->vport[0]; + int ret; + + switch (client->type) { + case HNAE3_CLIENT_KNIC: + hdev->nic_client = client; + vport->nic.client = client; + ret = hclge_init_nic_client_instance(ae_dev, vport); + if (ret) + goto clear_nic; + + ret = hclge_init_roce_client_instance(ae_dev, vport); + if (ret) + goto clear_roce; + + break; + case HNAE3_CLIENT_ROCE: + if (hnae3_dev_roce_supported(hdev)) { + hdev->roce_client = client; + vport->roce.client = client; + } + + ret = hclge_init_roce_client_instance(ae_dev, vport); + if (ret) + goto clear_roce; + + break; + default: + return -EINVAL; + } + + return 0; + +clear_nic: + hdev->nic_client = NULL; + vport->nic.client = NULL; + return ret; +clear_roce: + hdev->roce_client = NULL; + vport->roce.client = NULL; + return ret; +} + +static void hclge_uninit_client_instance(struct hnae3_client *client, + struct hnae3_ae_dev *ae_dev) +{ + struct 
hclge_dev *hdev = ae_dev->priv; + struct hclge_vport *vport = &hdev->vport[0]; + + if (hdev->roce_client) { + clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state); + while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) + msleep(HCLGE_WAIT_RESET_DONE); + + hdev->roce_client->ops->uninit_instance(&vport->roce, 0); + hdev->roce_client = NULL; + vport->roce.client = NULL; + } + if (client->type == HNAE3_CLIENT_ROCE) + return; + if (hdev->nic_client && client->ops->uninit_instance) { + clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state); + while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) + msleep(HCLGE_WAIT_RESET_DONE); + + client->ops->uninit_instance(&vport->nic, 0); + hdev->nic_client = NULL; + vport->nic.client = NULL; + } +} + +static int hclge_dev_mem_map(struct hclge_dev *hdev) +{ + struct pci_dev *pdev = hdev->pdev; + struct hclge_hw *hw = &hdev->hw; + + /* for device does not have device memory, return directly */ + if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGE_MEM_BAR))) + return 0; + + hw->hw.mem_base = + devm_ioremap_wc(&pdev->dev, + pci_resource_start(pdev, HCLGE_MEM_BAR), + pci_resource_len(pdev, HCLGE_MEM_BAR)); + if (!hw->hw.mem_base) { + dev_err(&pdev->dev, "failed to map device memory\n"); + return -EFAULT; + } + + return 0; +} + +static int hclge_pci_init(struct hclge_dev *hdev) +{ + struct pci_dev *pdev = hdev->pdev; + struct hclge_hw *hw; + int ret; + + ret = pci_enable_device(pdev); + if (ret) { + dev_err(&pdev->dev, "failed to enable PCI device\n"); + return ret; + } + + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (ret) { + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (ret) { + dev_err(&pdev->dev, + "can't set consistent PCI DMA"); + goto err_disable_device; + } + dev_warn(&pdev->dev, "set DMA mask to 32 bits\n"); + } + + ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME); + if (ret) { + dev_err(&pdev->dev, "PCI request regions failed %d\n", ret); + goto err_disable_device; + } + + pci_set_master(pdev); + hw = &hdev->hw; + hw->hw.io_base = pcim_iomap(pdev, 2, 0); + if (!hw->hw.io_base) { + dev_err(&pdev->dev, "Can't map configuration register space\n"); + ret = -ENOMEM; + goto err_clr_master; + } + + ret = hclge_dev_mem_map(hdev); + if (ret) + goto err_unmap_io_base; + + hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev); + + return 0; + +err_unmap_io_base: + pcim_iounmap(pdev, hdev->hw.hw.io_base); +err_clr_master: + pci_clear_master(pdev); + pci_release_regions(pdev); +err_disable_device: + pci_disable_device(pdev); + + return ret; +} + +static void hclge_pci_uninit(struct hclge_dev *hdev) +{ + struct pci_dev *pdev = hdev->pdev; + + if (hdev->hw.hw.mem_base) + devm_iounmap(&pdev->dev, hdev->hw.hw.mem_base); + + pcim_iounmap(pdev, hdev->hw.hw.io_base); + pci_free_irq_vectors(pdev); + pci_clear_master(pdev); + pci_release_mem_regions(pdev); + pci_disable_device(pdev); +} + +static void hclge_state_init(struct hclge_dev *hdev) +{ + set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state); + set_bit(HCLGE_STATE_DOWN, &hdev->state); + clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state); + clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); + clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state); + clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state); + clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state); +} + +static void hclge_state_uninit(struct hclge_dev *hdev) +{ + set_bit(HCLGE_STATE_DOWN, &hdev->state); + set_bit(HCLGE_STATE_REMOVING, &hdev->state); + + if (hdev->reset_timer.function) + 
del_timer_sync(&hdev->reset_timer); + if (hdev->service_task.work.func) + cancel_delayed_work_sync(&hdev->service_task); +} + +static void hclge_reset_prepare_general(struct hnae3_ae_dev *ae_dev, + enum hnae3_reset_type rst_type) +{ +#define HCLGE_RESET_RETRY_WAIT_MS 500 +#define HCLGE_RESET_RETRY_CNT 5 + + struct hclge_dev *hdev = ae_dev->priv; + int retry_cnt = 0; + int ret; + + while (retry_cnt++ < HCLGE_RESET_RETRY_CNT) { + down(&hdev->reset_sem); + set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); + hdev->reset_type = rst_type; + ret = hclge_reset_prepare(hdev); + if (!ret && !hdev->reset_pending) + break; + + dev_err(&hdev->pdev->dev, + "failed to prepare to reset, ret=%d, reset_pending:0x%lx, retry_cnt:%d\n", + ret, hdev->reset_pending, retry_cnt); + clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); + up(&hdev->reset_sem); + msleep(HCLGE_RESET_RETRY_WAIT_MS); + } + + /* disable misc vector before reset done */ + hclge_enable_vector(&hdev->misc_vector, false); + set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); + + if (hdev->reset_type == HNAE3_FLR_RESET) + hdev->rst_stats.flr_rst_cnt++; +} + +static void hclge_reset_done(struct hnae3_ae_dev *ae_dev) +{ + struct hclge_dev *hdev = ae_dev->priv; + int ret; + + hclge_enable_vector(&hdev->misc_vector, true); + + ret = hclge_reset_rebuild(hdev); + if (ret) + dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret); + + hdev->reset_type = HNAE3_NONE_RESET; + clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); + up(&hdev->reset_sem); +} + +static void hclge_clear_resetting_state(struct hclge_dev *hdev) +{ + u16 i; + + for (i = 0; i < hdev->num_alloc_vport; i++) { + struct hclge_vport *vport = &hdev->vport[i]; + int ret; + + /* Send cmd to clear vport's FUNC_RST_ING */ + ret = hclge_set_vf_rst(hdev, vport->vport_id, false); + if (ret) + dev_warn(&hdev->pdev->dev, + "clear vport(%u) rst failed %d!\n", + vport->vport_id, ret); + } +} + +static int hclge_clear_hw_resource(struct hclge_dev *hdev) +{ + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CLEAR_HW_RESOURCE, false); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + /* This new command is only supported by new firmware, it will + * fail with older firmware. Error value -EOPNOSUPP can only be + * returned by older firmware running this command, to keep code + * backward compatible we will override this value and return + * success. 
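+ * In other words: treat -EOPNOTSUPP from old firmware as success and + * only propagate any other error code to the caller.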
+ */ + if (ret && ret != -EOPNOTSUPP) { + dev_err(&hdev->pdev->dev, + "failed to clear hw resource, ret = %d\n", ret); + return ret; + } + return 0; +} + +static void hclge_init_rxd_adv_layout(struct hclge_dev *hdev) +{ + if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev)) + hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 1); +} + +static void hclge_uninit_rxd_adv_layout(struct hclge_dev *hdev) +{ + if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev)) + hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 0); +} + +static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) +{ + struct pci_dev *pdev = ae_dev->pdev; + struct hclge_dev *hdev; + int ret; + + hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL); + if (!hdev) + return -ENOMEM; + + hdev->pdev = pdev; + hdev->ae_dev = ae_dev; + hdev->reset_type = HNAE3_NONE_RESET; + hdev->reset_level = HNAE3_FUNC_RESET; + ae_dev->priv = hdev; + + /* HW supprt 2 layer vlan */ + hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN; + + mutex_init(&hdev->vport_lock); + spin_lock_init(&hdev->fd_rule_lock); + sema_init(&hdev->reset_sem, 1); + + ret = hclge_pci_init(hdev); + if (ret) + goto out; + + ret = hclge_devlink_init(hdev); + if (ret) + goto err_pci_uninit; + + /* Firmware command queue initialize */ + ret = hclge_comm_cmd_queue_init(hdev->pdev, &hdev->hw.hw); + if (ret) + goto err_devlink_uninit; + + /* Firmware command initialize */ + ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, &hdev->fw_version, + true, hdev->reset_pending); + if (ret) + goto err_cmd_uninit; + + ret = hclge_clear_hw_resource(hdev); + if (ret) + goto err_cmd_uninit; + + ret = hclge_get_cap(hdev); + if (ret) + goto err_cmd_uninit; + + ret = hclge_query_dev_specs(hdev); + if (ret) { + dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n", + ret); + goto err_cmd_uninit; + } + + ret = hclge_configure(hdev); + if (ret) { + dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret); + goto err_cmd_uninit; + } + + ret = hclge_init_msi(hdev); + if (ret) { + dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret); + goto err_cmd_uninit; + } + + ret = hclge_misc_irq_init(hdev); + if (ret) + goto err_msi_uninit; + + ret = hclge_alloc_tqps(hdev); + if (ret) { + dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret); + goto err_msi_irq_uninit; + } + + ret = hclge_alloc_vport(hdev); + if (ret) + goto err_msi_irq_uninit; + + ret = hclge_map_tqp(hdev); + if (ret) + goto err_msi_irq_uninit; + + if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) { + clear_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps); + if (hnae3_dev_phy_imp_supported(hdev)) + ret = hclge_update_tp_port_info(hdev); + else + ret = hclge_mac_mdio_config(hdev); + + if (ret) + goto err_msi_irq_uninit; + } + + ret = hclge_init_umv_space(hdev); + if (ret) + goto err_mdiobus_unreg; + + ret = hclge_mac_init(hdev); + if (ret) { + dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret); + goto err_mdiobus_unreg; + } + + ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX); + if (ret) { + dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret); + goto err_mdiobus_unreg; + } + + ret = hclge_config_gro(hdev); + if (ret) + goto err_mdiobus_unreg; + + ret = hclge_init_vlan_config(hdev); + if (ret) { + dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret); + goto err_mdiobus_unreg; + } + + ret = hclge_tm_schd_init(hdev); + if (ret) { + dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret); + goto err_mdiobus_unreg; + } + + ret = 
hclge_comm_rss_init_cfg(&hdev->vport->nic, hdev->ae_dev, + &hdev->rss_cfg); + if (ret) { + dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret); + goto err_mdiobus_unreg; + } + + ret = hclge_rss_init_hw(hdev); + if (ret) { + dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret); + goto err_mdiobus_unreg; + } + + ret = init_mgr_tbl(hdev); + if (ret) { + dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret); + goto err_mdiobus_unreg; + } + + ret = hclge_init_fd_config(hdev); + if (ret) { + dev_err(&pdev->dev, + "fd table init fail, ret=%d\n", ret); + goto err_mdiobus_unreg; + } + + ret = hclge_ptp_init(hdev); + if (ret) + goto err_mdiobus_unreg; + + ret = hclge_update_port_info(hdev); + if (ret) + goto err_mdiobus_unreg; + + INIT_KFIFO(hdev->mac_tnl_log); + + hclge_dcb_ops_set(hdev); + + timer_setup(&hdev->reset_timer, hclge_reset_timer, 0); + INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task); + + hclge_clear_all_event_cause(hdev); + hclge_clear_resetting_state(hdev); + + /* Log and clear the hw errors those already occurred */ + if (hnae3_dev_ras_imp_supported(hdev)) + hclge_handle_occurred_error(hdev); + else + hclge_handle_all_hns_hw_errors(ae_dev); + + /* request delayed reset for the error recovery because an immediate + * global reset on a PF affecting pending initialization of other PFs + */ + if (ae_dev->hw_err_reset_req) { + enum hnae3_reset_type reset_level; + + reset_level = hclge_get_reset_level(ae_dev, + &ae_dev->hw_err_reset_req); + hclge_set_def_reset_request(ae_dev, reset_level); + mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL); + } + + hclge_init_rxd_adv_layout(hdev); + + /* Enable MISC vector(vector0) */ + hclge_enable_vector(&hdev->misc_vector, true); + + hclge_state_init(hdev); + hdev->last_reset_time = jiffies; + + dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n", + HCLGE_DRIVER_NAME); + + hclge_task_schedule(hdev, round_jiffies_relative(HZ)); + + return 0; + +err_mdiobus_unreg: + if (hdev->hw.mac.phydev) + mdiobus_unregister(hdev->hw.mac.mdio_bus); +err_msi_irq_uninit: + hclge_misc_irq_uninit(hdev); +err_msi_uninit: + pci_free_irq_vectors(pdev); +err_cmd_uninit: + hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw); +err_devlink_uninit: + hclge_devlink_uninit(hdev); +err_pci_uninit: + pcim_iounmap(pdev, hdev->hw.hw.io_base); + pci_clear_master(pdev); + pci_release_regions(pdev); + pci_disable_device(pdev); +out: + mutex_destroy(&hdev->vport_lock); + return ret; +} + +static void hclge_stats_clear(struct hclge_dev *hdev) +{ + memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats)); + memset(&hdev->fec_stats, 0, sizeof(hdev->fec_stats)); +} + +static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable) +{ + return hclge_config_switch_param(hdev, vf, enable, + HCLGE_SWITCH_ANTI_SPOOF_MASK); +} + +static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable) +{ + return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, + HCLGE_FILTER_FE_NIC_INGRESS_B, + enable, vf); +} + +static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable) +{ + int ret; + + ret = hclge_set_mac_spoofchk(hdev, vf, enable); + if (ret) { + dev_err(&hdev->pdev->dev, + "Set vf %d mac spoof check %s failed, ret=%d\n", + vf, enable ? "on" : "off", ret); + return ret; + } + + ret = hclge_set_vlan_spoofchk(hdev, vf, enable); + if (ret) + dev_err(&hdev->pdev->dev, + "Set vf %d vlan spoof check %s failed, ret=%d\n", + vf, enable ? 
"on" : "off", ret); + + return ret; +} + +static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf, + bool enable) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + u32 new_spoofchk = enable ? 1 : 0; + int ret; + + if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) + return -EOPNOTSUPP; + + vport = hclge_get_vf_vport(hdev, vf); + if (!vport) + return -EINVAL; + + if (vport->vf_info.spoofchk == new_spoofchk) + return 0; + + if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full)) + dev_warn(&hdev->pdev->dev, + "vf %d vlan table is full, enable spoof check may cause its packet send fail\n", + vf); + else if (enable && hclge_is_umv_space_full(vport, true)) + dev_warn(&hdev->pdev->dev, + "vf %d mac table is full, enable spoof check may cause its packet send fail\n", + vf); + + ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable); + if (ret) + return ret; + + vport->vf_info.spoofchk = new_spoofchk; + return 0; +} + +static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev) +{ + struct hclge_vport *vport = hdev->vport; + int ret; + int i; + + if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) + return 0; + + /* resume the vf spoof check state after reset */ + for (i = 0; i < hdev->num_alloc_vport; i++) { + ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, + vport->vf_info.spoofchk); + if (ret) + return ret; + + vport++; + } + + return 0; +} + +static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + u32 new_trusted = enable ? 1 : 0; + + vport = hclge_get_vf_vport(hdev, vf); + if (!vport) + return -EINVAL; + + if (vport->vf_info.trusted == new_trusted) + return 0; + + vport->vf_info.trusted = new_trusted; + set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state); + hclge_task_schedule(hdev, 0); + + return 0; +} + +static void hclge_reset_vf_rate(struct hclge_dev *hdev) +{ + int ret; + int vf; + + /* reset vf rate to default value */ + for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) { + struct hclge_vport *vport = &hdev->vport[vf]; + + vport->vf_info.max_tx_rate = 0; + ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate); + if (ret) + dev_err(&hdev->pdev->dev, + "vf%d failed to reset to default, ret=%d\n", + vf - HCLGE_VF_VPORT_START_NUM, ret); + } +} + +static int hclge_vf_rate_param_check(struct hclge_dev *hdev, + int min_tx_rate, int max_tx_rate) +{ + if (min_tx_rate != 0 || + max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) { + dev_err(&hdev->pdev->dev, + "min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n", + min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed); + return -EINVAL; + } + + return 0; +} + +static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf, + int min_tx_rate, int max_tx_rate, bool force) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + int ret; + + ret = hclge_vf_rate_param_check(hdev, min_tx_rate, max_tx_rate); + if (ret) + return ret; + + vport = hclge_get_vf_vport(hdev, vf); + if (!vport) + return -EINVAL; + + if (!force && max_tx_rate == vport->vf_info.max_tx_rate) + return 0; + + ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate); + if (ret) + return ret; + + vport->vf_info.max_tx_rate = max_tx_rate; + + return 0; +} + +static int hclge_resume_vf_rate(struct hclge_dev *hdev) +{ + struct hnae3_handle *handle = &hdev->vport->nic; + struct hclge_vport *vport; + int 
ret; + int vf; + + /* resume the vf max_tx_rate after reset */ + for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) { + vport = hclge_get_vf_vport(hdev, vf); + if (!vport) + return -EINVAL; + + /* zero means max rate, after reset, firmware already set it to + * max rate, so just continue. + */ + if (!vport->vf_info.max_tx_rate) + continue; + + ret = hclge_set_vf_rate(handle, vf, 0, + vport->vf_info.max_tx_rate, true); + if (ret) { + dev_err(&hdev->pdev->dev, + "vf%d failed to resume tx_rate:%u, ret=%d\n", + vf, vport->vf_info.max_tx_rate, ret); + return ret; + } + } + + return 0; +} + +static void hclge_reset_vport_state(struct hclge_dev *hdev) +{ + struct hclge_vport *vport = hdev->vport; + int i; + + for (i = 0; i < hdev->num_alloc_vport; i++) { + clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); + vport++; + } +} + +static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev) +{ + struct hclge_dev *hdev = ae_dev->priv; + struct pci_dev *pdev = ae_dev->pdev; + int ret; + + set_bit(HCLGE_STATE_DOWN, &hdev->state); + + hclge_stats_clear(hdev); + /* NOTE: pf reset needn't to clear or restore pf and vf table entry. + * so here should not clean table in memory. + */ + if (hdev->reset_type == HNAE3_IMP_RESET || + hdev->reset_type == HNAE3_GLOBAL_RESET) { + memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table)); + memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full)); + bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport); + hclge_reset_umv_space(hdev); + } + + ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, &hdev->fw_version, + true, hdev->reset_pending); + if (ret) { + dev_err(&pdev->dev, "Cmd queue init failed\n"); + return ret; + } + + ret = hclge_map_tqp(hdev); + if (ret) { + dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret); + return ret; + } + + ret = hclge_mac_init(hdev); + if (ret) { + dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret); + return ret; + } + + ret = hclge_tp_port_init(hdev); + if (ret) { + dev_err(&pdev->dev, "failed to init tp port, ret = %d\n", + ret); + return ret; + } + + ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX); + if (ret) { + dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret); + return ret; + } + + ret = hclge_config_gro(hdev); + if (ret) + return ret; + + ret = hclge_init_vlan_config(hdev); + if (ret) { + dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret); + return ret; + } + + ret = hclge_tm_init_hw(hdev, true); + if (ret) { + dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret); + return ret; + } + + ret = hclge_rss_init_hw(hdev); + if (ret) { + dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret); + return ret; + } + + ret = init_mgr_tbl(hdev); + if (ret) { + dev_err(&pdev->dev, + "failed to reinit manager table, ret = %d\n", ret); + return ret; + } + + ret = hclge_init_fd_config(hdev); + if (ret) { + dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret); + return ret; + } + + ret = hclge_ptp_init(hdev); + if (ret) + return ret; + + /* Log and clear the hw errors those already occurred */ + if (hnae3_dev_ras_imp_supported(hdev)) + hclge_handle_occurred_error(hdev); + else + hclge_handle_all_hns_hw_errors(ae_dev); + + /* Re-enable the hw error interrupts because + * the interrupts get disabled on global reset. 
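+ * For the same reason the ROCEE RAS interrupts are re-enabled below + * when a RoCE client is registered.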
+ */ + ret = hclge_config_nic_hw_error(hdev, true); + if (ret) { + dev_err(&pdev->dev, + "fail(%d) to re-enable NIC hw error interrupts\n", + ret); + return ret; + } + + if (hdev->roce_client) { + ret = hclge_config_rocee_ras_interrupt(hdev, true); + if (ret) { + dev_err(&pdev->dev, + "fail(%d) to re-enable roce ras interrupts\n", + ret); + return ret; + } + } + + hclge_reset_vport_state(hdev); + ret = hclge_reset_vport_spoofchk(hdev); + if (ret) + return ret; + + ret = hclge_resume_vf_rate(hdev); + if (ret) + return ret; + + hclge_init_rxd_adv_layout(hdev); + + dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n", + HCLGE_DRIVER_NAME); + + return 0; +} + +static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) +{ + struct hclge_dev *hdev = ae_dev->priv; + struct hclge_mac *mac = &hdev->hw.mac; + + hclge_reset_vf_rate(hdev); + hclge_clear_vf_vlan(hdev); + hclge_state_uninit(hdev); + hclge_ptp_uninit(hdev); + hclge_uninit_rxd_adv_layout(hdev); + hclge_uninit_mac_table(hdev); + hclge_del_all_fd_entries(hdev); + + if (mac->phydev) + mdiobus_unregister(mac->mdio_bus); + + /* Disable MISC vector(vector0) */ + hclge_enable_vector(&hdev->misc_vector, false); + synchronize_irq(hdev->misc_vector.vector_irq); + + /* Disable all hw interrupts */ + hclge_config_mac_tnl_int(hdev, false); + hclge_config_nic_hw_error(hdev, false); + hclge_config_rocee_ras_interrupt(hdev, false); + + hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw); + hclge_misc_irq_uninit(hdev); + hclge_devlink_uninit(hdev); + hclge_pci_uninit(hdev); + hclge_uninit_vport_vlan_table(hdev); + mutex_destroy(&hdev->vport_lock); + ae_dev->priv = NULL; +} + +static u32 hclge_get_max_channels(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + return min_t(u32, hdev->pf_rss_size_max, vport->alloc_tqps); +} + +static void hclge_get_channels(struct hnae3_handle *handle, + struct ethtool_channels *ch) +{ + ch->max_combined = hclge_get_max_channels(handle); + ch->other_count = 1; + ch->max_other = 1; + ch->combined_count = handle->kinfo.rss_size; +} + +static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle, + u16 *alloc_tqps, u16 *max_rss_size) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + *alloc_tqps = vport->alloc_tqps; + *max_rss_size = hdev->pf_rss_size_max; +} + +static int hclge_set_rss_tc_mode_cfg(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + u16 tc_offset[HCLGE_MAX_TC_NUM] = {0}; + struct hclge_dev *hdev = vport->back; + u16 tc_size[HCLGE_MAX_TC_NUM] = {0}; + u16 tc_valid[HCLGE_MAX_TC_NUM]; + u16 roundup_size; + unsigned int i; + + roundup_size = roundup_pow_of_two(vport->nic.kinfo.rss_size); + roundup_size = ilog2(roundup_size); + /* Set the RSS TC mode according to the new RSS size */ + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { + tc_valid[i] = 0; + + if (!(hdev->hw_tc_map & BIT(i))) + continue; + + tc_valid[i] = 1; + tc_size[i] = roundup_size; + tc_offset[i] = vport->nic.kinfo.rss_size * i; + } + + return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset, tc_valid, + tc_size); +} + +static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num, + bool rxfh_configured) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev); + struct hclge_vport *vport = hclge_get_vport(handle); + struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; + struct hclge_dev *hdev = vport->back; + u16 cur_rss_size = 
kinfo->rss_size; + u16 cur_tqps = kinfo->num_tqps; + u32 *rss_indir; + unsigned int i; + int ret; + + kinfo->req_rss_size = new_tqps_num; + + ret = hclge_tm_vport_map_update(hdev); + if (ret) { + dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret); + return ret; + } + + ret = hclge_set_rss_tc_mode_cfg(handle); + if (ret) + return ret; + + /* RSS indirection table has been configured by user */ + if (rxfh_configured) + goto out; + + /* Reinitializes the rss indirect table according to the new RSS size */ + rss_indir = kcalloc(ae_dev->dev_specs.rss_ind_tbl_size, sizeof(u32), + GFP_KERNEL); + if (!rss_indir) + return -ENOMEM; + + for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++) + rss_indir[i] = i % kinfo->rss_size; + + ret = hclge_set_rss(handle, rss_indir, NULL, 0); + if (ret) + dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n", + ret); + + kfree(rss_indir); + +out: + if (!ret) + dev_info(&hdev->pdev->dev, + "Channels changed, rss_size from %u to %u, tqps from %u to %u", + cur_rss_size, kinfo->rss_size, + cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc); + + return ret; +} + +static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit, + u32 *regs_num_64_bit) +{ + struct hclge_desc desc; + u32 total_num; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "Query register number cmd failed, ret = %d.\n", ret); + return ret; + } + + *regs_num_32_bit = le32_to_cpu(desc.data[0]); + *regs_num_64_bit = le32_to_cpu(desc.data[1]); + + total_num = *regs_num_32_bit + *regs_num_64_bit; + if (!total_num) + return -EINVAL; + + return 0; +} + +static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num, + void *data) +{ +#define HCLGE_32_BIT_REG_RTN_DATANUM 8 +#define HCLGE_32_BIT_DESC_NODATA_LEN 2 + + struct hclge_desc *desc; + u32 *reg_val = data; + __le32 *desc_data; + int nodata_num; + int cmd_num; + int i, k, n; + int ret; + + if (regs_num == 0) + return 0; + + nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN; + cmd_num = DIV_ROUND_UP(regs_num + nodata_num, + HCLGE_32_BIT_REG_RTN_DATANUM); + desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL); + if (!desc) + return -ENOMEM; + + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true); + ret = hclge_cmd_send(&hdev->hw, desc, cmd_num); + if (ret) { + dev_err(&hdev->pdev->dev, + "Query 32 bit register cmd failed, ret = %d.\n", ret); + kfree(desc); + return ret; + } + + for (i = 0; i < cmd_num; i++) { + if (i == 0) { + desc_data = (__le32 *)(&desc[i].data[0]); + n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num; + } else { + desc_data = (__le32 *)(&desc[i]); + n = HCLGE_32_BIT_REG_RTN_DATANUM; + } + for (k = 0; k < n; k++) { + *reg_val++ = le32_to_cpu(*desc_data++); + + regs_num--; + if (!regs_num) + break; + } + } + + kfree(desc); + return 0; +} + +static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num, + void *data) +{ +#define HCLGE_64_BIT_REG_RTN_DATANUM 4 +#define HCLGE_64_BIT_DESC_NODATA_LEN 1 + + struct hclge_desc *desc; + u64 *reg_val = data; + __le64 *desc_data; + int nodata_len; + int cmd_num; + int i, k, n; + int ret; + + if (regs_num == 0) + return 0; + + nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN; + cmd_num = DIV_ROUND_UP(regs_num + nodata_len, + HCLGE_64_BIT_REG_RTN_DATANUM); + desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL); + if (!desc) + return -ENOMEM; + + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, 
true); + ret = hclge_cmd_send(&hdev->hw, desc, cmd_num); + if (ret) { + dev_err(&hdev->pdev->dev, + "Query 64 bit register cmd failed, ret = %d.\n", ret); + kfree(desc); + return ret; + } + + for (i = 0; i < cmd_num; i++) { + if (i == 0) { + desc_data = (__le64 *)(&desc[i].data[0]); + n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len; + } else { + desc_data = (__le64 *)(&desc[i]); + n = HCLGE_64_BIT_REG_RTN_DATANUM; + } + for (k = 0; k < n; k++) { + *reg_val++ = le64_to_cpu(*desc_data++); + + regs_num--; + if (!regs_num) + break; + } + } + + kfree(desc); + return 0; +} + +#define MAX_SEPARATE_NUM 4 +#define SEPARATOR_VALUE 0xFDFCFBFA +#define REG_NUM_PER_LINE 4 +#define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32)) +#define REG_SEPARATOR_LINE 1 +#define REG_NUM_REMAIN_MASK 3 + +int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc) +{ + int i; + + /* initialize command BD except the last one */ + for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) { + hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, + true); + desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); + } + + /* initialize the last command BD */ + hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true); + + return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT); +} + +static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev, + int *bd_num_list, + u32 type_num) +{ + u32 entries_per_desc, desc_index, index, offset, i; + struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT]; + int ret; + + ret = hclge_query_bd_num_cmd_send(hdev, desc); + if (ret) { + dev_err(&hdev->pdev->dev, + "Get dfx bd num fail, status is %d.\n", ret); + return ret; + } + + entries_per_desc = ARRAY_SIZE(desc[0].data); + for (i = 0; i < type_num; i++) { + offset = hclge_dfx_bd_offset_list[i]; + index = offset % entries_per_desc; + desc_index = offset / entries_per_desc; + bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]); + } + + return ret; +} + +static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev, + struct hclge_desc *desc_src, int bd_num, + enum hclge_opcode_type cmd) +{ + struct hclge_desc *desc = desc_src; + int i, ret; + + hclge_cmd_setup_basic_desc(desc, cmd, true); + for (i = 0; i < bd_num - 1; i++) { + desc->flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); + desc++; + hclge_cmd_setup_basic_desc(desc, cmd, true); + } + + desc = desc_src; + ret = hclge_cmd_send(&hdev->hw, desc, bd_num); + if (ret) + dev_err(&hdev->pdev->dev, + "Query dfx reg cmd(0x%x) send fail, status is %d.\n", + cmd, ret); + + return ret; +} + +static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num, + void *data) +{ + int entries_per_desc, reg_num, separator_num, desc_index, index, i; + struct hclge_desc *desc = desc_src; + u32 *reg = data; + + entries_per_desc = ARRAY_SIZE(desc->data); + reg_num = entries_per_desc * bd_num; + separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK); + for (i = 0; i < reg_num; i++) { + index = i % entries_per_desc; + desc_index = i / entries_per_desc; + *reg++ = le32_to_cpu(desc[desc_index].data[index]); + } + for (i = 0; i < separator_num; i++) + *reg++ = SEPARATOR_VALUE; + + return reg_num + separator_num; +} + +static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len) +{ + u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list); + int data_len_per_desc, bd_num, i; + int *bd_num_list; + u32 data_len; + int ret; + + bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL); + if (!bd_num_list) + return -ENOMEM; + + ret = 
hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num); + if (ret) { + dev_err(&hdev->pdev->dev, + "Get dfx reg bd num fail, status is %d.\n", ret); + goto out; + } + + data_len_per_desc = sizeof_field(struct hclge_desc, data); + *len = 0; + for (i = 0; i < dfx_reg_type_num; i++) { + bd_num = bd_num_list[i]; + data_len = data_len_per_desc * bd_num; + *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE; + } + +out: + kfree(bd_num_list); + return ret; +} + +static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data) +{ + u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list); + int bd_num, bd_num_max, buf_len, i; + struct hclge_desc *desc_src; + int *bd_num_list; + u32 *reg = data; + int ret; + + bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL); + if (!bd_num_list) + return -ENOMEM; + + ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num); + if (ret) { + dev_err(&hdev->pdev->dev, + "Get dfx reg bd num fail, status is %d.\n", ret); + goto out; + } + + bd_num_max = bd_num_list[0]; + for (i = 1; i < dfx_reg_type_num; i++) + bd_num_max = max_t(int, bd_num_max, bd_num_list[i]); + + buf_len = sizeof(*desc_src) * bd_num_max; + desc_src = kzalloc(buf_len, GFP_KERNEL); + if (!desc_src) { + ret = -ENOMEM; + goto out; + } + + for (i = 0; i < dfx_reg_type_num; i++) { + bd_num = bd_num_list[i]; + ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num, + hclge_dfx_reg_opcode_list[i]); + if (ret) { + dev_err(&hdev->pdev->dev, + "Get dfx reg fail, status is %d.\n", ret); + break; + } + + reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg); + } + + kfree(desc_src); +out: + kfree(bd_num_list); + return ret; +} + +static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data, + struct hnae3_knic_private_info *kinfo) +{ +#define HCLGE_RING_REG_OFFSET 0x200 +#define HCLGE_RING_INT_REG_OFFSET 0x4 + + int i, j, reg_num, separator_num; + int data_num_sum; + u32 *reg = data; + + /* fetching per-PF registers valus from PF PCIe register space */ + reg_num = ARRAY_SIZE(cmdq_reg_addr_list); + separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK); + for (i = 0; i < reg_num; i++) + *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]); + for (i = 0; i < separator_num; i++) + *reg++ = SEPARATOR_VALUE; + data_num_sum = reg_num + separator_num; + + reg_num = ARRAY_SIZE(common_reg_addr_list); + separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK); + for (i = 0; i < reg_num; i++) + *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]); + for (i = 0; i < separator_num; i++) + *reg++ = SEPARATOR_VALUE; + data_num_sum += reg_num + separator_num; + + reg_num = ARRAY_SIZE(ring_reg_addr_list); + separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK); + for (j = 0; j < kinfo->num_tqps; j++) { + for (i = 0; i < reg_num; i++) + *reg++ = hclge_read_dev(&hdev->hw, + ring_reg_addr_list[i] + + HCLGE_RING_REG_OFFSET * j); + for (i = 0; i < separator_num; i++) + *reg++ = SEPARATOR_VALUE; + } + data_num_sum += (reg_num + separator_num) * kinfo->num_tqps; + + reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list); + separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK); + for (j = 0; j < hdev->num_msi_used - 1; j++) { + for (i = 0; i < reg_num; i++) + *reg++ = hclge_read_dev(&hdev->hw, + tqp_intr_reg_addr_list[i] + + HCLGE_RING_INT_REG_OFFSET * j); + for (i = 0; i < separator_num; i++) + *reg++ = SEPARATOR_VALUE; + } + data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1); + + return data_num_sum; +} + +static int 
hclge_get_regs_len(struct hnae3_handle *handle) +{ + int cmdq_lines, common_lines, ring_lines, tqp_intr_lines; + struct hnae3_knic_private_info *kinfo = &handle->kinfo; + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + int regs_num_32_bit, regs_num_64_bit, dfx_regs_len; + int regs_lines_32_bit, regs_lines_64_bit; + int ret; + + ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit); + if (ret) { + dev_err(&hdev->pdev->dev, + "Get register number failed, ret = %d.\n", ret); + return ret; + } + + ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len); + if (ret) { + dev_err(&hdev->pdev->dev, + "Get dfx reg len failed, ret = %d.\n", ret); + return ret; + } + + cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + + REG_SEPARATOR_LINE; + common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + + REG_SEPARATOR_LINE; + ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + + REG_SEPARATOR_LINE; + tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + + REG_SEPARATOR_LINE; + regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE + + REG_SEPARATOR_LINE; + regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE + + REG_SEPARATOR_LINE; + + return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps + + tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit + + regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len; +} + +static void hclge_get_regs(struct hnae3_handle *handle, u32 *version, + void *data) +{ + struct hnae3_knic_private_info *kinfo = &handle->kinfo; + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + u32 regs_num_32_bit, regs_num_64_bit; + int i, reg_num, separator_num, ret; + u32 *reg = data; + + *version = hdev->fw_version; + + ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit); + if (ret) { + dev_err(&hdev->pdev->dev, + "Get register number failed, ret = %d.\n", ret); + return; + } + + reg += hclge_fetch_pf_reg(hdev, reg, kinfo); + + ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg); + if (ret) { + dev_err(&hdev->pdev->dev, + "Get 32 bit register failed, ret = %d.\n", ret); + return; + } + reg_num = regs_num_32_bit; + reg += reg_num; + separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK); + for (i = 0; i < separator_num; i++) + *reg++ = SEPARATOR_VALUE; + + ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg); + if (ret) { + dev_err(&hdev->pdev->dev, + "Get 64 bit register failed, ret = %d.\n", ret); + return; + } + reg_num = regs_num_64_bit * 2; + reg += reg_num; + separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK); + for (i = 0; i < separator_num; i++) + *reg++ = SEPARATOR_VALUE; + + ret = hclge_get_dfx_reg(hdev, reg); + if (ret) + dev_err(&hdev->pdev->dev, + "Get dfx register failed, ret = %d.\n", ret); +} + +static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status) +{ + struct hclge_set_led_state_cmd *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false); + + req = (struct hclge_set_led_state_cmd *)desc.data; + hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M, + HCLGE_LED_LOCATE_STATE_S, locate_led_status); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "Send set led state cmd error, ret =%d\n", ret); + + return ret; +} + +enum hclge_led_status { + HCLGE_LED_OFF, + HCLGE_LED_ON, + HCLGE_LED_NO_CHANGE = 0xFF, +}; + +static int 
hclge_set_led_id(struct hnae3_handle *handle, + enum ethtool_phys_id_state status) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + switch (status) { + case ETHTOOL_ID_ACTIVE: + return hclge_set_led_status(hdev, HCLGE_LED_ON); + case ETHTOOL_ID_INACTIVE: + return hclge_set_led_status(hdev, HCLGE_LED_OFF); + default: + return -EINVAL; + } +} + +static void hclge_get_link_mode(struct hnae3_handle *handle, + unsigned long *supported, + unsigned long *advertising) +{ + unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS); + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + unsigned int idx = 0; + + for (; idx < size; idx++) { + supported[idx] = hdev->hw.mac.supported[idx]; + advertising[idx] = hdev->hw.mac.advertising[idx]; + } +} + +static int hclge_gro_en(struct hnae3_handle *handle, bool enable) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + bool gro_en_old = hdev->gro_en; + int ret; + + hdev->gro_en = enable; + ret = hclge_config_gro(hdev); + if (ret) + hdev->gro_en = gro_en_old; + + return ret; +} + +static int hclge_sync_vport_promisc_mode(struct hclge_vport *vport) +{ + struct hnae3_handle *handle = &vport->nic; + struct hclge_dev *hdev = vport->back; + bool uc_en = false; + bool mc_en = false; + u8 tmp_flags; + bool bc_en; + int ret; + + if (vport->last_promisc_flags != vport->overflow_promisc_flags) { + set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state); + vport->last_promisc_flags = vport->overflow_promisc_flags; + } + + if (!test_and_clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, + &vport->state)) + return 0; + + /* for PF */ + if (!vport->vport_id) { + tmp_flags = handle->netdev_flags | vport->last_promisc_flags; + ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE, + tmp_flags & HNAE3_MPE); + if (!ret) + set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE, + &vport->state); + else + set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, + &vport->state); + return ret; + } + + /* for VF */ + if (vport->vf_info.trusted) { + uc_en = vport->vf_info.request_uc_en > 0 || + vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE; + mc_en = vport->vf_info.request_mc_en > 0 || + vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE; + } + bc_en = vport->vf_info.request_bc_en > 0; + + ret = hclge_cmd_set_promisc_mode(hdev, vport->vport_id, uc_en, + mc_en, bc_en); + if (ret) { + set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state); + return ret; + } + hclge_set_vport_vlan_fltr_change(vport); + + return 0; +} + +static void hclge_sync_promisc_mode(struct hclge_dev *hdev) +{ + struct hclge_vport *vport; + int ret; + u16 i; + + for (i = 0; i < hdev->num_alloc_vport; i++) { + vport = &hdev->vport[i]; + + ret = hclge_sync_vport_promisc_mode(vport); + if (ret) + return; + } +} + +static bool hclge_module_existed(struct hclge_dev *hdev) +{ + struct hclge_desc desc; + u32 existed; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get SFP exist state, ret = %d\n", ret); + return false; + } + + existed = le32_to_cpu(desc.data[0]); + + return existed != 0; +} + +/* need 6 bds(total 140 bytes) in one reading + * return the number of bytes actually read, 0 means read failed. 
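+ * BD0 carries the requested offset and read length together with the + * first chunk of data; the remaining BDs carry data only, and the copy + * loop below stops once 'len' bytes have been gathered.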
+ */ +static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset, + u32 len, u8 *data) +{ + struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM]; + struct hclge_sfp_info_bd0_cmd *sfp_info_bd0; + u16 read_len; + u16 copy_len; + int ret; + int i; + + /* setup all 6 bds to read module eeprom info. */ + for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) { + hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM, + true); + + /* bd0~bd4 need next flag */ + if (i < HCLGE_SFP_INFO_CMD_NUM - 1) + desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); + } + + /* setup bd0, this bd contains offset and read length. */ + sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data; + sfp_info_bd0->offset = cpu_to_le16((u16)offset); + read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN); + sfp_info_bd0->read_len = cpu_to_le16(read_len); + + ret = hclge_cmd_send(&hdev->hw, desc, i); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get SFP eeprom info, ret = %d\n", ret); + return 0; + } + + /* copy sfp info from bd0 to out buffer. */ + copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN); + memcpy(data, sfp_info_bd0->data, copy_len); + read_len = copy_len; + + /* copy sfp info from bd1~bd5 to out buffer if needed. */ + for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) { + if (read_len >= len) + return read_len; + + copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN); + memcpy(data + read_len, desc[i].data, copy_len); + read_len += copy_len; + } + + return read_len; +} + +static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset, + u32 len, u8 *data) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + u32 read_len = 0; + u16 data_len; + + if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER) + return -EOPNOTSUPP; + + if (!hclge_module_existed(hdev)) + return -ENXIO; + + while (read_len < len) { + data_len = hclge_get_sfp_eeprom_info(hdev, + offset + read_len, + len - read_len, + data + read_len); + if (!data_len) + return -EIO; + + read_len += data_len; + } + + return 0; +} + +static int hclge_get_link_diagnosis_info(struct hnae3_handle *handle, + u32 *status_code) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct hclge_desc desc; + int ret; + + if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) + return -EOPNOTSUPP; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_DIAGNOSIS, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to query link diagnosis info, ret = %d\n", ret); + return ret; + } + + *status_code = le32_to_cpu(desc.data[0]); + return 0; +} + +/* After disable sriov, VF still has some config and info need clean, + * which configed by PF. 
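+ * The cleanup below resets the VF tx rate limit, removes the port + * based VLAN, disables spoof check and finally clears the cached + * vf_info of this vport.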
+ */ +static void hclge_clear_vport_vf_info(struct hclge_vport *vport, int vfid) +{ + struct hclge_dev *hdev = vport->back; + struct hclge_vlan_info vlan_info; + int ret; + + clear_bit(HCLGE_VPORT_STATE_INITED, &vport->state); + clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); + vport->need_notify = 0; + vport->mps = 0; + + /* after disable sriov, clean VF rate configured by PF */ + ret = hclge_tm_qs_shaper_cfg(vport, 0); + if (ret) + dev_err(&hdev->pdev->dev, + "failed to clean vf%d rate config, ret = %d\n", + vfid, ret); + + vlan_info.vlan_tag = 0; + vlan_info.qos = 0; + vlan_info.vlan_proto = ETH_P_8021Q; + ret = hclge_update_port_base_vlan_cfg(vport, + HNAE3_PORT_BASE_VLAN_DISABLE, + &vlan_info); + if (ret) + dev_err(&hdev->pdev->dev, + "failed to clean vf%d port base vlan, ret = %d\n", + vfid, ret); + + ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, false); + if (ret) + dev_err(&hdev->pdev->dev, + "failed to clean vf%d spoof config, ret = %d\n", + vfid, ret); + + memset(&vport->vf_info, 0, sizeof(vport->vf_info)); +} + +static void hclge_clean_vport_config(struct hnae3_ae_dev *ae_dev, int num_vfs) +{ + struct hclge_dev *hdev = ae_dev->priv; + struct hclge_vport *vport; + int i; + + for (i = 0; i < num_vfs; i++) { + vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM]; + + hclge_clear_vport_vf_info(vport, i); + } +} + +static int hclge_get_dscp_prio(struct hnae3_handle *h, u8 dscp, u8 *tc_mode, + u8 *priority) +{ + struct hclge_vport *vport = hclge_get_vport(h); + + if (dscp >= HNAE3_MAX_DSCP) + return -EINVAL; + + if (tc_mode) + *tc_mode = vport->nic.kinfo.tc_map_mode; + if (priority) + *priority = vport->nic.kinfo.dscp_prio[dscp] == HNAE3_PRIO_ID_INVALID ? 0 : + vport->nic.kinfo.dscp_prio[dscp]; + + return 0; +} + +static const struct hnae3_ae_ops hclge_ops = { + .init_ae_dev = hclge_init_ae_dev, + .uninit_ae_dev = hclge_uninit_ae_dev, + .reset_prepare = hclge_reset_prepare_general, + .reset_done = hclge_reset_done, + .init_client_instance = hclge_init_client_instance, + .uninit_client_instance = hclge_uninit_client_instance, + .map_ring_to_vector = hclge_map_ring_to_vector, + .unmap_ring_from_vector = hclge_unmap_ring_frm_vector, + .get_vector = hclge_get_vector, + .put_vector = hclge_put_vector, + .set_promisc_mode = hclge_set_promisc_mode, + .request_update_promisc_mode = hclge_request_update_promisc_mode, + .set_loopback = hclge_set_loopback, + .start = hclge_ae_start, + .stop = hclge_ae_stop, + .client_start = hclge_client_start, + .client_stop = hclge_client_stop, + .get_status = hclge_get_status, + .get_ksettings_an_result = hclge_get_ksettings_an_result, + .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h, + .get_media_type = hclge_get_media_type, + .check_port_speed = hclge_check_port_speed, + .get_fec_stats = hclge_get_fec_stats, + .get_fec = hclge_get_fec, + .set_fec = hclge_set_fec, + .get_rss_key_size = hclge_comm_get_rss_key_size, + .get_rss = hclge_get_rss, + .set_rss = hclge_set_rss, + .set_rss_tuple = hclge_set_rss_tuple, + .get_rss_tuple = hclge_get_rss_tuple, + .get_tc_size = hclge_get_tc_size, + .get_mac_addr = hclge_get_mac_addr, + .set_mac_addr = hclge_set_mac_addr, + .do_ioctl = hclge_do_ioctl, + .add_uc_addr = hclge_add_uc_addr, + .rm_uc_addr = hclge_rm_uc_addr, + .add_mc_addr = hclge_add_mc_addr, + .rm_mc_addr = hclge_rm_mc_addr, + .set_autoneg = hclge_set_autoneg, + .get_autoneg = hclge_get_autoneg, + .restart_autoneg = hclge_restart_autoneg, + .halt_autoneg = hclge_halt_autoneg, + .get_pauseparam = hclge_get_pauseparam, + .set_pauseparam = 
hclge_set_pauseparam, + .set_mtu = hclge_set_mtu, + .reset_queue = hclge_reset_tqp, + .get_stats = hclge_get_stats, + .get_mac_stats = hclge_get_mac_stat, + .update_stats = hclge_update_stats, + .get_strings = hclge_get_strings, + .get_sset_count = hclge_get_sset_count, + .get_fw_version = hclge_get_fw_version, + .get_mdix_mode = hclge_get_mdix_mode, + .enable_vlan_filter = hclge_enable_vlan_filter, + .set_vlan_filter = hclge_set_vlan_filter, + .set_vf_vlan_filter = hclge_set_vf_vlan_filter, + .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag, + .reset_event = hclge_reset_event, + .get_reset_level = hclge_get_reset_level, + .set_default_reset_request = hclge_set_def_reset_request, + .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info, + .set_channels = hclge_set_channels, + .get_channels = hclge_get_channels, + .get_regs_len = hclge_get_regs_len, + .get_regs = hclge_get_regs, + .set_led_id = hclge_set_led_id, + .get_link_mode = hclge_get_link_mode, + .add_fd_entry = hclge_add_fd_entry, + .del_fd_entry = hclge_del_fd_entry, + .get_fd_rule_cnt = hclge_get_fd_rule_cnt, + .get_fd_rule_info = hclge_get_fd_rule_info, + .get_fd_all_rules = hclge_get_all_rules, + .enable_fd = hclge_enable_fd, + .add_arfs_entry = hclge_add_fd_entry_by_arfs, + .dbg_read_cmd = hclge_dbg_read_cmd, + .handle_hw_ras_error = hclge_handle_hw_ras_error, + .get_hw_reset_stat = hclge_get_hw_reset_stat, + .ae_dev_resetting = hclge_ae_dev_resetting, + .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt, + .set_gro_en = hclge_gro_en, + .get_global_queue_id = hclge_covert_handle_qid_global, + .set_timer_task = hclge_set_timer_task, + .mac_connect_phy = hclge_mac_connect_phy, + .mac_disconnect_phy = hclge_mac_disconnect_phy, + .get_vf_config = hclge_get_vf_config, + .set_vf_link_state = hclge_set_vf_link_state, + .set_vf_spoofchk = hclge_set_vf_spoofchk, + .set_vf_trust = hclge_set_vf_trust, + .set_vf_rate = hclge_set_vf_rate, + .set_vf_mac = hclge_set_vf_mac, + .get_module_eeprom = hclge_get_module_eeprom, + .get_cmdq_stat = hclge_get_cmdq_stat, + .add_cls_flower = hclge_add_cls_flower, + .del_cls_flower = hclge_del_cls_flower, + .cls_flower_active = hclge_is_cls_flower_active, + .get_phy_link_ksettings = hclge_get_phy_link_ksettings, + .set_phy_link_ksettings = hclge_set_phy_link_ksettings, + .set_tx_hwts_info = hclge_ptp_set_tx_info, + .get_rx_hwts = hclge_ptp_get_rx_hwts, + .get_ts_info = hclge_ptp_get_ts_info, + .get_link_diagnosis_info = hclge_get_link_diagnosis_info, + .clean_vf_config = hclge_clean_vport_config, + .get_dscp_prio = hclge_get_dscp_prio, +}; + +static struct hnae3_ae_algo ae_algo = { + .ops = &hclge_ops, + .pdev_id_table = ae_algo_pci_tbl, +}; + +static int __init hclge_init(void) +{ + pr_info("%s is initializing\n", HCLGE_NAME); + + hclge_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGE_NAME); + if (!hclge_wq) { + pr_err("%s: failed to create workqueue\n", HCLGE_NAME); + return -ENOMEM; + } + + hnae3_register_ae_algo(&ae_algo); + + return 0; +} + +static void __exit hclge_exit(void) +{ + hnae3_unregister_ae_algo_prepare(&ae_algo); + hnae3_unregister_ae_algo(&ae_algo); + destroy_workqueue(hclge_wq); +} +module_init(hclge_init); +module_exit(hclge_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Huawei Tech. 
Co., Ltd."); +MODULE_DESCRIPTION("HCLGE Driver"); +MODULE_VERSION(HCLGE_MOD_VERSION); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h new file mode 100644 index 000000000..f6fef790e --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -0,0 +1,1149 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +// Copyright (c) 2016-2017 Hisilicon Limited. + +#ifndef __HCLGE_MAIN_H +#define __HCLGE_MAIN_H +#include <linux/fs.h> +#include <linux/types.h> +#include <linux/phy.h> +#include <linux/if_vlan.h> +#include <linux/kfifo.h> +#include <net/devlink.h> + +#include "hclge_cmd.h" +#include "hclge_ptp.h" +#include "hnae3.h" +#include "hclge_comm_rss.h" +#include "hclge_comm_tqp_stats.h" + +#define HCLGE_MOD_VERSION "1.0" +#define HCLGE_DRIVER_NAME "hclge" + +#define HCLGE_MAX_PF_NUM 8 + +#define HCLGE_VF_VPORT_START_NUM 1 + +#define HCLGE_RD_FIRST_STATS_NUM 2 +#define HCLGE_RD_OTHER_STATS_NUM 4 + +#define HCLGE_INVALID_VPORT 0xffff + +#define HCLGE_PF_CFG_BLOCK_SIZE 32 +#define HCLGE_PF_CFG_DESC_NUM \ + (HCLGE_PF_CFG_BLOCK_SIZE / HCLGE_CFG_RD_LEN_BYTES) + +#define HCLGE_VECTOR_REG_BASE 0x20000 +#define HCLGE_VECTOR_EXT_REG_BASE 0x30000 +#define HCLGE_MISC_VECTOR_REG_BASE 0x20400 + +#define HCLGE_VECTOR_REG_OFFSET 0x4 +#define HCLGE_VECTOR_REG_OFFSET_H 0x1000 +#define HCLGE_VECTOR_VF_OFFSET 0x100000 + +#define HCLGE_NIC_CSQ_DEPTH_REG 0x27008 + +/* bar registers for common func */ +#define HCLGE_GRO_EN_REG 0x28000 +#define HCLGE_RXD_ADV_LAYOUT_EN_REG 0x28008 + +/* bar registers for rcb */ +#define HCLGE_RING_RX_ADDR_L_REG 0x80000 +#define HCLGE_RING_RX_ADDR_H_REG 0x80004 +#define HCLGE_RING_RX_BD_NUM_REG 0x80008 +#define HCLGE_RING_RX_BD_LENGTH_REG 0x8000C +#define HCLGE_RING_RX_MERGE_EN_REG 0x80014 +#define HCLGE_RING_RX_TAIL_REG 0x80018 +#define HCLGE_RING_RX_HEAD_REG 0x8001C +#define HCLGE_RING_RX_FBD_NUM_REG 0x80020 +#define HCLGE_RING_RX_OFFSET_REG 0x80024 +#define HCLGE_RING_RX_FBD_OFFSET_REG 0x80028 +#define HCLGE_RING_RX_STASH_REG 0x80030 +#define HCLGE_RING_RX_BD_ERR_REG 0x80034 +#define HCLGE_RING_TX_ADDR_L_REG 0x80040 +#define HCLGE_RING_TX_ADDR_H_REG 0x80044 +#define HCLGE_RING_TX_BD_NUM_REG 0x80048 +#define HCLGE_RING_TX_PRIORITY_REG 0x8004C +#define HCLGE_RING_TX_TC_REG 0x80050 +#define HCLGE_RING_TX_MERGE_EN_REG 0x80054 +#define HCLGE_RING_TX_TAIL_REG 0x80058 +#define HCLGE_RING_TX_HEAD_REG 0x8005C +#define HCLGE_RING_TX_FBD_NUM_REG 0x80060 +#define HCLGE_RING_TX_OFFSET_REG 0x80064 +#define HCLGE_RING_TX_EBD_NUM_REG 0x80068 +#define HCLGE_RING_TX_EBD_OFFSET_REG 0x80070 +#define HCLGE_RING_TX_BD_ERR_REG 0x80074 +#define HCLGE_RING_EN_REG 0x80090 + +/* bar registers for tqp interrupt */ +#define HCLGE_TQP_INTR_CTRL_REG 0x20000 +#define HCLGE_TQP_INTR_GL0_REG 0x20100 +#define HCLGE_TQP_INTR_GL1_REG 0x20200 +#define HCLGE_TQP_INTR_GL2_REG 0x20300 +#define HCLGE_TQP_INTR_RL_REG 0x20900 + +#define HCLGE_RSS_IND_TBL_SIZE 512 + +#define HCLGE_RSS_TC_SIZE_0 1 +#define HCLGE_RSS_TC_SIZE_1 2 +#define HCLGE_RSS_TC_SIZE_2 4 +#define HCLGE_RSS_TC_SIZE_3 8 +#define HCLGE_RSS_TC_SIZE_4 16 +#define HCLGE_RSS_TC_SIZE_5 32 +#define HCLGE_RSS_TC_SIZE_6 64 +#define HCLGE_RSS_TC_SIZE_7 128 + +#define HCLGE_UMV_TBL_SIZE 3072 +#define HCLGE_DEFAULT_UMV_SPACE_PER_PF \ + (HCLGE_UMV_TBL_SIZE / HCLGE_MAX_PF_NUM) + +#define HCLGE_TQP_RESET_TRY_TIMES 200 + +#define HCLGE_PHY_PAGE_MDIX 0 +#define HCLGE_PHY_PAGE_COPPER 0 + +/* Page Selection Reg. 
*/ +#define HCLGE_PHY_PAGE_REG 22 + +/* Copper Specific Control Register */ +#define HCLGE_PHY_CSC_REG 16 + +/* Copper Specific Status Register */ +#define HCLGE_PHY_CSS_REG 17 + +#define HCLGE_PHY_MDIX_CTRL_S 5 +#define HCLGE_PHY_MDIX_CTRL_M GENMASK(6, 5) + +#define HCLGE_PHY_MDIX_STATUS_B 6 +#define HCLGE_PHY_SPEED_DUP_RESOLVE_B 11 + +#define HCLGE_GET_DFX_REG_TYPE_CNT 4 + +/* Factor used to calculate offset and bitmap of VF num */ +#define HCLGE_VF_NUM_PER_CMD 64 + +#define HCLGE_MAX_QSET_NUM 1024 + +#define HCLGE_DBG_RESET_INFO_LEN 1024 + +enum HLCGE_PORT_TYPE { + HOST_PORT, + NETWORK_PORT +}; + +#define PF_VPORT_ID 0 + +#define HCLGE_PF_ID_S 0 +#define HCLGE_PF_ID_M GENMASK(2, 0) +#define HCLGE_VF_ID_S 3 +#define HCLGE_VF_ID_M GENMASK(10, 3) +#define HCLGE_PORT_TYPE_B 11 +#define HCLGE_NETWORK_PORT_ID_S 0 +#define HCLGE_NETWORK_PORT_ID_M GENMASK(3, 0) + +/* Reset related Registers */ +#define HCLGE_PF_OTHER_INT_REG 0x20600 +#define HCLGE_MISC_RESET_STS_REG 0x20700 +#define HCLGE_MISC_VECTOR_INT_STS 0x20800 +#define HCLGE_GLOBAL_RESET_REG 0x20A00 +#define HCLGE_GLOBAL_RESET_BIT 0 +#define HCLGE_CORE_RESET_BIT 1 +#define HCLGE_IMP_RESET_BIT 2 +#define HCLGE_RESET_INT_M GENMASK(7, 5) +#define HCLGE_FUN_RST_ING 0x20C00 +#define HCLGE_FUN_RST_ING_B 0 + +/* Vector0 register bits define */ +#define HCLGE_VECTOR0_REG_PTP_INT_B 0 +#define HCLGE_VECTOR0_GLOBALRESET_INT_B 5 +#define HCLGE_VECTOR0_CORERESET_INT_B 6 +#define HCLGE_VECTOR0_IMPRESET_INT_B 7 + +/* Vector0 interrupt CMDQ event source register(RW) */ +#define HCLGE_VECTOR0_CMDQ_SRC_REG 0x27100 +/* CMDQ register bits for RX event(=MBX event) */ +#define HCLGE_VECTOR0_RX_CMDQ_INT_B 1 + +#define HCLGE_VECTOR0_IMP_RESET_INT_B 1 +#define HCLGE_VECTOR0_IMP_CMDQ_ERR_B 4U +#define HCLGE_VECTOR0_IMP_RD_POISON_B 5U +#define HCLGE_VECTOR0_ALL_MSIX_ERR_B 6U +#define HCLGE_TRIGGER_IMP_RESET_B 7U + +#define HCLGE_TQP_MEM_SIZE 0x10000 +#define HCLGE_MEM_BAR 4 +/* in the bar4, the first half is for roce, and the second half is for nic */ +#define HCLGE_NIC_MEM_OFFSET(hdev) \ + (pci_resource_len((hdev)->pdev, HCLGE_MEM_BAR) >> 1) +#define HCLGE_TQP_MEM_OFFSET(hdev, i) \ + (HCLGE_NIC_MEM_OFFSET(hdev) + HCLGE_TQP_MEM_SIZE * (i)) + +#define HCLGE_MAC_DEFAULT_FRAME \ + (ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN + ETH_DATA_LEN) +#define HCLGE_MAC_MIN_FRAME 64 +#define HCLGE_MAC_MAX_FRAME 9728 + +#define HCLGE_SUPPORT_1G_BIT BIT(0) +#define HCLGE_SUPPORT_10G_BIT BIT(1) +#define HCLGE_SUPPORT_25G_BIT BIT(2) +#define HCLGE_SUPPORT_50G_BIT BIT(3) +#define HCLGE_SUPPORT_100G_BIT BIT(4) +/* to be compatible with exsit board */ +#define HCLGE_SUPPORT_40G_BIT BIT(5) +#define HCLGE_SUPPORT_100M_BIT BIT(6) +#define HCLGE_SUPPORT_10M_BIT BIT(7) +#define HCLGE_SUPPORT_200G_BIT BIT(8) +#define HCLGE_SUPPORT_GE \ + (HCLGE_SUPPORT_1G_BIT | HCLGE_SUPPORT_100M_BIT | HCLGE_SUPPORT_10M_BIT) + +enum HCLGE_DEV_STATE { + HCLGE_STATE_REINITING, + HCLGE_STATE_DOWN, + HCLGE_STATE_DISABLED, + HCLGE_STATE_REMOVING, + HCLGE_STATE_NIC_REGISTERED, + HCLGE_STATE_ROCE_REGISTERED, + HCLGE_STATE_SERVICE_INITED, + HCLGE_STATE_RST_SERVICE_SCHED, + HCLGE_STATE_RST_HANDLING, + HCLGE_STATE_MBX_SERVICE_SCHED, + HCLGE_STATE_MBX_HANDLING, + HCLGE_STATE_ERR_SERVICE_SCHED, + HCLGE_STATE_STATISTICS_UPDATING, + HCLGE_STATE_LINK_UPDATING, + HCLGE_STATE_RST_FAIL, + HCLGE_STATE_FD_TBL_CHANGED, + HCLGE_STATE_FD_CLEAR_ALL, + HCLGE_STATE_FD_USER_DEF_CHANGED, + HCLGE_STATE_PTP_EN, + HCLGE_STATE_PTP_TX_HANDLING, + HCLGE_STATE_FEC_STATS_UPDATING, + HCLGE_STATE_MAX +}; + +enum hclge_evt_cause { + 
HCLGE_VECTOR0_EVENT_RST, + HCLGE_VECTOR0_EVENT_MBX, + HCLGE_VECTOR0_EVENT_ERR, + HCLGE_VECTOR0_EVENT_PTP, + HCLGE_VECTOR0_EVENT_OTHER, +}; + +enum HCLGE_MAC_SPEED { + HCLGE_MAC_SPEED_UNKNOWN = 0, /* unknown */ + HCLGE_MAC_SPEED_10M = 10, /* 10 Mbps */ + HCLGE_MAC_SPEED_100M = 100, /* 100 Mbps */ + HCLGE_MAC_SPEED_1G = 1000, /* 1000 Mbps = 1 Gbps */ + HCLGE_MAC_SPEED_10G = 10000, /* 10000 Mbps = 10 Gbps */ + HCLGE_MAC_SPEED_25G = 25000, /* 25000 Mbps = 25 Gbps */ + HCLGE_MAC_SPEED_40G = 40000, /* 40000 Mbps = 40 Gbps */ + HCLGE_MAC_SPEED_50G = 50000, /* 50000 Mbps = 50 Gbps */ + HCLGE_MAC_SPEED_100G = 100000, /* 100000 Mbps = 100 Gbps */ + HCLGE_MAC_SPEED_200G = 200000 /* 200000 Mbps = 200 Gbps */ +}; + +enum HCLGE_MAC_DUPLEX { + HCLGE_MAC_HALF, + HCLGE_MAC_FULL +}; + +#define QUERY_SFP_SPEED 0 +#define QUERY_ACTIVE_SPEED 1 + +struct hclge_mac { + u8 mac_id; + u8 phy_addr; + u8 flag; + u8 media_type; /* port media type, e.g. fibre/copper/backplane */ + u8 mac_addr[ETH_ALEN]; + u8 autoneg; + u8 duplex; + u8 support_autoneg; + u8 speed_type; /* 0: sfp speed, 1: active speed */ + u8 lane_num; + u32 speed; + u32 max_speed; + u32 speed_ability; /* speed ability supported by current media */ + u32 module_type; /* sub media type, e.g. kr/cr/sr/lr */ + u32 fec_mode; /* active fec mode */ + u32 user_fec_mode; + u32 fec_ability; + int link; /* store the link status of mac & phy (if phy exists) */ + struct phy_device *phydev; + struct mii_bus *mdio_bus; + phy_interface_t phy_if; + __ETHTOOL_DECLARE_LINK_MODE_MASK(supported); + __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising); +}; + +struct hclge_hw { + struct hclge_comm_hw hw; + struct hclge_mac mac; + int num_vec; +}; + +enum hclge_fc_mode { + HCLGE_FC_NONE, + HCLGE_FC_RX_PAUSE, + HCLGE_FC_TX_PAUSE, + HCLGE_FC_FULL, + HCLGE_FC_PFC, + HCLGE_FC_DEFAULT +}; + +#define HCLGE_FILTER_TYPE_VF 0 +#define HCLGE_FILTER_TYPE_PORT 1 +#define HCLGE_FILTER_FE_EGRESS_V1_B BIT(0) +#define HCLGE_FILTER_FE_NIC_INGRESS_B BIT(0) +#define HCLGE_FILTER_FE_NIC_EGRESS_B BIT(1) +#define HCLGE_FILTER_FE_ROCE_INGRESS_B BIT(2) +#define HCLGE_FILTER_FE_ROCE_EGRESS_B BIT(3) +#define HCLGE_FILTER_FE_EGRESS (HCLGE_FILTER_FE_NIC_EGRESS_B \ + | HCLGE_FILTER_FE_ROCE_EGRESS_B) +#define HCLGE_FILTER_FE_INGRESS (HCLGE_FILTER_FE_NIC_INGRESS_B \ + | HCLGE_FILTER_FE_ROCE_INGRESS_B) + +enum hclge_vlan_fltr_cap { + HCLGE_VLAN_FLTR_DEF, + HCLGE_VLAN_FLTR_CAN_MDF, +}; +enum hclge_link_fail_code { + HCLGE_LF_NORMAL, + HCLGE_LF_REF_CLOCK_LOST, + HCLGE_LF_XSFP_TX_DISABLE, + HCLGE_LF_XSFP_ABSENT, +}; + +#define HCLGE_LINK_STATUS_DOWN 0 +#define HCLGE_LINK_STATUS_UP 1 + +#define HCLGE_PG_NUM 4 +#define HCLGE_SCH_MODE_SP 0 +#define HCLGE_SCH_MODE_DWRR 1 +struct hclge_pg_info { + u8 pg_id; + u8 pg_sch_mode; /* 0: sp; 1: dwrr */ + u8 tc_bit_map; + u32 bw_limit; + u8 tc_dwrr[HNAE3_MAX_TC]; +}; + +struct hclge_tc_info { + u8 tc_id; + u8 tc_sch_mode; /* 0: sp; 1: dwrr */ + u8 pgid; + u32 bw_limit; +}; + +struct hclge_cfg { + u8 tc_num; + u8 vlan_fliter_cap; + u16 tqp_desc_num; + u16 rx_buf_len; + u16 vf_rss_size_max; + u16 pf_rss_size_max; + u8 phy_addr; + u8 media_type; + u8 mac_addr[ETH_ALEN]; + u8 default_speed; + u32 numa_node_map; + u32 tx_spare_buf_size; + u16 speed_ability; + u16 umv_space; +}; + +struct hclge_tm_info { + u8 num_tc; + u8 num_pg; /* It must be 1 if vNET-Base schd */ + u8 pg_dwrr[HCLGE_PG_NUM]; + u8 prio_tc[HNAE3_MAX_USER_PRIO]; + struct hclge_pg_info pg_info[HCLGE_PG_NUM]; + struct hclge_tc_info tc_info[HNAE3_MAX_TC]; + enum hclge_fc_mode fc_mode; + u8 hw_pfc_map; /* Allow for packet 
drop or not on this TC */ + u8 pfc_en; /* PFC enabled or not for user priority */ +}; + +/* max number of mac statistics on each version */ +#define HCLGE_MAC_STATS_MAX_NUM_V1 87 +#define HCLGE_MAC_STATS_MAX_NUM_V2 105 + +struct hclge_comm_stats_str { + char desc[ETH_GSTRING_LEN]; + u32 stats_num; + unsigned long offset; +}; + +/* mac stats ,opcode id: 0x0032 */ +struct hclge_mac_stats { + u64 mac_tx_mac_pause_num; + u64 mac_rx_mac_pause_num; + u64 rsv0; + u64 mac_tx_pfc_pri0_pkt_num; + u64 mac_tx_pfc_pri1_pkt_num; + u64 mac_tx_pfc_pri2_pkt_num; + u64 mac_tx_pfc_pri3_pkt_num; + u64 mac_tx_pfc_pri4_pkt_num; + u64 mac_tx_pfc_pri5_pkt_num; + u64 mac_tx_pfc_pri6_pkt_num; + u64 mac_tx_pfc_pri7_pkt_num; + u64 mac_rx_pfc_pri0_pkt_num; + u64 mac_rx_pfc_pri1_pkt_num; + u64 mac_rx_pfc_pri2_pkt_num; + u64 mac_rx_pfc_pri3_pkt_num; + u64 mac_rx_pfc_pri4_pkt_num; + u64 mac_rx_pfc_pri5_pkt_num; + u64 mac_rx_pfc_pri6_pkt_num; + u64 mac_rx_pfc_pri7_pkt_num; + u64 mac_tx_total_pkt_num; + u64 mac_tx_total_oct_num; + u64 mac_tx_good_pkt_num; + u64 mac_tx_bad_pkt_num; + u64 mac_tx_good_oct_num; + u64 mac_tx_bad_oct_num; + u64 mac_tx_uni_pkt_num; + u64 mac_tx_multi_pkt_num; + u64 mac_tx_broad_pkt_num; + u64 mac_tx_undersize_pkt_num; + u64 mac_tx_oversize_pkt_num; + u64 mac_tx_64_oct_pkt_num; + u64 mac_tx_65_127_oct_pkt_num; + u64 mac_tx_128_255_oct_pkt_num; + u64 mac_tx_256_511_oct_pkt_num; + u64 mac_tx_512_1023_oct_pkt_num; + u64 mac_tx_1024_1518_oct_pkt_num; + u64 mac_tx_1519_2047_oct_pkt_num; + u64 mac_tx_2048_4095_oct_pkt_num; + u64 mac_tx_4096_8191_oct_pkt_num; + u64 rsv1; + u64 mac_tx_8192_9216_oct_pkt_num; + u64 mac_tx_9217_12287_oct_pkt_num; + u64 mac_tx_12288_16383_oct_pkt_num; + u64 mac_tx_1519_max_good_oct_pkt_num; + u64 mac_tx_1519_max_bad_oct_pkt_num; + + u64 mac_rx_total_pkt_num; + u64 mac_rx_total_oct_num; + u64 mac_rx_good_pkt_num; + u64 mac_rx_bad_pkt_num; + u64 mac_rx_good_oct_num; + u64 mac_rx_bad_oct_num; + u64 mac_rx_uni_pkt_num; + u64 mac_rx_multi_pkt_num; + u64 mac_rx_broad_pkt_num; + u64 mac_rx_undersize_pkt_num; + u64 mac_rx_oversize_pkt_num; + u64 mac_rx_64_oct_pkt_num; + u64 mac_rx_65_127_oct_pkt_num; + u64 mac_rx_128_255_oct_pkt_num; + u64 mac_rx_256_511_oct_pkt_num; + u64 mac_rx_512_1023_oct_pkt_num; + u64 mac_rx_1024_1518_oct_pkt_num; + u64 mac_rx_1519_2047_oct_pkt_num; + u64 mac_rx_2048_4095_oct_pkt_num; + u64 mac_rx_4096_8191_oct_pkt_num; + u64 rsv2; + u64 mac_rx_8192_9216_oct_pkt_num; + u64 mac_rx_9217_12287_oct_pkt_num; + u64 mac_rx_12288_16383_oct_pkt_num; + u64 mac_rx_1519_max_good_oct_pkt_num; + u64 mac_rx_1519_max_bad_oct_pkt_num; + + u64 mac_tx_fragment_pkt_num; + u64 mac_tx_undermin_pkt_num; + u64 mac_tx_jabber_pkt_num; + u64 mac_tx_err_all_pkt_num; + u64 mac_tx_from_app_good_pkt_num; + u64 mac_tx_from_app_bad_pkt_num; + u64 mac_rx_fragment_pkt_num; + u64 mac_rx_undermin_pkt_num; + u64 mac_rx_jabber_pkt_num; + u64 mac_rx_fcs_err_pkt_num; + u64 mac_rx_send_app_good_pkt_num; + u64 mac_rx_send_app_bad_pkt_num; + u64 mac_tx_pfc_pause_pkt_num; + u64 mac_rx_pfc_pause_pkt_num; + u64 mac_tx_ctrl_pkt_num; + u64 mac_rx_ctrl_pkt_num; + + /* duration of pfc */ + u64 mac_tx_pfc_pri0_xoff_time; + u64 mac_tx_pfc_pri1_xoff_time; + u64 mac_tx_pfc_pri2_xoff_time; + u64 mac_tx_pfc_pri3_xoff_time; + u64 mac_tx_pfc_pri4_xoff_time; + u64 mac_tx_pfc_pri5_xoff_time; + u64 mac_tx_pfc_pri6_xoff_time; + u64 mac_tx_pfc_pri7_xoff_time; + u64 mac_rx_pfc_pri0_xoff_time; + u64 mac_rx_pfc_pri1_xoff_time; + u64 mac_rx_pfc_pri2_xoff_time; + u64 mac_rx_pfc_pri3_xoff_time; + u64 mac_rx_pfc_pri4_xoff_time; + 
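A counter block like struct hclge_mac_stats is normally consumed generically rather than field by field: a descriptor table (struct hclge_comm_stats_str above, with its offset member) presumably pairs each counter name with its byte offset in the block, and a reader dereferences the u64 at that offset (the HCLGE_MAC_STATS_FIELD_OFF / HCLGE_STATS_READ macros defined further down fit this pattern). The sketch below shows the offset-table idea in isolation; every demo_* name and macro is an illustrative stand-in, not taken from the driver.

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative counter block (stands in for a hardware stats struct). */
struct demo_mac_stats {
	uint64_t tx_pause;
	uint64_t rx_pause;
	uint64_t tx_good_pkts;
};

/* Descriptor: counter name plus its byte offset inside the block. */
struct demo_stats_desc {
	const char *name;
	size_t offset;
};

#define DEMO_STATS_FIELD_OFF(f) offsetof(struct demo_mac_stats, f)
#define DEMO_STATS_READ(p, off) \
	(*(const uint64_t *)((const uint8_t *)(p) + (off)))

static const struct demo_stats_desc demo_stats[] = {
	{ "mac_tx_mac_pause_num", DEMO_STATS_FIELD_OFF(tx_pause) },
	{ "mac_rx_mac_pause_num", DEMO_STATS_FIELD_OFF(rx_pause) },
	{ "mac_tx_good_pkt_num",  DEMO_STATS_FIELD_OFF(tx_good_pkts) },
};

int main(void)
{
	struct demo_mac_stats stats = {
		.tx_pause = 3, .rx_pause = 7, .tx_good_pkts = 42,
	};
	size_t i;

	/* Walk the descriptor table and read each counter by offset. */
	for (i = 0; i < sizeof(demo_stats) / sizeof(demo_stats[0]); i++)
		printf("%s: %llu\n", demo_stats[i].name,
		       (unsigned long long)DEMO_STATS_READ(&stats,
							   demo_stats[i].offset));
	return 0;
}
```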
u64 mac_rx_pfc_pri5_xoff_time; + u64 mac_rx_pfc_pri6_xoff_time; + u64 mac_rx_pfc_pri7_xoff_time; + + /* duration of pause */ + u64 mac_tx_pause_xoff_time; + u64 mac_rx_pause_xoff_time; +}; + +#define HCLGE_STATS_TIMER_INTERVAL 300UL + +/* fec stats ,opcode id: 0x0316 */ +#define HCLGE_FEC_STATS_MAX_LANES 8 +struct hclge_fec_stats { + /* fec rs mode total stats */ + u64 rs_corr_blocks; + u64 rs_uncorr_blocks; + u64 rs_error_blocks; + /* fec base-r mode per lanes stats */ + u64 base_r_lane_num; + u64 base_r_corr_blocks; + u64 base_r_uncorr_blocks; + union { + struct { + u64 base_r_corr_per_lanes[HCLGE_FEC_STATS_MAX_LANES]; + u64 base_r_uncorr_per_lanes[HCLGE_FEC_STATS_MAX_LANES]; + }; + u64 per_lanes[HCLGE_FEC_STATS_MAX_LANES * 2]; + }; +}; + +struct hclge_vlan_type_cfg { + u16 rx_ot_fst_vlan_type; + u16 rx_ot_sec_vlan_type; + u16 rx_in_fst_vlan_type; + u16 rx_in_sec_vlan_type; + u16 tx_ot_vlan_type; + u16 tx_in_vlan_type; +}; + +enum HCLGE_FD_MODE { + HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1, + HCLGE_FD_MODE_DEPTH_1K_WIDTH_400B_STAGE_2, + HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1, + HCLGE_FD_MODE_DEPTH_2K_WIDTH_200B_STAGE_2, +}; + +enum HCLGE_FD_KEY_TYPE { + HCLGE_FD_KEY_BASE_ON_PTYPE, + HCLGE_FD_KEY_BASE_ON_TUPLE, +}; + +enum HCLGE_FD_STAGE { + HCLGE_FD_STAGE_1, + HCLGE_FD_STAGE_2, + MAX_STAGE_NUM, +}; + +/* OUTER_XXX indicates tuples in tunnel header of tunnel packet + * INNER_XXX indicate tuples in tunneled header of tunnel packet or + * tuples of non-tunnel packet + */ +enum HCLGE_FD_TUPLE { + OUTER_DST_MAC, + OUTER_SRC_MAC, + OUTER_VLAN_TAG_FST, + OUTER_VLAN_TAG_SEC, + OUTER_ETH_TYPE, + OUTER_L2_RSV, + OUTER_IP_TOS, + OUTER_IP_PROTO, + OUTER_SRC_IP, + OUTER_DST_IP, + OUTER_L3_RSV, + OUTER_SRC_PORT, + OUTER_DST_PORT, + OUTER_L4_RSV, + OUTER_TUN_VNI, + OUTER_TUN_FLOW_ID, + INNER_DST_MAC, + INNER_SRC_MAC, + INNER_VLAN_TAG_FST, + INNER_VLAN_TAG_SEC, + INNER_ETH_TYPE, + INNER_L2_RSV, + INNER_IP_TOS, + INNER_IP_PROTO, + INNER_SRC_IP, + INNER_DST_IP, + INNER_L3_RSV, + INNER_SRC_PORT, + INNER_DST_PORT, + INNER_L4_RSV, + MAX_TUPLE, +}; + +#define HCLGE_FD_TUPLE_USER_DEF_TUPLES \ + (BIT(INNER_L2_RSV) | BIT(INNER_L3_RSV) | BIT(INNER_L4_RSV)) + +enum HCLGE_FD_META_DATA { + PACKET_TYPE_ID, + IP_FRAGEMENT, + ROCE_TYPE, + NEXT_KEY, + VLAN_NUMBER, + SRC_VPORT, + DST_VPORT, + TUNNEL_PACKET, + MAX_META_DATA, +}; + +enum HCLGE_FD_KEY_OPT { + KEY_OPT_U8, + KEY_OPT_LE16, + KEY_OPT_LE32, + KEY_OPT_MAC, + KEY_OPT_IP, + KEY_OPT_VNI, +}; + +struct key_info { + u8 key_type; + u8 key_length; /* use bit as unit */ + enum HCLGE_FD_KEY_OPT key_opt; + int offset; + int moffset; +}; + +#define MAX_KEY_LENGTH 400 +#define MAX_KEY_DWORDS DIV_ROUND_UP(MAX_KEY_LENGTH / 8, 4) +#define MAX_KEY_BYTES (MAX_KEY_DWORDS * 4) +#define MAX_META_DATA_LENGTH 32 + +#define HCLGE_FD_MAX_USER_DEF_OFFSET 9000 +#define HCLGE_FD_USER_DEF_DATA GENMASK(15, 0) +#define HCLGE_FD_USER_DEF_OFFSET GENMASK(15, 0) +#define HCLGE_FD_USER_DEF_OFFSET_UNMASK GENMASK(15, 0) + +/* assigned by firmware, the real filter number for each pf may be less */ +#define MAX_FD_FILTER_NUM 4096 +#define HCLGE_ARFS_EXPIRE_INTERVAL 5UL + +#define hclge_read_dev(a, reg) \ + hclge_comm_read_reg((a)->hw.io_base, reg) +#define hclge_write_dev(a, reg, value) \ + hclge_comm_write_reg((a)->hw.io_base, reg, value) + +enum HCLGE_FD_ACTIVE_RULE_TYPE { + HCLGE_FD_RULE_NONE, + HCLGE_FD_ARFS_ACTIVE, + HCLGE_FD_EP_ACTIVE, + HCLGE_FD_TC_FLOWER_ACTIVE, +}; + +enum HCLGE_FD_PACKET_TYPE { + NIC_PACKET, + ROCE_PACKET, +}; + +enum HCLGE_FD_ACTION { + HCLGE_FD_ACTION_SELECT_QUEUE, + 
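The key-size macros above work out numerically as follows: MAX_KEY_LENGTH is 400 bits, i.e. 50 bytes, which DIV_ROUND_UP packs into 13 dwords, so MAX_KEY_BYTES is 52. A minimal standalone check of that arithmetic; the DEMO_* names and the local DIV_ROUND_UP definition mirror the kernel macros for illustration only.

```c
#include <assert.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

#define DEMO_MAX_KEY_LENGTH 400			/* key length in bits */
#define DEMO_MAX_KEY_DWORDS DIV_ROUND_UP(DEMO_MAX_KEY_LENGTH / 8, 4)
#define DEMO_MAX_KEY_BYTES  (DEMO_MAX_KEY_DWORDS * 4)

int main(void)
{
	/* 400 bits -> 50 bytes -> 13 dwords -> 52 bytes of key buffer. */
	assert(DEMO_MAX_KEY_DWORDS == 13);
	assert(DEMO_MAX_KEY_BYTES == 52);
	printf("dwords=%d bytes=%d\n", DEMO_MAX_KEY_DWORDS, DEMO_MAX_KEY_BYTES);
	return 0;
}
```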
HCLGE_FD_ACTION_DROP_PACKET, + HCLGE_FD_ACTION_SELECT_TC, +}; + +enum HCLGE_FD_NODE_STATE { + HCLGE_FD_TO_ADD, + HCLGE_FD_TO_DEL, + HCLGE_FD_ACTIVE, + HCLGE_FD_DELETED, +}; + +enum HCLGE_FD_USER_DEF_LAYER { + HCLGE_FD_USER_DEF_NONE, + HCLGE_FD_USER_DEF_L2, + HCLGE_FD_USER_DEF_L3, + HCLGE_FD_USER_DEF_L4, +}; + +#define HCLGE_FD_USER_DEF_LAYER_NUM 3 +struct hclge_fd_user_def_cfg { + u16 ref_cnt; + u16 offset; +}; + +struct hclge_fd_user_def_info { + enum HCLGE_FD_USER_DEF_LAYER layer; + u16 data; + u16 data_mask; + u16 offset; +}; + +struct hclge_fd_key_cfg { + u8 key_sel; + u8 inner_sipv6_word_en; + u8 inner_dipv6_word_en; + u8 outer_sipv6_word_en; + u8 outer_dipv6_word_en; + u32 tuple_active; + u32 meta_data_active; +}; + +struct hclge_fd_cfg { + u8 fd_mode; + u16 max_key_length; /* use bit as unit */ + u32 rule_num[MAX_STAGE_NUM]; /* rule entry number */ + u16 cnt_num[MAX_STAGE_NUM]; /* rule hit counter number */ + struct hclge_fd_key_cfg key_cfg[MAX_STAGE_NUM]; + struct hclge_fd_user_def_cfg user_def_cfg[HCLGE_FD_USER_DEF_LAYER_NUM]; +}; + +#define IPV4_INDEX 3 +#define IPV6_SIZE 4 +struct hclge_fd_rule_tuples { + u8 src_mac[ETH_ALEN]; + u8 dst_mac[ETH_ALEN]; + /* Be compatible for ip address of both ipv4 and ipv6. + * For ipv4 address, we store it in src/dst_ip[3]. + */ + u32 src_ip[IPV6_SIZE]; + u32 dst_ip[IPV6_SIZE]; + u16 src_port; + u16 dst_port; + u16 vlan_tag1; + u16 ether_proto; + u16 l2_user_def; + u16 l3_user_def; + u32 l4_user_def; + u8 ip_tos; + u8 ip_proto; +}; + +struct hclge_fd_rule { + struct hlist_node rule_node; + struct hclge_fd_rule_tuples tuples; + struct hclge_fd_rule_tuples tuples_mask; + u32 unused_tuple; + u32 flow_type; + union { + struct { + unsigned long cookie; + u8 tc; + } cls_flower; + struct { + u16 flow_id; /* only used for arfs */ + } arfs; + struct { + struct hclge_fd_user_def_info user_def; + } ep; + }; + u16 queue_id; + u16 vf_id; + u16 location; + enum HCLGE_FD_ACTIVE_RULE_TYPE rule_type; + enum HCLGE_FD_NODE_STATE state; + u8 action; +}; + +struct hclge_fd_ad_data { + u16 ad_id; + u8 drop_packet; + u8 forward_to_direct_queue; + u16 queue_id; + u8 use_counter; + u8 counter_id; + u8 use_next_stage; + u8 write_rule_id_to_bd; + u8 next_input_key; + u16 rule_id; + u16 tc_size; + u8 override_tc; +}; + +enum HCLGE_MAC_NODE_STATE { + HCLGE_MAC_TO_ADD, + HCLGE_MAC_TO_DEL, + HCLGE_MAC_ACTIVE +}; + +struct hclge_mac_node { + struct list_head node; + enum HCLGE_MAC_NODE_STATE state; + u8 mac_addr[ETH_ALEN]; +}; + +enum HCLGE_MAC_ADDR_TYPE { + HCLGE_MAC_ADDR_UC, + HCLGE_MAC_ADDR_MC +}; + +struct hclge_vport_vlan_cfg { + struct list_head node; + int hd_tbl_status; + u16 vlan_id; +}; + +struct hclge_rst_stats { + u32 reset_done_cnt; /* the number of reset has completed */ + u32 hw_reset_done_cnt; /* the number of HW reset has completed */ + u32 pf_rst_cnt; /* the number of PF reset */ + u32 flr_rst_cnt; /* the number of FLR */ + u32 global_rst_cnt; /* the number of GLOBAL */ + u32 imp_rst_cnt; /* the number of IMP reset */ + u32 reset_cnt; /* the number of reset */ + u32 reset_fail_cnt; /* the number of reset fail */ +}; + +/* time and register status when mac tunnel interruption occur */ +struct hclge_mac_tnl_stats { + u64 time; + u32 status; +}; + +#define HCLGE_RESET_INTERVAL (10 * HZ) +#define HCLGE_WAIT_RESET_DONE 100 + +#pragma pack(1) +struct hclge_vf_vlan_cfg { + u8 mbx_cmd; + u8 subcode; + union { + struct { + u8 is_kill; + __le16 vlan; + __le16 proto; + }; + u8 enable; + }; +}; + +#pragma pack() + +/* For each bit of TCAM entry, it uses a pair of 'x' and 
+ * 'y' to indicate which value to match, like below: + * ---------------------------------- + * | bit x | bit y | search value | + * ---------------------------------- + * | 0 | 0 | always hit | + * ---------------------------------- + * | 1 | 0 | match '0' | + * ---------------------------------- + * | 0 | 1 | match '1' | + * ---------------------------------- + * | 1 | 1 | invalid | + * ---------------------------------- + * Then for input key(k) and mask(v), we can calculate the value by + * the formulae: + * x = (~k) & v + * y = (k ^ ~v) & k + */ +#define calc_x(x, k, v) (x = ~(k) & (v)) +#define calc_y(y, k, v) \ + do { \ + const typeof(k) _k_ = (k); \ + const typeof(v) _v_ = (v); \ + (y) = (_k_ ^ ~_v_) & (_k_); \ + } while (0) + +#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f)) +#define HCLGE_STATS_READ(p, offset) (*(u64 *)((u8 *)(p) + (offset))) + +#define HCLGE_MAC_TNL_LOG_SIZE 8 +#define HCLGE_VPORT_NUM 256 +struct hclge_dev { + struct pci_dev *pdev; + struct hnae3_ae_dev *ae_dev; + struct hclge_hw hw; + struct hclge_misc_vector misc_vector; + struct hclge_mac_stats mac_stats; + struct hclge_fec_stats fec_stats; + unsigned long state; + unsigned long flr_state; + unsigned long last_reset_time; + + enum hnae3_reset_type reset_type; + enum hnae3_reset_type reset_level; + unsigned long default_reset_request; + unsigned long reset_request; /* reset has been requested */ + unsigned long reset_pending; /* client rst is pending to be served */ + struct hclge_rst_stats rst_stats; + struct semaphore reset_sem; /* protect reset process */ + u32 fw_version; + u16 num_tqps; /* Num task queue pairs of this PF */ + u16 num_req_vfs; /* Num VFs requested for this PF */ + + u16 base_tqp_pid; /* Base task tqp physical id of this PF */ + u16 alloc_rss_size; /* Allocated RSS task queue */ + u16 vf_rss_size_max; /* HW defined VF max RSS task queue */ + u16 pf_rss_size_max; /* HW defined PF max RSS task queue */ + u32 tx_spare_buf_size; /* HW defined TX spare buffer size */ + + u16 fdir_pf_filter_count; /* Num of guaranteed filters for this PF */ + u16 num_alloc_vport; /* Num vports this driver supports */ + u32 numa_node_mask; + u16 rx_buf_len; + u16 num_tx_desc; /* desc num of per tx queue */ + u16 num_rx_desc; /* desc num of per rx queue */ + u8 hw_tc_map; + enum hclge_fc_mode fc_mode_last_time; + u8 support_sfp_query; + +#define HCLGE_FLAG_TC_BASE_SCH_MODE 1 +#define HCLGE_FLAG_VNET_BASE_SCH_MODE 2 + u8 tx_sch_mode; + u8 tc_max; + u8 pfc_max; + + u8 default_up; + u8 dcbx_cap; + struct hclge_tm_info tm_info; + + u16 num_msi; + u16 num_msi_left; + u16 num_msi_used; + u16 *vector_status; + int *vector_irq; + u16 num_nic_msi; /* Num of nic vectors for this PF */ + u16 num_roce_msi; /* Num of roce vectors for this PF */ + + unsigned long service_timer_period; + unsigned long service_timer_previous; + struct timer_list reset_timer; + struct delayed_work service_task; + + bool cur_promisc; + int num_alloc_vfs; /* Actual number of VFs allocated */ + + struct hclge_comm_tqp *htqp; + struct hclge_vport *vport; + + struct dentry *hclge_dbgfs; + + struct hnae3_client *nic_client; + struct hnae3_client *roce_client; + +#define HCLGE_FLAG_MAIN BIT(0) +#define HCLGE_FLAG_DCB_CAPABLE BIT(1) + u32 flag; + + u32 pkt_buf_size; /* Total pf buf size for tx/rx */ + u32 tx_buf_size; /* Tx buffer size for each TC */ + u32 dv_buf_size; /* Dv buffer size for each TC */ + + u32 mps; /* Max packet size */ + /* vport_lock protect resource shared by vports */ + struct mutex vport_lock; + + struct 
hclge_vlan_type_cfg vlan_type_cfg; + + unsigned long vlan_table[VLAN_N_VID][BITS_TO_LONGS(HCLGE_VPORT_NUM)]; + unsigned long vf_vlan_full[BITS_TO_LONGS(HCLGE_VPORT_NUM)]; + + unsigned long vport_config_block[BITS_TO_LONGS(HCLGE_VPORT_NUM)]; + + struct hclge_fd_cfg fd_cfg; + struct hlist_head fd_rule_list; + spinlock_t fd_rule_lock; /* protect fd_rule_list and fd_bmap */ + u16 hclge_fd_rule_num; + unsigned long serv_processed_cnt; + unsigned long last_serv_processed; + unsigned long last_rst_scheduled; + unsigned long last_mbx_scheduled; + unsigned long fd_bmap[BITS_TO_LONGS(MAX_FD_FILTER_NUM)]; + enum HCLGE_FD_ACTIVE_RULE_TYPE fd_active_type; + u8 fd_en; + bool gro_en; + + u16 wanted_umv_size; + /* max available unicast mac vlan space */ + u16 max_umv_size; + /* private unicast mac vlan space, it's same for PF and its VFs */ + u16 priv_umv_size; + /* unicast mac vlan space shared by PF and its VFs */ + u16 share_umv_size; + /* multicast mac address number used by PF and its VFs */ + u16 used_mc_mac_num; + + DECLARE_KFIFO(mac_tnl_log, struct hclge_mac_tnl_stats, + HCLGE_MAC_TNL_LOG_SIZE); + + struct hclge_ptp *ptp; + struct devlink *devlink; + struct hclge_comm_rss_cfg rss_cfg; +}; + +/* VPort level vlan tag configuration for TX direction */ +struct hclge_tx_vtag_cfg { + bool accept_tag1; /* Whether accept tag1 packet from host */ + bool accept_untag1; /* Whether accept untag1 packet from host */ + bool accept_tag2; + bool accept_untag2; + bool insert_tag1_en; /* Whether insert inner vlan tag */ + bool insert_tag2_en; /* Whether insert outer vlan tag */ + u16 default_tag1; /* The default inner vlan tag to insert */ + u16 default_tag2; /* The default outer vlan tag to insert */ + bool tag_shift_mode_en; +}; + +/* VPort level vlan tag configuration for RX direction */ +struct hclge_rx_vtag_cfg { + bool rx_vlan_offload_en; /* Whether enable rx vlan offload */ + bool strip_tag1_en; /* Whether strip inner vlan tag */ + bool strip_tag2_en; /* Whether strip outer vlan tag */ + bool vlan1_vlan_prionly; /* Inner vlan tag up to descriptor enable */ + bool vlan2_vlan_prionly; /* Outer vlan tag up to descriptor enable */ + bool strip_tag1_discard_en; /* Inner vlan tag discard for BD enable */ + bool strip_tag2_discard_en; /* Outer vlan tag discard for BD enable */ +}; + +enum HCLGE_VPORT_STATE { + HCLGE_VPORT_STATE_ALIVE, + HCLGE_VPORT_STATE_MAC_TBL_CHANGE, + HCLGE_VPORT_STATE_PROMISC_CHANGE, + HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE, + HCLGE_VPORT_STATE_INITED, + HCLGE_VPORT_STATE_MAX +}; + +enum HCLGE_VPORT_NEED_NOTIFY { + HCLGE_VPORT_NEED_NOTIFY_RESET, + HCLGE_VPORT_NEED_NOTIFY_VF_VLAN, +}; + +struct hclge_vlan_info { + u16 vlan_proto; /* so far support 802.1Q only */ + u16 qos; + u16 vlan_tag; +}; + +struct hclge_port_base_vlan_config { + u16 state; + bool tbl_sta; + struct hclge_vlan_info vlan_info; + struct hclge_vlan_info old_vlan_info; +}; + +struct hclge_vf_info { + int link_state; + u8 mac[ETH_ALEN]; + u32 spoofchk; + u32 max_tx_rate; + u32 trusted; + u8 request_uc_en; + u8 request_mc_en; + u8 request_bc_en; +}; + +struct hclge_vport { + u16 alloc_tqps; /* Allocated Tx/Rx queues */ + + u16 qs_offset; + u32 bw_limit; /* VSI BW Limit (0 = disabled) */ + u8 dwrr; + + bool req_vlan_fltr_en; + bool cur_vlan_fltr_en; + unsigned long vlan_del_fail_bmap[BITS_TO_LONGS(VLAN_N_VID)]; + struct hclge_port_base_vlan_config port_base_vlan_cfg; + struct hclge_tx_vtag_cfg txvlan_cfg; + struct hclge_rx_vtag_cfg rxvlan_cfg; + + u16 used_umv_num; + + u16 vport_id; + struct hclge_dev *back; /* Back reference to 
associated dev */ + struct hnae3_handle nic; + struct hnae3_handle roce; + + unsigned long state; + unsigned long need_notify; + unsigned long last_active_jiffies; + u32 mps; /* Max packet size */ + struct hclge_vf_info vf_info; + + u8 overflow_promisc_flags; + u8 last_promisc_flags; + + spinlock_t mac_list_lock; /* protect mac address need to add/detele */ + struct list_head uc_mac_list; /* Store VF unicast table */ + struct list_head mc_mac_list; /* Store VF multicast table */ + + struct list_head vlan_list; /* Store VF vlan table */ +}; + +struct hclge_speed_bit_map { + u32 speed; + u32 speed_bit; +}; + +struct hclge_mac_speed_map { + u32 speed_drv; /* speed defined in driver */ + u32 speed_fw; /* speed defined in firmware */ +}; + +int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc, + bool en_mc_pmc, bool en_bc_pmc); +int hclge_add_uc_addr_common(struct hclge_vport *vport, + const unsigned char *addr); +int hclge_rm_uc_addr_common(struct hclge_vport *vport, + const unsigned char *addr); +int hclge_add_mc_addr_common(struct hclge_vport *vport, + const unsigned char *addr); +int hclge_rm_mc_addr_common(struct hclge_vport *vport, + const unsigned char *addr); + +struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle); +int hclge_bind_ring_with_vector(struct hclge_vport *vport, + int vector_id, bool en, + struct hnae3_ring_chain_node *ring_chain); + +static inline int hclge_get_queue_id(struct hnae3_queue *queue) +{ + struct hclge_comm_tqp *tqp = + container_of(queue, struct hclge_comm_tqp, q); + + return tqp->index; +} + +int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport); +int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex, u8 lane_num); +int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto, + u16 vlan_id, bool is_kill); +int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable); + +int hclge_buffer_alloc(struct hclge_dev *hdev); +int hclge_rss_init_hw(struct hclge_dev *hdev); + +void hclge_mbx_handler(struct hclge_dev *hdev); +int hclge_reset_tqp(struct hnae3_handle *handle); +int hclge_cfg_flowctrl(struct hclge_dev *hdev); +int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id); +int hclge_vport_start(struct hclge_vport *vport); +void hclge_vport_stop(struct hclge_vport *vport); +int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu); +int hclge_dbg_read_cmd(struct hnae3_handle *handle, enum hnae3_dbg_cmd cmd, + char *buf, int len); +u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id); +int hclge_notify_client(struct hclge_dev *hdev, + enum hnae3_reset_notify_type type); +int hclge_update_mac_list(struct hclge_vport *vport, + enum HCLGE_MAC_NODE_STATE state, + enum HCLGE_MAC_ADDR_TYPE mac_type, + const unsigned char *addr); +int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport, + const u8 *old_addr, const u8 *new_addr); +void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list, + enum HCLGE_MAC_ADDR_TYPE mac_type); +void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list); +void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev); +void hclge_restore_mac_table_common(struct hclge_vport *vport); +void hclge_restore_vport_port_base_vlan_config(struct hclge_dev *hdev); +void hclge_restore_vport_vlan_table(struct hclge_vport *vport); +int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state, + struct hclge_vlan_info *vlan_info); +int hclge_push_vf_port_base_vlan_info(struct 
hclge_vport *vport, u8 vfid, + u16 state, + struct hclge_vlan_info *vlan_info); +void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time); +int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, + struct hclge_desc *desc); +void hclge_report_hw_error(struct hclge_dev *hdev, + enum hnae3_hw_error_type type); +void hclge_inform_vf_promisc_info(struct hclge_vport *vport); +int hclge_dbg_dump_rst_info(struct hclge_dev *hdev, char *buf, int len); +int hclge_push_vf_link_status(struct hclge_vport *vport); +int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en); +int hclge_mac_update_stats(struct hclge_dev *hdev); +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c new file mode 100644 index 000000000..04ff9bf12 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c @@ -0,0 +1,1152 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. + +#include "hclge_main.h" +#include "hclge_mbx.h" +#include "hnae3.h" +#include "hclge_comm_rss.h" + +#define CREATE_TRACE_POINTS +#include "hclge_trace.h" + +static u16 hclge_errno_to_resp(int errno) +{ + int resp = abs(errno); + + /* The status for the pf to vf msg cmd is u16, constrained by HW. + * We need to keep the same type with it. + * The input errno is a standard error code, so it is safe to + * use a u16 to store the abs(errno). + */ + return (u16)resp; +} + +/* hclge_gen_resp_to_vf: used to generate a synchronous response to VF when PF + * receives a mailbox message from VF. + * @vport: pointer to struct hclge_vport + * @vf_to_pf_req: pointer to hclge_mbx_vf_to_pf_cmd of the original mailbox + * message + * @resp_msg: response to send back to the VF, including the status (0 on success) and optional data. 
+ */ +static int hclge_gen_resp_to_vf(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *vf_to_pf_req, + struct hclge_respond_to_vf_msg *resp_msg) +{ + struct hclge_mbx_pf_to_vf_cmd *resp_pf_to_vf; + struct hclge_dev *hdev = vport->back; + enum hclge_comm_cmd_status status; + struct hclge_desc desc; + u16 resp; + + resp_pf_to_vf = (struct hclge_mbx_pf_to_vf_cmd *)desc.data; + + if (resp_msg->len > HCLGE_MBX_MAX_RESP_DATA_SIZE) { + dev_err(&hdev->pdev->dev, + "PF fail to gen resp to VF len %u exceeds max len %u\n", + resp_msg->len, + HCLGE_MBX_MAX_RESP_DATA_SIZE); + /* If resp_msg->len is too long, set the value to max length + * and return the msg to VF + */ + resp_msg->len = HCLGE_MBX_MAX_RESP_DATA_SIZE; + } + + hclge_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_PF_TO_VF, false); + + resp_pf_to_vf->dest_vfid = vf_to_pf_req->mbx_src_vfid; + resp_pf_to_vf->msg_len = vf_to_pf_req->msg_len; + resp_pf_to_vf->match_id = vf_to_pf_req->match_id; + + resp_pf_to_vf->msg.code = cpu_to_le16(HCLGE_MBX_PF_VF_RESP); + resp_pf_to_vf->msg.vf_mbx_msg_code = + cpu_to_le16(vf_to_pf_req->msg.code); + resp_pf_to_vf->msg.vf_mbx_msg_subcode = + cpu_to_le16(vf_to_pf_req->msg.subcode); + resp = hclge_errno_to_resp(resp_msg->status); + if (resp < SHRT_MAX) { + resp_pf_to_vf->msg.resp_status = cpu_to_le16(resp); + } else { + dev_warn(&hdev->pdev->dev, + "failed to send response to VF, response status %u is out-of-bound\n", + resp); + resp_pf_to_vf->msg.resp_status = cpu_to_le16(EIO); + } + + if (resp_msg->len > 0) + memcpy(resp_pf_to_vf->msg.resp_data, resp_msg->data, + resp_msg->len); + + trace_hclge_pf_mbx_send(hdev, resp_pf_to_vf); + + status = hclge_cmd_send(&hdev->hw, &desc, 1); + if (status) + dev_err(&hdev->pdev->dev, + "failed to send response to VF, status: %d, vfid: %u, code: %u, subcode: %u.\n", + status, vf_to_pf_req->mbx_src_vfid, + vf_to_pf_req->msg.code, vf_to_pf_req->msg.subcode); + + return status; +} + +static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len, + u16 mbx_opcode, u8 dest_vfid) +{ + struct hclge_mbx_pf_to_vf_cmd *resp_pf_to_vf; + struct hclge_dev *hdev = vport->back; + enum hclge_comm_cmd_status status; + struct hclge_desc desc; + + if (msg_len > HCLGE_MBX_MAX_MSG_SIZE) { + dev_err(&hdev->pdev->dev, + "msg data length(=%u) exceeds maximum(=%u)\n", + msg_len, HCLGE_MBX_MAX_MSG_SIZE); + return -EMSGSIZE; + } + + resp_pf_to_vf = (struct hclge_mbx_pf_to_vf_cmd *)desc.data; + + hclge_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_PF_TO_VF, false); + + resp_pf_to_vf->dest_vfid = dest_vfid; + resp_pf_to_vf->msg_len = msg_len; + resp_pf_to_vf->msg.code = cpu_to_le16(mbx_opcode); + + memcpy(resp_pf_to_vf->msg.msg_data, msg, msg_len); + + trace_hclge_pf_mbx_send(hdev, resp_pf_to_vf); + + status = hclge_cmd_send(&hdev->hw, &desc, 1); + if (status) + dev_err(&hdev->pdev->dev, + "failed to send mailbox to VF, status: %d, vfid: %u, opcode: %u\n", + status, dest_vfid, mbx_opcode); + + return status; +} + +static int hclge_inform_vf_reset(struct hclge_vport *vport, u16 reset_type) +{ + __le16 msg_data; + u8 dest_vfid; + + dest_vfid = (u8)vport->vport_id; + msg_data = cpu_to_le16(reset_type); + + /* send this requested info to VF */ + return hclge_send_mbx_msg(vport, (u8 *)&msg_data, sizeof(msg_data), + HCLGE_MBX_ASSERTING_RESET, dest_vfid); +} + +int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport) +{ + struct hclge_dev *hdev = vport->back; + u16 reset_type; + + BUILD_BUG_ON(HNAE3_MAX_RESET > U16_MAX); + + if (hdev->reset_type == HNAE3_FUNC_RESET) + reset_type = 
HNAE3_VF_PF_FUNC_RESET; + else if (hdev->reset_type == HNAE3_FLR_RESET) + reset_type = HNAE3_VF_FULL_RESET; + else + reset_type = HNAE3_VF_FUNC_RESET; + + return hclge_inform_vf_reset(vport, reset_type); +} + +static void hclge_free_vector_ring_chain(struct hnae3_ring_chain_node *head) +{ + struct hnae3_ring_chain_node *chain_tmp, *chain; + + chain = head->next; + + while (chain) { + chain_tmp = chain->next; + kfree_sensitive(chain); + chain = chain_tmp; + } +} + +/* hclge_get_ring_chain_from_mbx: get ring type & tqp id & int_gl idx + * from mailbox message + * msg[0]: opcode + * msg[1]: <not relevant to this function> + * msg[2]: ring_num + * msg[3]: first ring type (TX|RX) + * msg[4]: first tqp id + * msg[5]: first int_gl idx + * msg[6] ~ msg[14]: other ring type, tqp id and int_gl idx + */ +static int hclge_get_ring_chain_from_mbx( + struct hclge_mbx_vf_to_pf_cmd *req, + struct hnae3_ring_chain_node *ring_chain, + struct hclge_vport *vport) +{ + struct hnae3_ring_chain_node *cur_chain, *new_chain; + struct hclge_dev *hdev = vport->back; + int ring_num; + int i; + + ring_num = req->msg.ring_num; + + if (ring_num > HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM) + return -EINVAL; + + for (i = 0; i < ring_num; i++) { + if (req->msg.param[i].tqp_index >= vport->nic.kinfo.rss_size) { + dev_err(&hdev->pdev->dev, "tqp index(%u) is out of range(0-%u)\n", + req->msg.param[i].tqp_index, + vport->nic.kinfo.rss_size - 1U); + return -EINVAL; + } + } + + hnae3_set_bit(ring_chain->flag, HNAE3_RING_TYPE_B, + req->msg.param[0].ring_type); + ring_chain->tqp_index = + hclge_get_queue_id(vport->nic.kinfo.tqp + [req->msg.param[0].tqp_index]); + hnae3_set_field(ring_chain->int_gl_idx, HNAE3_RING_GL_IDX_M, + HNAE3_RING_GL_IDX_S, req->msg.param[0].int_gl_index); + + cur_chain = ring_chain; + + for (i = 1; i < ring_num; i++) { + new_chain = kzalloc(sizeof(*new_chain), GFP_KERNEL); + if (!new_chain) + goto err; + + hnae3_set_bit(new_chain->flag, HNAE3_RING_TYPE_B, + req->msg.param[i].ring_type); + + new_chain->tqp_index = + hclge_get_queue_id(vport->nic.kinfo.tqp + [req->msg.param[i].tqp_index]); + + hnae3_set_field(new_chain->int_gl_idx, HNAE3_RING_GL_IDX_M, + HNAE3_RING_GL_IDX_S, + req->msg.param[i].int_gl_index); + + cur_chain->next = new_chain; + cur_chain = new_chain; + } + + return 0; +err: + hclge_free_vector_ring_chain(ring_chain); + return -ENOMEM; +} + +static int hclge_map_unmap_ring_to_vf_vector(struct hclge_vport *vport, bool en, + struct hclge_mbx_vf_to_pf_cmd *req) +{ + struct hnae3_ring_chain_node ring_chain; + int vector_id = req->msg.vector_id; + int ret; + + memset(&ring_chain, 0, sizeof(ring_chain)); + ret = hclge_get_ring_chain_from_mbx(req, &ring_chain, vport); + if (ret) + return ret; + + ret = hclge_bind_ring_with_vector(vport, vector_id, en, &ring_chain); + + hclge_free_vector_ring_chain(&ring_chain); + + return ret; +} + +static int hclge_query_ring_vector_map(struct hclge_vport *vport, + struct hnae3_ring_chain_node *ring_chain, + struct hclge_desc *desc) +{ + struct hclge_ctrl_vector_chain_cmd *req = + (struct hclge_ctrl_vector_chain_cmd *)desc->data; + struct hclge_dev *hdev = vport->back; + u16 tqp_type_and_id; + int status; + + hclge_cmd_setup_basic_desc(desc, HCLGE_OPC_ADD_RING_TO_VECTOR, true); + + tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[0]); + hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M, HCLGE_INT_TYPE_S, + hnae3_get_bit(ring_chain->flag, HNAE3_RING_TYPE_B)); + hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M, HCLGE_TQP_ID_S, + ring_chain->tqp_index); + 
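A note on the ring-chain layout used by hclge_get_ring_chain_from_mbx() and hclge_free_vector_ring_chain() above: the first hnae3_ring_chain_node is embedded in the caller's stack frame and only the continuation nodes are heap-allocated, which is why the free routine starts at head->next and never frees the head itself. Below is a self-contained userspace sketch of that pattern, assuming simplified demo_* types in place of the driver structures.

```c
#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for hnae3_ring_chain_node: ring index plus next pointer. */
struct demo_chain_node {
	unsigned int tqp_index;
	struct demo_chain_node *next;
};

/* Free only the heap-allocated continuation nodes; the head node is assumed
 * to live in the caller's stack frame, mirroring the driver's convention.
 */
static void demo_free_chain(struct demo_chain_node *head)
{
	struct demo_chain_node *cur = head->next, *tmp;

	while (cur) {
		tmp = cur->next;
		free(cur);
		cur = tmp;
	}
	head->next = NULL;
}

static int demo_build_chain(struct demo_chain_node *head,
			    const unsigned int *tqps, int num)
{
	struct demo_chain_node *cur = head;
	int i;

	head->tqp_index = tqps[0];
	head->next = NULL;

	for (i = 1; i < num; i++) {
		struct demo_chain_node *node = calloc(1, sizeof(*node));

		if (!node) {
			demo_free_chain(head);
			return -1;
		}
		node->tqp_index = tqps[i];
		cur->next = node;
		cur = node;
	}
	return 0;
}

int main(void)
{
	struct demo_chain_node head;	/* first node lives on the stack */
	unsigned int tqps[] = { 0, 1, 2, 3 };
	struct demo_chain_node *cur;

	if (demo_build_chain(&head, tqps, 4))
		return 1;
	for (cur = &head; cur; cur = cur->next)
		printf("tqp %u\n", cur->tqp_index);
	demo_free_chain(&head);
	return 0;
}
```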
req->tqp_type_and_id[0] = cpu_to_le16(tqp_type_and_id); + req->vfid = vport->vport_id; + + status = hclge_cmd_send(&hdev->hw, desc, 1); + if (status) + dev_err(&hdev->pdev->dev, + "Get VF ring vector map info fail, status is %d.\n", + status); + + return status; +} + +static int hclge_get_vf_ring_vector_map(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *req, + struct hclge_respond_to_vf_msg *resp) +{ +#define HCLGE_LIMIT_RING_NUM 1 +#define HCLGE_RING_TYPE_OFFSET 0 +#define HCLGE_TQP_INDEX_OFFSET 1 +#define HCLGE_INT_GL_INDEX_OFFSET 2 +#define HCLGE_VECTOR_ID_OFFSET 3 +#define HCLGE_RING_VECTOR_MAP_INFO_LEN 4 + struct hnae3_ring_chain_node ring_chain; + struct hclge_desc desc; + struct hclge_ctrl_vector_chain_cmd *data = + (struct hclge_ctrl_vector_chain_cmd *)desc.data; + u16 tqp_type_and_id; + u8 int_gl_index; + int ret; + + req->msg.ring_num = HCLGE_LIMIT_RING_NUM; + + memset(&ring_chain, 0, sizeof(ring_chain)); + ret = hclge_get_ring_chain_from_mbx(req, &ring_chain, vport); + if (ret) + return ret; + + ret = hclge_query_ring_vector_map(vport, &ring_chain, &desc); + if (ret) { + hclge_free_vector_ring_chain(&ring_chain); + return ret; + } + + tqp_type_and_id = le16_to_cpu(data->tqp_type_and_id[0]); + int_gl_index = hnae3_get_field(tqp_type_and_id, + HCLGE_INT_GL_IDX_M, HCLGE_INT_GL_IDX_S); + + resp->data[HCLGE_RING_TYPE_OFFSET] = req->msg.param[0].ring_type; + resp->data[HCLGE_TQP_INDEX_OFFSET] = req->msg.param[0].tqp_index; + resp->data[HCLGE_INT_GL_INDEX_OFFSET] = int_gl_index; + resp->data[HCLGE_VECTOR_ID_OFFSET] = data->int_vector_id_l; + resp->len = HCLGE_RING_VECTOR_MAP_INFO_LEN; + + hclge_free_vector_ring_chain(&ring_chain); + + return ret; +} + +static void hclge_set_vf_promisc_mode(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *req) +{ + struct hnae3_handle *handle = &vport->nic; + struct hclge_dev *hdev = vport->back; + + vport->vf_info.request_uc_en = req->msg.en_uc; + vport->vf_info.request_mc_en = req->msg.en_mc; + vport->vf_info.request_bc_en = req->msg.en_bc; + + if (req->msg.en_limit_promisc) + set_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags); + else + clear_bit(HNAE3_PFLAG_LIMIT_PROMISC, + &handle->priv_flags); + + set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state); + hclge_task_schedule(hdev, 0); +} + +static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *mbx_req) +{ +#define HCLGE_MBX_VF_OLD_MAC_ADDR_OFFSET 6 + + const u8 *mac_addr = (const u8 *)(mbx_req->msg.data); + struct hclge_dev *hdev = vport->back; + int status; + + if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_UC_MODIFY) { + const u8 *old_addr = (const u8 *) + (&mbx_req->msg.data[HCLGE_MBX_VF_OLD_MAC_ADDR_OFFSET]); + + /* If VF MAC has been configured by the host then it + * cannot be overridden by the MAC specified by the VM. 
+ */ + if (!is_zero_ether_addr(vport->vf_info.mac) && + !ether_addr_equal(mac_addr, vport->vf_info.mac)) + return -EPERM; + + if (!is_valid_ether_addr(mac_addr)) + return -EINVAL; + + spin_lock_bh(&vport->mac_list_lock); + status = hclge_update_mac_node_for_dev_addr(vport, old_addr, + mac_addr); + spin_unlock_bh(&vport->mac_list_lock); + hclge_task_schedule(hdev, 0); + } else if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_UC_ADD) { + status = hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, + HCLGE_MAC_ADDR_UC, mac_addr); + } else if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_UC_REMOVE) { + status = hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, + HCLGE_MAC_ADDR_UC, mac_addr); + } else { + dev_err(&hdev->pdev->dev, + "failed to set unicast mac addr, unknown subcode %u\n", + mbx_req->msg.subcode); + return -EIO; + } + + return status; +} + +static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *mbx_req) +{ + const u8 *mac_addr = (const u8 *)(mbx_req->msg.data); + struct hclge_dev *hdev = vport->back; + + if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_MC_ADD) { + hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, + HCLGE_MAC_ADDR_MC, mac_addr); + } else if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_MC_REMOVE) { + hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, + HCLGE_MAC_ADDR_MC, mac_addr); + } else { + dev_err(&hdev->pdev->dev, + "failed to set mcast mac addr, unknown subcode %u\n", + mbx_req->msg.subcode); + return -EIO; + } + + return 0; +} + +int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid, + u16 state, + struct hclge_vlan_info *vlan_info) +{ + struct hclge_mbx_port_base_vlan base_vlan; + + base_vlan.state = cpu_to_le16(state); + base_vlan.vlan_proto = cpu_to_le16(vlan_info->vlan_proto); + base_vlan.qos = cpu_to_le16(vlan_info->qos); + base_vlan.vlan_tag = cpu_to_le16(vlan_info->vlan_tag); + + return hclge_send_mbx_msg(vport, (u8 *)&base_vlan, sizeof(base_vlan), + HCLGE_MBX_PUSH_VLAN_INFO, vfid); +} + +static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *mbx_req, + struct hclge_respond_to_vf_msg *resp_msg) +{ +#define HCLGE_MBX_VLAN_STATE_OFFSET 0 +#define HCLGE_MBX_VLAN_INFO_OFFSET 2 + + struct hnae3_handle *handle = &vport->nic; + struct hclge_dev *hdev = vport->back; + struct hclge_vf_vlan_cfg *msg_cmd; + __be16 proto; + u16 vlan_id; + + msg_cmd = (struct hclge_vf_vlan_cfg *)&mbx_req->msg; + switch (msg_cmd->subcode) { + case HCLGE_MBX_VLAN_FILTER: + proto = cpu_to_be16(le16_to_cpu(msg_cmd->proto)); + vlan_id = le16_to_cpu(msg_cmd->vlan); + return hclge_set_vlan_filter(handle, proto, vlan_id, + msg_cmd->is_kill); + case HCLGE_MBX_VLAN_RX_OFF_CFG: + return hclge_en_hw_strip_rxvtag(handle, msg_cmd->enable); + case HCLGE_MBX_GET_PORT_BASE_VLAN_STATE: + /* vf does not need to know about the port based VLAN state + * on device HNAE3_DEVICE_VERSION_V3. So always return disable + * on device HNAE3_DEVICE_VERSION_V3 if vf queries the port + * based VLAN state. + */ + resp_msg->data[0] = + hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3 ? 
+ HNAE3_PORT_BASE_VLAN_DISABLE : + vport->port_base_vlan_cfg.state; + resp_msg->len = sizeof(u8); + return 0; + case HCLGE_MBX_ENABLE_VLAN_FILTER: + return hclge_enable_vport_vlan_filter(vport, msg_cmd->enable); + default: + return 0; + } +} + +static int hclge_set_vf_alive(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *mbx_req) +{ + bool alive = !!mbx_req->msg.data[0]; + int ret = 0; + + if (alive) + ret = hclge_vport_start(vport); + else + hclge_vport_stop(vport); + + return ret; +} + +static void hclge_get_basic_info(struct hclge_vport *vport, + struct hclge_respond_to_vf_msg *resp_msg) +{ + struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; + struct hnae3_ae_dev *ae_dev = vport->back->ae_dev; + struct hclge_basic_info *basic_info; + unsigned int i; + u32 pf_caps; + + basic_info = (struct hclge_basic_info *)resp_msg->data; + for (i = 0; i < kinfo->tc_info.num_tc; i++) + basic_info->hw_tc_map |= BIT(i); + + pf_caps = le32_to_cpu(basic_info->pf_caps); + if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps)) + hnae3_set_bit(pf_caps, HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B, 1); + + basic_info->pf_caps = cpu_to_le32(pf_caps); + resp_msg->len = HCLGE_MBX_MAX_RESP_DATA_SIZE; +} + +static void hclge_get_vf_queue_info(struct hclge_vport *vport, + struct hclge_respond_to_vf_msg *resp_msg) +{ +#define HCLGE_TQPS_RSS_INFO_LEN 6 + + struct hclge_mbx_vf_queue_info *queue_info; + struct hclge_dev *hdev = vport->back; + + /* get the queue related info */ + queue_info = (struct hclge_mbx_vf_queue_info *)resp_msg->data; + queue_info->num_tqps = cpu_to_le16(vport->alloc_tqps); + queue_info->rss_size = cpu_to_le16(vport->nic.kinfo.rss_size); + queue_info->rx_buf_len = cpu_to_le16(hdev->rx_buf_len); + resp_msg->len = HCLGE_TQPS_RSS_INFO_LEN; +} + +static void hclge_get_vf_mac_addr(struct hclge_vport *vport, + struct hclge_respond_to_vf_msg *resp_msg) +{ + ether_addr_copy(resp_msg->data, vport->vf_info.mac); + resp_msg->len = ETH_ALEN; +} + +static void hclge_get_vf_queue_depth(struct hclge_vport *vport, + struct hclge_respond_to_vf_msg *resp_msg) +{ +#define HCLGE_TQPS_DEPTH_INFO_LEN 4 + + struct hclge_mbx_vf_queue_depth *queue_depth; + struct hclge_dev *hdev = vport->back; + + /* get the queue depth info */ + queue_depth = (struct hclge_mbx_vf_queue_depth *)resp_msg->data; + queue_depth->num_tx_desc = cpu_to_le16(hdev->num_tx_desc); + queue_depth->num_rx_desc = cpu_to_le16(hdev->num_rx_desc); + + resp_msg->len = HCLGE_TQPS_DEPTH_INFO_LEN; +} + +static void hclge_get_vf_media_type(struct hclge_vport *vport, + struct hclge_respond_to_vf_msg *resp_msg) +{ +#define HCLGE_VF_MEDIA_TYPE_OFFSET 0 +#define HCLGE_VF_MODULE_TYPE_OFFSET 1 +#define HCLGE_VF_MEDIA_TYPE_LENGTH 2 + + struct hclge_dev *hdev = vport->back; + + resp_msg->data[HCLGE_VF_MEDIA_TYPE_OFFSET] = + hdev->hw.mac.media_type; + resp_msg->data[HCLGE_VF_MODULE_TYPE_OFFSET] = + hdev->hw.mac.module_type; + resp_msg->len = HCLGE_VF_MEDIA_TYPE_LENGTH; +} + +int hclge_push_vf_link_status(struct hclge_vport *vport) +{ +#define HCLGE_VF_LINK_STATE_UP 1U +#define HCLGE_VF_LINK_STATE_DOWN 0U + + struct hclge_mbx_link_status link_info; + struct hclge_dev *hdev = vport->back; + u16 link_status; + + /* mac.link can only be 0 or 1 */ + switch (vport->vf_info.link_state) { + case IFLA_VF_LINK_STATE_ENABLE: + link_status = HCLGE_VF_LINK_STATE_UP; + break; + case IFLA_VF_LINK_STATE_DISABLE: + link_status = HCLGE_VF_LINK_STATE_DOWN; + break; + case IFLA_VF_LINK_STATE_AUTO: + default: + link_status = (u16)hdev->hw.mac.link; + break; + } + + 
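The PF-to-VF push helpers in this file (port-base VLAN info above, link status just below) all follow one pattern: fill a small fixed-layout struct whose multi-byte fields are converted to little-endian, then hand it to hclge_send_mbx_msg() as a raw byte buffer. A userspace sketch of that packing, assuming an illustrative demo_link_status_msg layout and glibc's htole16()/htole32() in place of the kernel's cpu_to_le16()/cpu_to_le32():

```c
#include <endian.h>	/* htole16/htole32; glibc-specific, for illustration */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative stand-in for a PF-to-VF message: all multi-byte fields are
 * stored little-endian so the layout is the same on any host.
 */
struct demo_link_status_msg {
	uint16_t link_status;	/* 1 = up, 0 = down */
	uint32_t speed;		/* Mbps */
	uint16_t duplex;	/* 1 = full */
	uint8_t  flag;
} __attribute__((packed));

/* Stand-in for the mailbox send: just hexdump the raw bytes. */
static void demo_send(const uint8_t *buf, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++)
		printf("%02x ", buf[i]);
	printf("\n");
}

int main(void)
{
	struct demo_link_status_msg msg;
	uint8_t buf[sizeof(msg)];

	msg.link_status = htole16(1);
	msg.speed = htole32(25000);	/* 25G */
	msg.duplex = htole16(1);
	msg.flag = 1;

	memcpy(buf, &msg, sizeof(msg));	/* the message travels as raw bytes */
	demo_send(buf, sizeof(buf));
	return 0;
}
```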
link_info.link_status = cpu_to_le16(link_status); + link_info.speed = cpu_to_le32(hdev->hw.mac.speed); + link_info.duplex = cpu_to_le16(hdev->hw.mac.duplex); + link_info.flag = HCLGE_MBX_PUSH_LINK_STATUS_EN; + + /* send this requested info to VF */ + return hclge_send_mbx_msg(vport, (u8 *)&link_info, sizeof(link_info), + HCLGE_MBX_LINK_STAT_CHANGE, vport->vport_id); +} + +static void hclge_get_link_mode(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *mbx_req) +{ +#define HCLGE_SUPPORTED 1 + struct hclge_mbx_link_mode link_mode; + struct hclge_dev *hdev = vport->back; + unsigned long advertising; + unsigned long supported; + unsigned long send_data; + u8 dest_vfid; + + advertising = hdev->hw.mac.advertising[0]; + supported = hdev->hw.mac.supported[0]; + dest_vfid = mbx_req->mbx_src_vfid; + send_data = mbx_req->msg.data[0] == HCLGE_SUPPORTED ? supported : + advertising; + link_mode.idx = cpu_to_le16((u16)mbx_req->msg.data[0]); + link_mode.link_mode = cpu_to_le64(send_data); + + hclge_send_mbx_msg(vport, (u8 *)&link_mode, sizeof(link_mode), + HCLGE_MBX_LINK_STAT_MODE, dest_vfid); +} + +static int hclge_mbx_reset_vf_queue(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *mbx_req, + struct hclge_respond_to_vf_msg *resp_msg) +{ +#define HCLGE_RESET_ALL_QUEUE_DONE 1U + struct hnae3_handle *handle = &vport->nic; + struct hclge_dev *hdev = vport->back; + u16 queue_id; + int ret; + + queue_id = le16_to_cpu(*(__le16 *)mbx_req->msg.data); + resp_msg->data[0] = HCLGE_RESET_ALL_QUEUE_DONE; + resp_msg->len = sizeof(u8); + + /* pf will reset vf's all queues at a time. So it is unnecessary + * to reset queues if queue_id > 0, just return success. + */ + if (queue_id > 0) + return 0; + + ret = hclge_reset_tqp(handle); + if (ret) + dev_err(&hdev->pdev->dev, "failed to reset vf %u queue, ret = %d\n", + vport->vport_id - HCLGE_VF_VPORT_START_NUM, ret); + + return ret; +} + +static int hclge_reset_vf(struct hclge_vport *vport) +{ + struct hclge_dev *hdev = vport->back; + + dev_warn(&hdev->pdev->dev, "PF received VF reset request from VF %u!", + vport->vport_id - HCLGE_VF_VPORT_START_NUM); + + return hclge_func_reset_cmd(hdev, vport->vport_id); +} + +static void hclge_notify_vf_config(struct hclge_vport *vport) +{ + struct hclge_dev *hdev = vport->back; + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); + struct hclge_port_base_vlan_config *vlan_cfg; + int ret; + + hclge_push_vf_link_status(vport); + if (test_bit(HCLGE_VPORT_NEED_NOTIFY_RESET, &vport->need_notify)) { + ret = hclge_inform_vf_reset(vport, HNAE3_VF_PF_FUNC_RESET); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to inform VF %u reset!", + vport->vport_id - HCLGE_VF_VPORT_START_NUM); + return; + } + vport->need_notify = 0; + return; + } + + if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3 && + test_bit(HCLGE_VPORT_NEED_NOTIFY_VF_VLAN, &vport->need_notify)) { + vlan_cfg = &vport->port_base_vlan_cfg; + ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0], + vport->vport_id, + vlan_cfg->state, + &vlan_cfg->vlan_info); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to inform VF %u port base vlan!", + vport->vport_id - HCLGE_VF_VPORT_START_NUM); + return; + } + clear_bit(HCLGE_VPORT_NEED_NOTIFY_VF_VLAN, &vport->need_notify); + } +} + +static void hclge_vf_keep_alive(struct hclge_vport *vport) +{ + struct hclge_dev *hdev = vport->back; + + vport->last_active_jiffies = jiffies; + + if (test_bit(HCLGE_VPORT_STATE_INITED, &vport->state) && + !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) { + 
set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); + dev_info(&hdev->pdev->dev, "VF %u is alive!", + vport->vport_id - HCLGE_VF_VPORT_START_NUM); + hclge_notify_vf_config(vport); + } +} + +static int hclge_set_vf_mtu(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *mbx_req) +{ + struct hclge_mbx_mtu_info *mtu_info; + u32 mtu; + + mtu_info = (struct hclge_mbx_mtu_info *)mbx_req->msg.data; + mtu = le32_to_cpu(mtu_info->mtu); + + return hclge_set_vport_mtu(vport, mtu); +} + +static int hclge_get_queue_id_in_pf(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *mbx_req, + struct hclge_respond_to_vf_msg *resp_msg) +{ + struct hnae3_handle *handle = &vport->nic; + struct hclge_dev *hdev = vport->back; + u16 queue_id, qid_in_pf; + + queue_id = le16_to_cpu(*(__le16 *)mbx_req->msg.data); + if (queue_id >= handle->kinfo.num_tqps) { + dev_err(&hdev->pdev->dev, "Invalid queue id(%u) from VF %u\n", + queue_id, mbx_req->mbx_src_vfid); + return -EINVAL; + } + + qid_in_pf = hclge_covert_handle_qid_global(&vport->nic, queue_id); + *(__le16 *)resp_msg->data = cpu_to_le16(qid_in_pf); + resp_msg->len = sizeof(qid_in_pf); + return 0; +} + +static int hclge_get_rss_key(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *mbx_req, + struct hclge_respond_to_vf_msg *resp_msg) +{ +#define HCLGE_RSS_MBX_RESP_LEN 8 + struct hclge_dev *hdev = vport->back; + struct hclge_comm_rss_cfg *rss_cfg; + u8 index; + + index = mbx_req->msg.data[0]; + rss_cfg = &hdev->rss_cfg; + + /* Check the query index of rss_hash_key from VF, make sure no + * more than the size of rss_hash_key. + */ + if (((index + 1) * HCLGE_RSS_MBX_RESP_LEN) > + sizeof(rss_cfg->rss_hash_key)) { + dev_warn(&hdev->pdev->dev, + "failed to get the rss hash key, the index(%u) invalid !\n", + index); + return -EINVAL; + } + + memcpy(resp_msg->data, + &rss_cfg->rss_hash_key[index * HCLGE_RSS_MBX_RESP_LEN], + HCLGE_RSS_MBX_RESP_LEN); + resp_msg->len = HCLGE_RSS_MBX_RESP_LEN; + return 0; +} + +static void hclge_link_fail_parse(struct hclge_dev *hdev, u8 link_fail_code) +{ + switch (link_fail_code) { + case HCLGE_LF_REF_CLOCK_LOST: + dev_warn(&hdev->pdev->dev, "Reference clock lost!\n"); + break; + case HCLGE_LF_XSFP_TX_DISABLE: + dev_warn(&hdev->pdev->dev, "SFP tx is disabled!\n"); + break; + case HCLGE_LF_XSFP_ABSENT: + dev_warn(&hdev->pdev->dev, "SFP is absent!\n"); + break; + default: + break; + } +} + +static void hclge_handle_link_change_event(struct hclge_dev *hdev, + struct hclge_mbx_vf_to_pf_cmd *req) +{ + hclge_task_schedule(hdev, 0); + + if (!req->msg.subcode) + hclge_link_fail_parse(hdev, req->msg.data[0]); +} + +static bool hclge_cmd_crq_empty(struct hclge_hw *hw) +{ + u32 tail = hclge_read_dev(hw, HCLGE_COMM_NIC_CRQ_TAIL_REG); + + return tail == hw->hw.cmq.crq.next_to_use; +} + +static void hclge_handle_ncsi_error(struct hclge_dev *hdev) +{ + struct hnae3_ae_dev *ae_dev = hdev->ae_dev; + + ae_dev->ops->set_default_reset_request(ae_dev, HNAE3_GLOBAL_RESET); + dev_warn(&hdev->pdev->dev, "requesting reset due to NCSI error\n"); + ae_dev->ops->reset_event(hdev->pdev, NULL); +} + +static void hclge_handle_vf_tbl(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *mbx_req) +{ + struct hclge_dev *hdev = vport->back; + struct hclge_vf_vlan_cfg *msg_cmd; + + msg_cmd = (struct hclge_vf_vlan_cfg *)&mbx_req->msg; + if (msg_cmd->subcode == HCLGE_MBX_VPORT_LIST_CLEAR) { + hclge_rm_vport_all_mac_table(vport, true, HCLGE_MAC_ADDR_UC); + hclge_rm_vport_all_mac_table(vport, true, HCLGE_MAC_ADDR_MC); + hclge_rm_vport_all_vlan_table(vport, 
true); + } else { + dev_warn(&hdev->pdev->dev, "Invalid cmd(%u)\n", + msg_cmd->subcode); + } +} + +static int +hclge_mbx_map_ring_to_vector_handler(struct hclge_mbx_ops_param *param) +{ + return hclge_map_unmap_ring_to_vf_vector(param->vport, true, + param->req); +} + +static int +hclge_mbx_unmap_ring_to_vector_handler(struct hclge_mbx_ops_param *param) +{ + return hclge_map_unmap_ring_to_vf_vector(param->vport, false, + param->req); +} + +static int +hclge_mbx_get_ring_vector_map_handler(struct hclge_mbx_ops_param *param) +{ + int ret; + + ret = hclge_get_vf_ring_vector_map(param->vport, param->req, + param->resp_msg); + if (ret) + dev_err(¶m->vport->back->pdev->dev, + "PF fail(%d) to get VF ring vector map\n", + ret); + return ret; +} + +static int hclge_mbx_set_promisc_mode_handler(struct hclge_mbx_ops_param *param) +{ + hclge_set_vf_promisc_mode(param->vport, param->req); + return 0; +} + +static int hclge_mbx_set_unicast_handler(struct hclge_mbx_ops_param *param) +{ + int ret; + + ret = hclge_set_vf_uc_mac_addr(param->vport, param->req); + if (ret) + dev_err(¶m->vport->back->pdev->dev, + "PF fail(%d) to set VF UC MAC Addr\n", + ret); + return ret; +} + +static int hclge_mbx_set_multicast_handler(struct hclge_mbx_ops_param *param) +{ + int ret; + + ret = hclge_set_vf_mc_mac_addr(param->vport, param->req); + if (ret) + dev_err(¶m->vport->back->pdev->dev, + "PF fail(%d) to set VF MC MAC Addr\n", + ret); + return ret; +} + +static int hclge_mbx_set_vlan_handler(struct hclge_mbx_ops_param *param) +{ + int ret; + + ret = hclge_set_vf_vlan_cfg(param->vport, param->req, param->resp_msg); + if (ret) + dev_err(¶m->vport->back->pdev->dev, + "PF failed(%d) to config VF's VLAN\n", + ret); + return ret; +} + +static int hclge_mbx_set_alive_handler(struct hclge_mbx_ops_param *param) +{ + int ret; + + ret = hclge_set_vf_alive(param->vport, param->req); + if (ret) + dev_err(¶m->vport->back->pdev->dev, + "PF failed(%d) to set VF's ALIVE\n", + ret); + return ret; +} + +static int hclge_mbx_get_qinfo_handler(struct hclge_mbx_ops_param *param) +{ + hclge_get_vf_queue_info(param->vport, param->resp_msg); + return 0; +} + +static int hclge_mbx_get_qdepth_handler(struct hclge_mbx_ops_param *param) +{ + hclge_get_vf_queue_depth(param->vport, param->resp_msg); + return 0; +} + +static int hclge_mbx_get_basic_info_handler(struct hclge_mbx_ops_param *param) +{ + hclge_get_basic_info(param->vport, param->resp_msg); + return 0; +} + +static int hclge_mbx_get_link_status_handler(struct hclge_mbx_ops_param *param) +{ + int ret; + + ret = hclge_push_vf_link_status(param->vport); + if (ret) + dev_err(¶m->vport->back->pdev->dev, + "failed to inform link stat to VF, ret = %d\n", + ret); + return ret; +} + +static int hclge_mbx_queue_reset_handler(struct hclge_mbx_ops_param *param) +{ + return hclge_mbx_reset_vf_queue(param->vport, param->req, + param->resp_msg); +} + +static int hclge_mbx_reset_handler(struct hclge_mbx_ops_param *param) +{ + return hclge_reset_vf(param->vport); +} + +static int hclge_mbx_keep_alive_handler(struct hclge_mbx_ops_param *param) +{ + hclge_vf_keep_alive(param->vport); + return 0; +} + +static int hclge_mbx_set_mtu_handler(struct hclge_mbx_ops_param *param) +{ + int ret; + + ret = hclge_set_vf_mtu(param->vport, param->req); + if (ret) + dev_err(¶m->vport->back->pdev->dev, + "VF fail(%d) to set mtu\n", ret); + return ret; +} + +static int hclge_mbx_get_qid_in_pf_handler(struct hclge_mbx_ops_param *param) +{ + return hclge_get_queue_id_in_pf(param->vport, param->req, + param->resp_msg); +} + 
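[Editor's note] hclge_get_rss_key() above hands the VF its RSS hash key in 8-byte mailbox chunks, and the (index + 1) * HCLGE_RSS_MBX_RESP_LEN comparison is what keeps a VF from reading past the end of the key. A minimal standalone sketch of that check follows; the 40-byte key size and the helper name are assumptions for illustration only (the real bound is sizeof(rss_cfg->rss_hash_key)).

/* Sketch of the bounds check in hclge_get_rss_key() above; illustrative
 * only, assuming the 40-byte hash key used by this driver family.
 */
#include <stdbool.h>

#define RSS_KEY_SIZE		40	/* assumed sizeof(rss_cfg->rss_hash_key) */
#define RSS_MBX_RESP_LEN	8	/* bytes returned per mailbox response   */

static bool rss_key_index_valid(unsigned int index)
{
	/* index 0..4 cover bytes 0..39; index 5 would need bytes 40..47 */
	return (index + 1) * RSS_MBX_RESP_LEN <= RSS_KEY_SIZE;
}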
+static int hclge_mbx_get_rss_key_handler(struct hclge_mbx_ops_param *param) +{ + return hclge_get_rss_key(param->vport, param->req, param->resp_msg); +} + +static int hclge_mbx_get_link_mode_handler(struct hclge_mbx_ops_param *param) +{ + hclge_get_link_mode(param->vport, param->req); + return 0; +} + +static int +hclge_mbx_get_vf_flr_status_handler(struct hclge_mbx_ops_param *param) +{ + hclge_rm_vport_all_mac_table(param->vport, false, + HCLGE_MAC_ADDR_UC); + hclge_rm_vport_all_mac_table(param->vport, false, + HCLGE_MAC_ADDR_MC); + hclge_rm_vport_all_vlan_table(param->vport, false); + return 0; +} + +static int hclge_mbx_vf_uninit_handler(struct hclge_mbx_ops_param *param) +{ + hclge_rm_vport_all_mac_table(param->vport, true, + HCLGE_MAC_ADDR_UC); + hclge_rm_vport_all_mac_table(param->vport, true, + HCLGE_MAC_ADDR_MC); + hclge_rm_vport_all_vlan_table(param->vport, true); + param->vport->mps = 0; + return 0; +} + +static int hclge_mbx_get_media_type_handler(struct hclge_mbx_ops_param *param) +{ + hclge_get_vf_media_type(param->vport, param->resp_msg); + return 0; +} + +static int hclge_mbx_push_link_status_handler(struct hclge_mbx_ops_param *param) +{ + hclge_handle_link_change_event(param->vport->back, param->req); + return 0; +} + +static int hclge_mbx_get_mac_addr_handler(struct hclge_mbx_ops_param *param) +{ + hclge_get_vf_mac_addr(param->vport, param->resp_msg); + return 0; +} + +static int hclge_mbx_ncsi_error_handler(struct hclge_mbx_ops_param *param) +{ + hclge_handle_ncsi_error(param->vport->back); + return 0; +} + +static int hclge_mbx_handle_vf_tbl_handler(struct hclge_mbx_ops_param *param) +{ + hclge_handle_vf_tbl(param->vport, param->req); + return 0; +} + +static const hclge_mbx_ops_fn hclge_mbx_ops_list[HCLGE_MBX_OPCODE_MAX] = { + [HCLGE_MBX_RESET] = hclge_mbx_reset_handler, + [HCLGE_MBX_SET_UNICAST] = hclge_mbx_set_unicast_handler, + [HCLGE_MBX_SET_MULTICAST] = hclge_mbx_set_multicast_handler, + [HCLGE_MBX_SET_VLAN] = hclge_mbx_set_vlan_handler, + [HCLGE_MBX_MAP_RING_TO_VECTOR] = hclge_mbx_map_ring_to_vector_handler, + [HCLGE_MBX_UNMAP_RING_TO_VECTOR] = hclge_mbx_unmap_ring_to_vector_handler, + [HCLGE_MBX_SET_PROMISC_MODE] = hclge_mbx_set_promisc_mode_handler, + [HCLGE_MBX_GET_QINFO] = hclge_mbx_get_qinfo_handler, + [HCLGE_MBX_GET_QDEPTH] = hclge_mbx_get_qdepth_handler, + [HCLGE_MBX_GET_BASIC_INFO] = hclge_mbx_get_basic_info_handler, + [HCLGE_MBX_GET_RSS_KEY] = hclge_mbx_get_rss_key_handler, + [HCLGE_MBX_GET_MAC_ADDR] = hclge_mbx_get_mac_addr_handler, + [HCLGE_MBX_GET_LINK_STATUS] = hclge_mbx_get_link_status_handler, + [HCLGE_MBX_QUEUE_RESET] = hclge_mbx_queue_reset_handler, + [HCLGE_MBX_KEEP_ALIVE] = hclge_mbx_keep_alive_handler, + [HCLGE_MBX_SET_ALIVE] = hclge_mbx_set_alive_handler, + [HCLGE_MBX_SET_MTU] = hclge_mbx_set_mtu_handler, + [HCLGE_MBX_GET_QID_IN_PF] = hclge_mbx_get_qid_in_pf_handler, + [HCLGE_MBX_GET_LINK_MODE] = hclge_mbx_get_link_mode_handler, + [HCLGE_MBX_GET_MEDIA_TYPE] = hclge_mbx_get_media_type_handler, + [HCLGE_MBX_VF_UNINIT] = hclge_mbx_vf_uninit_handler, + [HCLGE_MBX_HANDLE_VF_TBL] = hclge_mbx_handle_vf_tbl_handler, + [HCLGE_MBX_GET_RING_VECTOR_MAP] = hclge_mbx_get_ring_vector_map_handler, + [HCLGE_MBX_GET_VF_FLR_STATUS] = hclge_mbx_get_vf_flr_status_handler, + [HCLGE_MBX_PUSH_LINK_STATUS] = hclge_mbx_push_link_status_handler, + [HCLGE_MBX_NCSI_ERROR] = hclge_mbx_ncsi_error_handler, +}; + +static void hclge_mbx_request_handling(struct hclge_mbx_ops_param *param) +{ + hclge_mbx_ops_fn cmd_func = NULL; + struct hclge_dev *hdev; + int ret = 0; + + hdev 
= param->vport->back; + cmd_func = hclge_mbx_ops_list[param->req->msg.code]; + if (cmd_func) + ret = cmd_func(param); + else + dev_err(&hdev->pdev->dev, + "un-supported mailbox message, code = %u\n", + param->req->msg.code); + + /* PF driver should not reply IMP */ + if (hnae3_get_bit(param->req->mbx_need_resp, HCLGE_MBX_NEED_RESP_B) && + param->req->msg.code < HCLGE_MBX_GET_VF_FLR_STATUS) { + param->resp_msg->status = ret; + if (time_is_before_jiffies(hdev->last_mbx_scheduled + + HCLGE_MBX_SCHED_TIMEOUT)) + dev_warn(&hdev->pdev->dev, + "resp vport%u mbx(%u,%u) late\n", + param->req->mbx_src_vfid, + param->req->msg.code, + param->req->msg.subcode); + + hclge_gen_resp_to_vf(param->vport, param->req, param->resp_msg); + } +} + +void hclge_mbx_handler(struct hclge_dev *hdev) +{ + struct hclge_comm_cmq_ring *crq = &hdev->hw.hw.cmq.crq; + struct hclge_respond_to_vf_msg resp_msg; + struct hclge_mbx_vf_to_pf_cmd *req; + struct hclge_mbx_ops_param param; + struct hclge_desc *desc; + unsigned int flag; + + param.resp_msg = &resp_msg; + /* handle all the mailbox requests in the queue */ + while (!hclge_cmd_crq_empty(&hdev->hw)) { + if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, + &hdev->hw.hw.comm_state)) { + dev_warn(&hdev->pdev->dev, + "command queue needs re-initializing\n"); + return; + } + + desc = &crq->desc[crq->next_to_use]; + req = (struct hclge_mbx_vf_to_pf_cmd *)desc->data; + + flag = le16_to_cpu(crq->desc[crq->next_to_use].flag); + if (unlikely(!hnae3_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B))) { + dev_warn(&hdev->pdev->dev, + "dropped invalid mailbox message, code = %u\n", + req->msg.code); + + /* dropping/not processing this invalid message */ + crq->desc[crq->next_to_use].flag = 0; + hclge_mbx_ring_ptr_move_crq(crq); + continue; + } + + trace_hclge_pf_mbx_get(hdev, req); + + /* clear the resp_msg before processing every mailbox message */ + memset(&resp_msg, 0, sizeof(resp_msg)); + param.vport = &hdev->vport[req->mbx_src_vfid]; + param.req = req; + hclge_mbx_request_handling(¶m); + + crq->desc[crq->next_to_use].flag = 0; + hclge_mbx_ring_ptr_move_crq(crq); + } + + /* Write back CMDQ_RQ header pointer, M7 need this pointer */ + hclge_write_dev(&hdev->hw, HCLGE_COMM_NIC_CRQ_HEAD_REG, + crq->next_to_use); +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c new file mode 100644 index 000000000..85fb11de4 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c @@ -0,0 +1,311 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. 
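[Editor's note] hclge_mbx_request_handling() above dispatches each VF request through the opcode-indexed hclge_mbx_ops_list[] table. The sketch below shows the same table-dispatch pattern in isolation; the names and the error return are illustrative, and unlike this sketch the driver only logs an unsupported opcode rather than returning an error code for it.

/* Generic sketch of an opcode-indexed handler table; not driver code. */
#include <errno.h>

typedef int (*mbx_handler_t)(void *param);

static int mbx_dispatch(const mbx_handler_t *table, unsigned int nr_ops,
			unsigned int code, void *param)
{
	if (code >= nr_ops || !table[code])
		return -EOPNOTSUPP;	/* unknown or unimplemented opcode */

	return table[code](param);
}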
+ +#include <linux/etherdevice.h> +#include <linux/kernel.h> +#include <linux/marvell_phy.h> + +#include "hclge_cmd.h" +#include "hclge_main.h" +#include "hclge_mdio.h" + +enum hclge_mdio_c22_op_seq { + HCLGE_MDIO_C22_WRITE = 1, + HCLGE_MDIO_C22_READ = 2 +}; + +#define HCLGE_MDIO_CTRL_START_B 0 +#define HCLGE_MDIO_CTRL_ST_S 1 +#define HCLGE_MDIO_CTRL_ST_M (0x3 << HCLGE_MDIO_CTRL_ST_S) +#define HCLGE_MDIO_CTRL_OP_S 3 +#define HCLGE_MDIO_CTRL_OP_M (0x3 << HCLGE_MDIO_CTRL_OP_S) + +#define HCLGE_MDIO_PHYID_S 0 +#define HCLGE_MDIO_PHYID_M (0x1f << HCLGE_MDIO_PHYID_S) + +#define HCLGE_MDIO_PHYREG_S 0 +#define HCLGE_MDIO_PHYREG_M (0x1f << HCLGE_MDIO_PHYREG_S) + +#define HCLGE_MDIO_STA_B 0 + +struct hclge_mdio_cfg_cmd { + u8 ctrl_bit; + u8 phyid; + u8 phyad; + u8 rsvd; + __le16 reserve; + __le16 data_wr; + __le16 data_rd; + __le16 sta; +}; + +static int hclge_mdio_write(struct mii_bus *bus, int phyid, int regnum, + u16 data) +{ + struct hclge_mdio_cfg_cmd *mdio_cmd; + struct hclge_dev *hdev = bus->priv; + struct hclge_desc desc; + int ret; + + if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state)) + return -EBUSY; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MDIO_CONFIG, false); + + mdio_cmd = (struct hclge_mdio_cfg_cmd *)desc.data; + + hnae3_set_field(mdio_cmd->phyid, HCLGE_MDIO_PHYID_M, + HCLGE_MDIO_PHYID_S, (u32)phyid); + hnae3_set_field(mdio_cmd->phyad, HCLGE_MDIO_PHYREG_M, + HCLGE_MDIO_PHYREG_S, (u32)regnum); + + hnae3_set_bit(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_START_B, 1); + hnae3_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_ST_M, + HCLGE_MDIO_CTRL_ST_S, 1); + hnae3_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_OP_M, + HCLGE_MDIO_CTRL_OP_S, HCLGE_MDIO_C22_WRITE); + + mdio_cmd->data_wr = cpu_to_le16(data); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "mdio write fail when sending cmd, status is %d.\n", + ret); + return ret; + } + + return 0; +} + +static int hclge_mdio_read(struct mii_bus *bus, int phyid, int regnum) +{ + struct hclge_mdio_cfg_cmd *mdio_cmd; + struct hclge_dev *hdev = bus->priv; + struct hclge_desc desc; + int ret; + + if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state)) + return -EBUSY; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MDIO_CONFIG, true); + + mdio_cmd = (struct hclge_mdio_cfg_cmd *)desc.data; + + hnae3_set_field(mdio_cmd->phyid, HCLGE_MDIO_PHYID_M, + HCLGE_MDIO_PHYID_S, (u32)phyid); + hnae3_set_field(mdio_cmd->phyad, HCLGE_MDIO_PHYREG_M, + HCLGE_MDIO_PHYREG_S, (u32)regnum); + + hnae3_set_bit(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_START_B, 1); + hnae3_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_ST_M, + HCLGE_MDIO_CTRL_ST_S, 1); + hnae3_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_OP_M, + HCLGE_MDIO_CTRL_OP_S, HCLGE_MDIO_C22_READ); + + /* Read out phy data */ + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "mdio read fail when get data, status is %d.\n", + ret); + return ret; + } + + if (hnae3_get_bit(le16_to_cpu(mdio_cmd->sta), HCLGE_MDIO_STA_B)) { + dev_err(&hdev->pdev->dev, "mdio read data error\n"); + return -EIO; + } + + return le16_to_cpu(mdio_cmd->data_rd); +} + +int hclge_mac_mdio_config(struct hclge_dev *hdev) +{ +#define PHY_INEXISTENT 255 + + struct hclge_mac *mac = &hdev->hw.mac; + struct phy_device *phydev; + struct mii_bus *mdio_bus; + int ret; + + if (hdev->hw.mac.phy_addr == PHY_INEXISTENT) { + dev_info(&hdev->pdev->dev, + "no phy device is connected to mdio bus\n"); + return 0; + } else if (hdev->hw.mac.phy_addr >= 
PHY_MAX_ADDR) { + dev_err(&hdev->pdev->dev, "phy_addr(%u) is too large.\n", + hdev->hw.mac.phy_addr); + return -EINVAL; + } + + mdio_bus = devm_mdiobus_alloc(&hdev->pdev->dev); + if (!mdio_bus) + return -ENOMEM; + + mdio_bus->name = "hisilicon MII bus"; + mdio_bus->read = hclge_mdio_read; + mdio_bus->write = hclge_mdio_write; + snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s-%s", "mii", + dev_name(&hdev->pdev->dev)); + + mdio_bus->parent = &hdev->pdev->dev; + mdio_bus->priv = hdev; + mdio_bus->phy_mask = ~(1 << mac->phy_addr); + ret = mdiobus_register(mdio_bus); + if (ret) { + dev_err(mdio_bus->parent, + "failed to register MDIO bus, ret = %d\n", ret); + return ret; + } + + phydev = mdiobus_get_phy(mdio_bus, mac->phy_addr); + if (!phydev) { + dev_err(mdio_bus->parent, "Failed to get phy device\n"); + mdiobus_unregister(mdio_bus); + return -EIO; + } + + mac->phydev = phydev; + mac->mdio_bus = mdio_bus; + + return 0; +} + +static void hclge_mac_adjust_link(struct net_device *netdev) +{ + struct hnae3_handle *h = *((void **)netdev_priv(netdev)); + struct hclge_vport *vport = hclge_get_vport(h); + struct hclge_dev *hdev = vport->back; + int duplex, speed; + int ret; + + /* When phy link down, do nothing */ + if (netdev->phydev->link == 0) + return; + + speed = netdev->phydev->speed; + duplex = netdev->phydev->duplex; + + ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex, 0); + if (ret) + netdev_err(netdev, "failed to adjust link.\n"); + + ret = hclge_cfg_flowctrl(hdev); + if (ret) + netdev_err(netdev, "failed to configure flow control.\n"); +} + +int hclge_mac_connect_phy(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct net_device *netdev = hdev->vport[0].nic.netdev; + struct phy_device *phydev = hdev->hw.mac.phydev; + __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; + int ret; + + if (!phydev) + return 0; + + linkmode_clear_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, phydev->supported); + + phydev->dev_flags |= MARVELL_PHY_LED0_LINK_LED1_ACTIVE; + + ret = phy_connect_direct(netdev, phydev, + hclge_mac_adjust_link, + PHY_INTERFACE_MODE_SGMII); + if (ret) { + netdev_err(netdev, "phy_connect_direct err.\n"); + return ret; + } + + linkmode_copy(mask, hdev->hw.mac.supported); + linkmode_and(phydev->supported, phydev->supported, mask); + linkmode_copy(phydev->advertising, phydev->supported); + + /* supported flag is Pause and Asym Pause, but default advertising + * should be rx on, tx on, so need clear Asym Pause in advertising + * flag + */ + linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, + phydev->advertising); + + phy_attached_info(phydev); + + return 0; +} + +void hclge_mac_disconnect_phy(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct phy_device *phydev = hdev->hw.mac.phydev; + + if (!phydev) + return; + + phy_disconnect(phydev); +} + +void hclge_mac_start_phy(struct hclge_dev *hdev) +{ + struct phy_device *phydev = hdev->hw.mac.phydev; + + if (!phydev) + return; + + phy_loopback(phydev, false); + + phy_start(phydev); +} + +void hclge_mac_stop_phy(struct hclge_dev *hdev) +{ + struct net_device *netdev = hdev->vport[0].nic.netdev; + struct phy_device *phydev = netdev->phydev; + + if (!phydev) + return; + + phy_stop(phydev); +} + +u16 hclge_read_phy_reg(struct hclge_dev *hdev, u16 reg_addr) +{ + struct hclge_phy_reg_cmd *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PHY_REG, true); + + req 
= (struct hclge_phy_reg_cmd *)desc.data; + req->reg_addr = cpu_to_le16(reg_addr); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "failed to read phy reg, ret = %d.\n", ret); + + return le16_to_cpu(req->reg_val); +} + +int hclge_write_phy_reg(struct hclge_dev *hdev, u16 reg_addr, u16 val) +{ + struct hclge_phy_reg_cmd *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PHY_REG, false); + + req = (struct hclge_phy_reg_cmd *)desc.data; + req->reg_addr = cpu_to_le16(reg_addr); + req->reg_val = cpu_to_le16(val); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "failed to write phy reg, ret = %d.\n", ret); + + return ret; +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h new file mode 100644 index 000000000..4200d0b6d --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +// Copyright (c) 2016-2017 Hisilicon Limited. + +#ifndef __HCLGE_MDIO_H +#define __HCLGE_MDIO_H + +#include "hnae3.h" + +struct hclge_dev; + +int hclge_mac_mdio_config(struct hclge_dev *hdev); +int hclge_mac_connect_phy(struct hnae3_handle *handle); +void hclge_mac_disconnect_phy(struct hnae3_handle *handle); +void hclge_mac_start_phy(struct hclge_dev *hdev); +void hclge_mac_stop_phy(struct hclge_dev *hdev); +u16 hclge_read_phy_reg(struct hclge_dev *hdev, u16 reg_addr); +int hclge_write_phy_reg(struct hclge_dev *hdev, u16 reg_addr, u16 val); + +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c new file mode 100644 index 000000000..a40b1583f --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c @@ -0,0 +1,564 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2021 Hisilicon Limited. + +#include <linux/skbuff.h> +#include "hclge_main.h" +#include "hnae3.h" + +static int hclge_ptp_get_cycle(struct hclge_dev *hdev) +{ + struct hclge_ptp *ptp = hdev->ptp; + + ptp->cycle.quo = readl(hdev->ptp->io_base + HCLGE_PTP_CYCLE_QUO_REG) & + HCLGE_PTP_CYCLE_QUO_MASK; + ptp->cycle.numer = readl(hdev->ptp->io_base + HCLGE_PTP_CYCLE_NUM_REG); + ptp->cycle.den = readl(hdev->ptp->io_base + HCLGE_PTP_CYCLE_DEN_REG); + + if (ptp->cycle.den == 0) { + dev_err(&hdev->pdev->dev, "invalid ptp cycle denominator!\n"); + return -EINVAL; + } + + return 0; +} + +static int hclge_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) +{ + struct hclge_dev *hdev = hclge_ptp_get_hdev(ptp); + struct hclge_ptp_cycle *cycle = &hdev->ptp->cycle; + u64 adj_val, adj_base, diff; + unsigned long flags; + bool is_neg = false; + u32 quo, numerator; + + if (ppb < 0) { + ppb = -ppb; + is_neg = true; + } + + adj_base = (u64)cycle->quo * (u64)cycle->den + (u64)cycle->numer; + adj_val = adj_base * ppb; + diff = div_u64(adj_val, 1000000000ULL); + + if (is_neg) + adj_val = adj_base - diff; + else + adj_val = adj_base + diff; + + /* This clock cycle is defined by three part: quotient, numerator + * and denominator. For example, 2.5ns, the quotient is 2, + * denominator is fixed to ptp->cycle.den, and numerator + * is 0.5 * ptp->cycle.den. 
+ */ + quo = div_u64_rem(adj_val, cycle->den, &numerator); + + spin_lock_irqsave(&hdev->ptp->lock, flags); + writel(quo & HCLGE_PTP_CYCLE_QUO_MASK, + hdev->ptp->io_base + HCLGE_PTP_CYCLE_QUO_REG); + writel(numerator, hdev->ptp->io_base + HCLGE_PTP_CYCLE_NUM_REG); + writel(cycle->den, hdev->ptp->io_base + HCLGE_PTP_CYCLE_DEN_REG); + writel(HCLGE_PTP_CYCLE_ADJ_EN, + hdev->ptp->io_base + HCLGE_PTP_CYCLE_CFG_REG); + spin_unlock_irqrestore(&hdev->ptp->lock, flags); + + return 0; +} + +bool hclge_ptp_set_tx_info(struct hnae3_handle *handle, struct sk_buff *skb) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct hclge_ptp *ptp = hdev->ptp; + + if (!test_bit(HCLGE_PTP_FLAG_TX_EN, &ptp->flags) || + test_and_set_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state)) { + ptp->tx_skipped++; + return false; + } + + ptp->tx_start = jiffies; + ptp->tx_skb = skb_get(skb); + ptp->tx_cnt++; + + return true; +} + +void hclge_ptp_clean_tx_hwts(struct hclge_dev *hdev) +{ + struct sk_buff *skb = hdev->ptp->tx_skb; + struct skb_shared_hwtstamps hwts; + u32 hi, lo; + u64 ns; + + ns = readl(hdev->ptp->io_base + HCLGE_PTP_TX_TS_NSEC_REG) & + HCLGE_PTP_TX_TS_NSEC_MASK; + lo = readl(hdev->ptp->io_base + HCLGE_PTP_TX_TS_SEC_L_REG); + hi = readl(hdev->ptp->io_base + HCLGE_PTP_TX_TS_SEC_H_REG) & + HCLGE_PTP_TX_TS_SEC_H_MASK; + hdev->ptp->last_tx_seqid = readl(hdev->ptp->io_base + + HCLGE_PTP_TX_TS_SEQID_REG); + + if (skb) { + hdev->ptp->tx_skb = NULL; + hdev->ptp->tx_cleaned++; + + ns += (((u64)hi) << 32 | lo) * NSEC_PER_SEC; + hwts.hwtstamp = ns_to_ktime(ns); + skb_tstamp_tx(skb, &hwts); + dev_kfree_skb_any(skb); + } + + clear_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state); +} + +void hclge_ptp_get_rx_hwts(struct hnae3_handle *handle, struct sk_buff *skb, + u32 nsec, u32 sec) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + unsigned long flags; + u64 ns = nsec; + u32 sec_h; + + if (!test_bit(HCLGE_PTP_FLAG_RX_EN, &hdev->ptp->flags)) + return; + + /* Since the BD does not have enough space for the higher 16 bits of + * second, and this part will not change frequently, so read it + * from register. 
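[Editor's note] The frequency adjustment in hclge_ptp_adjfreq() above amounts to scaling the fixed-point cycle word (quotient, numerator, denominator) by ppb parts per billion and then splitting it back into quotient and numerator. A small user-space check of the same integer math is shown below; the quo/numer/den values are invented for illustration, since the driver reads the real ones from the PTP cycle registers.

/* Standalone check of the hclge_ptp_adjfreq() arithmetic; invented values. */
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t quo = 2, numer = 500000, den = 1000000;	/* 2.5 ns cycle */
	uint64_t base = quo * den + numer;			/* 2500000      */
	uint64_t ppb = 100000;					/* +100 ppm     */
	uint64_t adj = base + base * ppb / 1000000000ULL;	/* 2500250      */

	/* split back into quotient and numerator, as the driver does */
	printf("quo=%" PRIu64 " numer=%" PRIu64 "\n", adj / den, adj % den);
	return 0;
}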
+ */ + spin_lock_irqsave(&hdev->ptp->lock, flags); + sec_h = readl(hdev->ptp->io_base + HCLGE_PTP_CUR_TIME_SEC_H_REG); + spin_unlock_irqrestore(&hdev->ptp->lock, flags); + + ns += (((u64)sec_h) << HCLGE_PTP_SEC_H_OFFSET | sec) * NSEC_PER_SEC; + skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns); + hdev->ptp->last_rx = jiffies; + hdev->ptp->rx_cnt++; +} + +static int hclge_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts, + struct ptp_system_timestamp *sts) +{ + struct hclge_dev *hdev = hclge_ptp_get_hdev(ptp); + unsigned long flags; + u32 hi, lo; + u64 ns; + + spin_lock_irqsave(&hdev->ptp->lock, flags); + ns = readl(hdev->ptp->io_base + HCLGE_PTP_CUR_TIME_NSEC_REG); + hi = readl(hdev->ptp->io_base + HCLGE_PTP_CUR_TIME_SEC_H_REG); + lo = readl(hdev->ptp->io_base + HCLGE_PTP_CUR_TIME_SEC_L_REG); + spin_unlock_irqrestore(&hdev->ptp->lock, flags); + + ns += (((u64)hi) << HCLGE_PTP_SEC_H_OFFSET | lo) * NSEC_PER_SEC; + *ts = ns_to_timespec64(ns); + + return 0; +} + +static int hclge_ptp_settime(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + struct hclge_dev *hdev = hclge_ptp_get_hdev(ptp); + unsigned long flags; + + spin_lock_irqsave(&hdev->ptp->lock, flags); + writel(ts->tv_nsec, hdev->ptp->io_base + HCLGE_PTP_TIME_NSEC_REG); + writel(ts->tv_sec >> HCLGE_PTP_SEC_H_OFFSET, + hdev->ptp->io_base + HCLGE_PTP_TIME_SEC_H_REG); + writel(ts->tv_sec & HCLGE_PTP_SEC_L_MASK, + hdev->ptp->io_base + HCLGE_PTP_TIME_SEC_L_REG); + /* synchronize the time of phc */ + writel(HCLGE_PTP_TIME_SYNC_EN, + hdev->ptp->io_base + HCLGE_PTP_TIME_SYNC_REG); + spin_unlock_irqrestore(&hdev->ptp->lock, flags); + + return 0; +} + +static int hclge_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + struct hclge_dev *hdev = hclge_ptp_get_hdev(ptp); + unsigned long flags; + bool is_neg = false; + u32 adj_val = 0; + + if (delta < 0) { + adj_val |= HCLGE_PTP_TIME_NSEC_NEG; + delta = -delta; + is_neg = true; + } + + if (delta > HCLGE_PTP_TIME_NSEC_MASK) { + struct timespec64 ts; + s64 ns; + + hclge_ptp_gettimex(ptp, &ts, NULL); + ns = timespec64_to_ns(&ts); + ns = is_neg ? ns - delta : ns + delta; + ts = ns_to_timespec64(ns); + return hclge_ptp_settime(ptp, &ts); + } + + adj_val |= delta & HCLGE_PTP_TIME_NSEC_MASK; + + spin_lock_irqsave(&hdev->ptp->lock, flags); + writel(adj_val, hdev->ptp->io_base + HCLGE_PTP_TIME_NSEC_REG); + writel(HCLGE_PTP_TIME_ADJ_EN, + hdev->ptp->io_base + HCLGE_PTP_TIME_ADJ_REG); + spin_unlock_irqrestore(&hdev->ptp->lock, flags); + + return 0; +} + +int hclge_ptp_get_cfg(struct hclge_dev *hdev, struct ifreq *ifr) +{ + if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state)) + return -EOPNOTSUPP; + + return copy_to_user(ifr->ifr_data, &hdev->ptp->ts_cfg, + sizeof(struct hwtstamp_config)) ? -EFAULT : 0; +} + +static int hclge_ptp_int_en(struct hclge_dev *hdev, bool en) +{ + struct hclge_ptp_int_cmd *req; + struct hclge_desc desc; + int ret; + + req = (struct hclge_ptp_int_cmd *)desc.data; + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PTP_INT_EN, false); + req->int_en = en ? 1 : 0; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "failed to %s ptp interrupt, ret = %d\n", + en ? 
"enable" : "disable", ret); + + return ret; +} + +int hclge_ptp_cfg_qry(struct hclge_dev *hdev, u32 *cfg) +{ + struct hclge_ptp_cfg_cmd *req; + struct hclge_desc desc; + int ret; + + req = (struct hclge_ptp_cfg_cmd *)desc.data; + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PTP_MODE_CFG, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to query ptp config, ret = %d\n", ret); + return ret; + } + + *cfg = le32_to_cpu(req->cfg); + + return 0; +} + +static int hclge_ptp_cfg(struct hclge_dev *hdev, u32 cfg) +{ + struct hclge_ptp_cfg_cmd *req; + struct hclge_desc desc; + int ret; + + req = (struct hclge_ptp_cfg_cmd *)desc.data; + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PTP_MODE_CFG, false); + req->cfg = cpu_to_le32(cfg); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "failed to config ptp, ret = %d\n", ret); + + return ret; +} + +static int hclge_ptp_set_tx_mode(struct hwtstamp_config *cfg, + unsigned long *flags, u32 *ptp_cfg) +{ + switch (cfg->tx_type) { + case HWTSTAMP_TX_OFF: + clear_bit(HCLGE_PTP_FLAG_TX_EN, flags); + break; + case HWTSTAMP_TX_ON: + set_bit(HCLGE_PTP_FLAG_TX_EN, flags); + *ptp_cfg |= HCLGE_PTP_TX_EN_B; + break; + default: + return -ERANGE; + } + + return 0; +} + +static int hclge_ptp_set_rx_mode(struct hwtstamp_config *cfg, + unsigned long *flags, u32 *ptp_cfg) +{ + int rx_filter = cfg->rx_filter; + + switch (cfg->rx_filter) { + case HWTSTAMP_FILTER_NONE: + clear_bit(HCLGE_PTP_FLAG_RX_EN, flags); + break; + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + set_bit(HCLGE_PTP_FLAG_RX_EN, flags); + *ptp_cfg |= HCLGE_PTP_RX_EN_B; + *ptp_cfg |= HCLGE_PTP_UDP_FULL_TYPE << HCLGE_PTP_UDP_EN_SHIFT; + rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; + break; + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + set_bit(HCLGE_PTP_FLAG_RX_EN, flags); + *ptp_cfg |= HCLGE_PTP_RX_EN_B; + *ptp_cfg |= HCLGE_PTP_UDP_FULL_TYPE << HCLGE_PTP_UDP_EN_SHIFT; + *ptp_cfg |= HCLGE_PTP_MSG1_V2_DEFAULT << HCLGE_PTP_MSG1_SHIFT; + *ptp_cfg |= HCLGE_PTP_MSG0_V2_EVENT << HCLGE_PTP_MSG0_SHIFT; + *ptp_cfg |= HCLGE_PTP_MSG_TYPE_V2 << HCLGE_PTP_MSG_TYPE_SHIFT; + rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + break; + case HWTSTAMP_FILTER_ALL: + default: + return -ERANGE; + } + + cfg->rx_filter = rx_filter; + + return 0; +} + +static int hclge_ptp_set_ts_mode(struct hclge_dev *hdev, + struct hwtstamp_config *cfg) +{ + unsigned long flags = hdev->ptp->flags; + u32 ptp_cfg = 0; + int ret; + + if (test_bit(HCLGE_PTP_FLAG_EN, &hdev->ptp->flags)) + ptp_cfg |= HCLGE_PTP_EN_B; + + ret = hclge_ptp_set_tx_mode(cfg, &flags, &ptp_cfg); + if (ret) + return ret; + + ret = hclge_ptp_set_rx_mode(cfg, &flags, &ptp_cfg); + if (ret) + return ret; + + ret = hclge_ptp_cfg(hdev, ptp_cfg); + if (ret) + return ret; + + hdev->ptp->flags = flags; + hdev->ptp->ptp_cfg = ptp_cfg; + + return 0; +} + +int hclge_ptp_set_cfg(struct hclge_dev *hdev, struct ifreq *ifr) +{ + struct hwtstamp_config cfg; + int ret; + + if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state)) { + dev_err(&hdev->pdev->dev, "phc is unsupported\n"); + return -EOPNOTSUPP; + } + + if (copy_from_user(&cfg, 
ifr->ifr_data, sizeof(cfg))) + return -EFAULT; + + ret = hclge_ptp_set_ts_mode(hdev, &cfg); + if (ret) + return ret; + + hdev->ptp->ts_cfg = cfg; + + return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0; +} + +int hclge_ptp_get_ts_info(struct hnae3_handle *handle, + struct ethtool_ts_info *info) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state)) { + dev_err(&hdev->pdev->dev, "phc is unsupported\n"); + return -EOPNOTSUPP; + } + + info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + if (hdev->ptp->clock) + info->phc_index = ptp_clock_index(hdev->ptp->clock); + else + info->phc_index = -1; + + info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON); + + info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | + BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | + BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | + BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ); + + info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | + BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | + BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) | + BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) | + BIT(HWTSTAMP_FILTER_PTP_V2_SYNC) | + BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | + BIT(HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) | + BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ); + + return 0; +} + +static int hclge_ptp_create_clock(struct hclge_dev *hdev) +{ + struct hclge_ptp *ptp; + + ptp = devm_kzalloc(&hdev->pdev->dev, sizeof(*ptp), GFP_KERNEL); + if (!ptp) + return -ENOMEM; + + ptp->hdev = hdev; + snprintf(ptp->info.name, sizeof(ptp->info.name), "%s", + HCLGE_DRIVER_NAME); + ptp->info.owner = THIS_MODULE; + ptp->info.max_adj = HCLGE_PTP_CYCLE_ADJ_MAX; + ptp->info.n_ext_ts = 0; + ptp->info.pps = 0; + ptp->info.adjfreq = hclge_ptp_adjfreq; + ptp->info.adjtime = hclge_ptp_adjtime; + ptp->info.gettimex64 = hclge_ptp_gettimex; + ptp->info.settime64 = hclge_ptp_settime; + + ptp->info.n_alarm = 0; + ptp->clock = ptp_clock_register(&ptp->info, &hdev->pdev->dev); + if (IS_ERR(ptp->clock)) { + dev_err(&hdev->pdev->dev, + "%d failed to register ptp clock, ret = %ld\n", + ptp->info.n_alarm, PTR_ERR(ptp->clock)); + return -ENODEV; + } else if (!ptp->clock) { + dev_err(&hdev->pdev->dev, "failed to register ptp clock\n"); + return -ENODEV; + } + + spin_lock_init(&ptp->lock); + ptp->io_base = hdev->hw.hw.io_base + HCLGE_PTP_REG_OFFSET; + ptp->ts_cfg.rx_filter = HWTSTAMP_FILTER_NONE; + ptp->ts_cfg.tx_type = HWTSTAMP_TX_OFF; + hdev->ptp = ptp; + + return 0; +} + +static void hclge_ptp_destroy_clock(struct hclge_dev *hdev) +{ + ptp_clock_unregister(hdev->ptp->clock); + hdev->ptp->clock = NULL; + devm_kfree(&hdev->pdev->dev, hdev->ptp); + hdev->ptp = NULL; +} + +int hclge_ptp_init(struct hclge_dev *hdev) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); + struct timespec64 ts; + int ret; + + if (!test_bit(HNAE3_DEV_SUPPORT_PTP_B, ae_dev->caps)) + return 0; + + if (!hdev->ptp) { + ret = hclge_ptp_create_clock(hdev); + if (ret) + return ret; + + ret = hclge_ptp_get_cycle(hdev); + if (ret) + return ret; + } + + ret = hclge_ptp_int_en(hdev, true); + if (ret) + goto out; + + set_bit(HCLGE_PTP_FLAG_EN, &hdev->ptp->flags); + ret = hclge_ptp_adjfreq(&hdev->ptp->info, 0); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to init freq, ret = %d\n", ret); + goto out; + } + + ret = hclge_ptp_set_ts_mode(hdev, &hdev->ptp->ts_cfg); + if 
(ret) { + dev_err(&hdev->pdev->dev, + "failed to init ts mode, ret = %d\n", ret); + goto out; + } + + ktime_get_real_ts64(&ts); + ret = hclge_ptp_settime(&hdev->ptp->info, &ts); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to init ts time, ret = %d\n", ret); + goto out; + } + + set_bit(HCLGE_STATE_PTP_EN, &hdev->state); + dev_info(&hdev->pdev->dev, "phc initializes ok!\n"); + + return 0; + +out: + hclge_ptp_destroy_clock(hdev); + + return ret; +} + +void hclge_ptp_uninit(struct hclge_dev *hdev) +{ + struct hclge_ptp *ptp = hdev->ptp; + + if (!ptp) + return; + + hclge_ptp_int_en(hdev, false); + clear_bit(HCLGE_STATE_PTP_EN, &hdev->state); + clear_bit(HCLGE_PTP_FLAG_EN, &ptp->flags); + ptp->ts_cfg.rx_filter = HWTSTAMP_FILTER_NONE; + ptp->ts_cfg.tx_type = HWTSTAMP_TX_OFF; + + if (hclge_ptp_set_ts_mode(hdev, &ptp->ts_cfg)) + dev_err(&hdev->pdev->dev, "failed to disable phc\n"); + + if (ptp->tx_skb) { + struct sk_buff *skb = ptp->tx_skb; + + ptp->tx_skb = NULL; + dev_kfree_skb_any(skb); + } + + hclge_ptp_destroy_clock(hdev); +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h new file mode 100644 index 000000000..bbee74cd8 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h @@ -0,0 +1,143 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +// Copyright (c) 2021 Hisilicon Limited. + +#ifndef __HCLGE_PTP_H +#define __HCLGE_PTP_H + +#include <linux/ptp_clock_kernel.h> +#include <linux/net_tstamp.h> +#include <linux/types.h> + +struct hclge_dev; +struct ifreq; + +#define HCLGE_PTP_REG_OFFSET 0x29000 + +#define HCLGE_PTP_TX_TS_SEQID_REG 0x0 +#define HCLGE_PTP_TX_TS_NSEC_REG 0x4 +#define HCLGE_PTP_TX_TS_NSEC_MASK GENMASK(29, 0) +#define HCLGE_PTP_TX_TS_SEC_L_REG 0x8 +#define HCLGE_PTP_TX_TS_SEC_H_REG 0xC +#define HCLGE_PTP_TX_TS_SEC_H_MASK GENMASK(15, 0) +#define HCLGE_PTP_TX_TS_CNT_REG 0x30 + +#define HCLGE_PTP_TIME_SEC_H_REG 0x50 +#define HCLGE_PTP_TIME_SEC_H_MASK GENMASK(15, 0) +#define HCLGE_PTP_TIME_SEC_L_REG 0x54 +#define HCLGE_PTP_TIME_NSEC_REG 0x58 +#define HCLGE_PTP_TIME_NSEC_MASK GENMASK(29, 0) +#define HCLGE_PTP_TIME_NSEC_NEG BIT(31) +#define HCLGE_PTP_TIME_SYNC_REG 0x5C +#define HCLGE_PTP_TIME_SYNC_EN BIT(0) +#define HCLGE_PTP_TIME_ADJ_REG 0x60 +#define HCLGE_PTP_TIME_ADJ_EN BIT(0) +#define HCLGE_PTP_CYCLE_QUO_REG 0x64 +#define HCLGE_PTP_CYCLE_QUO_MASK GENMASK(7, 0) +#define HCLGE_PTP_CYCLE_DEN_REG 0x68 +#define HCLGE_PTP_CYCLE_NUM_REG 0x6C +#define HCLGE_PTP_CYCLE_CFG_REG 0x70 +#define HCLGE_PTP_CYCLE_ADJ_EN BIT(0) +#define HCLGE_PTP_CUR_TIME_SEC_H_REG 0x74 +#define HCLGE_PTP_CUR_TIME_SEC_L_REG 0x78 +#define HCLGE_PTP_CUR_TIME_NSEC_REG 0x7C + +#define HCLGE_PTP_CYCLE_ADJ_MAX 500000000 +#define HCLGE_PTP_SEC_H_OFFSET 32u +#define HCLGE_PTP_SEC_L_MASK GENMASK(31, 0) + +#define HCLGE_PTP_FLAG_EN 0 +#define HCLGE_PTP_FLAG_TX_EN 1 +#define HCLGE_PTP_FLAG_RX_EN 2 + +struct hclge_ptp_cycle { + u32 quo; + u32 numer; + u32 den; +}; + +struct hclge_ptp { + struct hclge_dev *hdev; + struct ptp_clock *clock; + struct sk_buff *tx_skb; + unsigned long flags; + void __iomem *io_base; + struct ptp_clock_info info; + struct hwtstamp_config ts_cfg; + spinlock_t lock; /* protects ptp registers */ + u32 ptp_cfg; + u32 last_tx_seqid; + struct hclge_ptp_cycle cycle; + unsigned long tx_start; + unsigned long tx_cnt; + unsigned long tx_skipped; + unsigned long tx_cleaned; + unsigned long last_rx; + unsigned long rx_cnt; + unsigned long tx_timeout; +}; + +struct hclge_ptp_int_cmd { +#define HCLGE_PTP_INT_EN_B BIT(0) 
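[Editor's note] hclge_ptp_set_cfg() and hclge_ptp_get_ts_info() above back the standard SIOCSHWTSTAMP and ethtool timestamping interfaces. A minimal user-space sketch of turning hardware timestamps on through that path follows; the helper name is illustrative, and the socket fd and interface name come from the caller.

/* Minimal user-space sketch of the SIOCSHWTSTAMP request served by
 * hclge_ptp_set_cfg() above; illustrative only.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

static int enable_hw_timestamps(int fd, const char *ifname)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
	};
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	/* on success the driver may narrow rx_filter and copies cfg back */
	return ioctl(fd, SIOCSHWTSTAMP, &ifr);
}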
+ + u8 int_en; + u8 rsvd[23]; +}; + +enum hclge_ptp_udp_type { + HCLGE_PTP_UDP_NOT_TYPE, + HCLGE_PTP_UDP_P13F_TYPE, + HCLGE_PTP_UDP_P140_TYPE, + HCLGE_PTP_UDP_FULL_TYPE, +}; + +enum hclge_ptp_msg_type { + HCLGE_PTP_MSG_TYPE_V2_L2, + HCLGE_PTP_MSG_TYPE_V2, + HCLGE_PTP_MSG_TYPE_V2_EVENT, +}; + +enum hclge_ptp_msg0_type { + HCLGE_PTP_MSG0_V2_DELAY_REQ = 1, + HCLGE_PTP_MSG0_V2_PDELAY_REQ, + HCLGE_PTP_MSG0_V2_DELAY_RESP, + HCLGE_PTP_MSG0_V2_EVENT = 0xF, +}; + +#define HCLGE_PTP_MSG1_V2_DEFAULT 1 + +struct hclge_ptp_cfg_cmd { +#define HCLGE_PTP_EN_B BIT(0) +#define HCLGE_PTP_TX_EN_B BIT(1) +#define HCLGE_PTP_RX_EN_B BIT(2) +#define HCLGE_PTP_UDP_EN_SHIFT 3 +#define HCLGE_PTP_UDP_EN_MASK GENMASK(4, 3) +#define HCLGE_PTP_MSG_TYPE_SHIFT 8 +#define HCLGE_PTP_MSG_TYPE_MASK GENMASK(9, 8) +#define HCLGE_PTP_MSG1_SHIFT 16 +#define HCLGE_PTP_MSG1_MASK GENMASK(19, 16) +#define HCLGE_PTP_MSG0_SHIFT 24 +#define HCLGE_PTP_MSG0_MASK GENMASK(27, 24) + + __le32 cfg; + u8 rsvd[20]; +}; + +static inline struct hclge_dev *hclge_ptp_get_hdev(struct ptp_clock_info *info) +{ + struct hclge_ptp *ptp = container_of(info, struct hclge_ptp, info); + + return ptp->hdev; +} + +bool hclge_ptp_set_tx_info(struct hnae3_handle *handle, struct sk_buff *skb); +void hclge_ptp_clean_tx_hwts(struct hclge_dev *hdev); +void hclge_ptp_get_rx_hwts(struct hnae3_handle *handle, struct sk_buff *skb, + u32 nsec, u32 sec); +int hclge_ptp_get_cfg(struct hclge_dev *hdev, struct ifreq *ifr); +int hclge_ptp_set_cfg(struct hclge_dev *hdev, struct ifreq *ifr); +int hclge_ptp_init(struct hclge_dev *hdev); +void hclge_ptp_uninit(struct hclge_dev *hdev); +int hclge_ptp_get_ts_info(struct hnae3_handle *handle, + struct ethtool_ts_info *info); +int hclge_ptp_cfg_qry(struct hclge_dev *hdev, u32 *cfg); +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c new file mode 100644 index 000000000..8b40c6b4e --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c @@ -0,0 +1,2116 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. + +#include <linux/etherdevice.h> + +#include "hclge_cmd.h" +#include "hclge_main.h" +#include "hclge_tm.h" + +enum hclge_shaper_level { + HCLGE_SHAPER_LVL_PRI = 0, + HCLGE_SHAPER_LVL_PG = 1, + HCLGE_SHAPER_LVL_PORT = 2, + HCLGE_SHAPER_LVL_QSET = 3, + HCLGE_SHAPER_LVL_CNT = 4, + HCLGE_SHAPER_LVL_VF = 0, + HCLGE_SHAPER_LVL_PF = 1, +}; + +#define HCLGE_TM_PFC_PKT_GET_CMD_NUM 3 +#define HCLGE_TM_PFC_NUM_GET_PER_CMD 3 + +#define HCLGE_SHAPER_BS_U_DEF 5 +#define HCLGE_SHAPER_BS_S_DEF 20 + +/* hclge_shaper_para_calc: calculate ir parameter for the shaper + * @ir: Rate to be config, its unit is Mbps + * @shaper_level: the shaper level. 
eg: port, pg, priority, queueset + * @ir_para: parameters of IR shaper + * @max_tm_rate: max tm rate is available to config + * + * the formula: + * + * IR_b * (2 ^ IR_u) * 8 + * IR(Mbps) = ------------------------- * CLOCK(1000Mbps) + * Tick * (2 ^ IR_s) + * + * @return: 0: calculate sucessful, negative: fail + */ +static int hclge_shaper_para_calc(u32 ir, u8 shaper_level, + struct hclge_shaper_ir_para *ir_para, + u32 max_tm_rate) +{ +#define DEFAULT_SHAPER_IR_B 126 +#define DIVISOR_CLK (1000 * 8) +#define DEFAULT_DIVISOR_IR_B (DEFAULT_SHAPER_IR_B * DIVISOR_CLK) + + static const u16 tick_array[HCLGE_SHAPER_LVL_CNT] = { + 6 * 256, /* Prioriy level */ + 6 * 32, /* Prioriy group level */ + 6 * 8, /* Port level */ + 6 * 256 /* Qset level */ + }; + u8 ir_u_calc = 0; + u8 ir_s_calc = 0; + u32 ir_calc; + u32 tick; + + /* Calc tick */ + if (shaper_level >= HCLGE_SHAPER_LVL_CNT || + ir > max_tm_rate) + return -EINVAL; + + tick = tick_array[shaper_level]; + + /** + * Calc the speed if ir_b = 126, ir_u = 0 and ir_s = 0 + * the formula is changed to: + * 126 * 1 * 8 + * ir_calc = ---------------- * 1000 + * tick * 1 + */ + ir_calc = (DEFAULT_DIVISOR_IR_B + (tick >> 1) - 1) / tick; + + if (ir_calc == ir) { + ir_para->ir_b = DEFAULT_SHAPER_IR_B; + ir_para->ir_u = 0; + ir_para->ir_s = 0; + + return 0; + } else if (ir_calc > ir) { + /* Increasing the denominator to select ir_s value */ + while (ir_calc >= ir && ir) { + ir_s_calc++; + ir_calc = DEFAULT_DIVISOR_IR_B / + (tick * (1 << ir_s_calc)); + } + + ir_para->ir_b = (ir * tick * (1 << ir_s_calc) + + (DIVISOR_CLK >> 1)) / DIVISOR_CLK; + } else { + /* Increasing the numerator to select ir_u value */ + u32 numerator; + + while (ir_calc < ir) { + ir_u_calc++; + numerator = DEFAULT_DIVISOR_IR_B * (1 << ir_u_calc); + ir_calc = (numerator + (tick >> 1)) / tick; + } + + if (ir_calc == ir) { + ir_para->ir_b = DEFAULT_SHAPER_IR_B; + } else { + u32 denominator = DIVISOR_CLK * (1 << --ir_u_calc); + ir_para->ir_b = (ir * tick + (denominator >> 1)) / + denominator; + } + } + + ir_para->ir_u = ir_u_calc; + ir_para->ir_s = ir_s_calc; + + return 0; +} + +static const u16 hclge_pfc_tx_stats_offset[] = { + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num), + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num), + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num), + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num), + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num), + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num), + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num), + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num) +}; + +static const u16 hclge_pfc_rx_stats_offset[] = { + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num), + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num), + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num), + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num), + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num), + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num), + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num), + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num) +}; + +static void hclge_pfc_stats_get(struct hclge_dev *hdev, bool tx, u64 *stats) +{ + const u16 *offset; + int i; + + if (tx) + offset = hclge_pfc_tx_stats_offset; + else + offset = hclge_pfc_rx_stats_offset; + + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) + stats[i] = HCLGE_STATS_READ(&hdev->mac_stats, offset[i]); +} + +void hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats) +{ + hclge_pfc_stats_get(hdev, false, stats); +} + +void hclge_pfc_tx_stats_get(struct 
hclge_dev *hdev, u64 *stats) +{ + hclge_pfc_stats_get(hdev, true, stats); +} + +int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx) +{ + struct hclge_desc desc; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PAUSE_EN, false); + + desc.data[0] = cpu_to_le32((tx ? HCLGE_TX_MAC_PAUSE_EN_MSK : 0) | + (rx ? HCLGE_RX_MAC_PAUSE_EN_MSK : 0)); + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap, + u8 pfc_bitmap) +{ + struct hclge_desc desc; + struct hclge_pfc_en_cmd *pfc = (struct hclge_pfc_en_cmd *)desc.data; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PFC_PAUSE_EN, false); + + pfc->tx_rx_en_bitmap = tx_rx_bitmap; + pfc->pri_en_bitmap = pfc_bitmap; + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int hclge_pause_param_cfg(struct hclge_dev *hdev, const u8 *addr, + u8 pause_trans_gap, u16 pause_trans_time) +{ + struct hclge_cfg_pause_param_cmd *pause_param; + struct hclge_desc desc; + + pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, false); + + ether_addr_copy(pause_param->mac_addr, addr); + ether_addr_copy(pause_param->mac_addr_extra, addr); + pause_param->pause_trans_gap = pause_trans_gap; + pause_param->pause_trans_time = cpu_to_le16(pause_trans_time); + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr) +{ + struct hclge_cfg_pause_param_cmd *pause_param; + struct hclge_desc desc; + u16 trans_time; + u8 trans_gap; + int ret; + + pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + return ret; + + trans_gap = pause_param->pause_trans_gap; + trans_time = le16_to_cpu(pause_param->pause_trans_time); + + return hclge_pause_param_cfg(hdev, mac_addr, trans_gap, trans_time); +} + +static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id) +{ + u8 tc; + + tc = hdev->tm_info.prio_tc[pri_id]; + + if (tc >= hdev->tm_info.num_tc) + return -EINVAL; + + /** + * the register for priority has four bytes, the first bytes includes + * priority0 and priority1, the higher 4bit stands for priority1 + * while the lower 4bit stands for priority0, as below: + * first byte: | pri_1 | pri_0 | + * second byte: | pri_3 | pri_2 | + * third byte: | pri_5 | pri_4 | + * fourth byte: | pri_7 | pri_6 | + */ + pri[pri_id >> 1] |= tc << ((pri_id & 1) * 4); + + return 0; +} + +int hclge_up_to_tc_map(struct hclge_dev *hdev) +{ + struct hclge_desc desc; + u8 *pri = (u8 *)desc.data; + u8 pri_id; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, false); + + for (pri_id = 0; pri_id < HNAE3_MAX_USER_PRIO; pri_id++) { + ret = hclge_fill_pri_array(hdev, pri, pri_id); + if (ret) + return ret; + } + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static void hclge_dscp_to_prio_map_init(struct hclge_dev *hdev) +{ + u8 i; + + hdev->vport[0].nic.kinfo.tc_map_mode = HNAE3_TC_MAP_MODE_PRIO; + hdev->vport[0].nic.kinfo.dscp_app_cnt = 0; + for (i = 0; i < HNAE3_MAX_DSCP; i++) + hdev->vport[0].nic.kinfo.dscp_prio[i] = HNAE3_PRIO_ID_INVALID; +} + +int hclge_dscp_to_tc_map(struct hclge_dev *hdev) +{ + struct hclge_desc desc[HCLGE_DSCP_MAP_TC_BD_NUM]; + u8 *req0 = (u8 *)desc[0].data; + u8 *req1 = (u8 *)desc[1].data; + u8 pri_id, tc_id, i, j; + + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QOS_MAP, false); + 
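[Editor's note] The IR shaper formula documented at the top of hclge_tm.c above is easiest to sanity-check by evaluating it directly. The helper below is a sketch for checking numbers, not part of the driver, and its name is illustrative.

/* Direct evaluation of IR(Mbps) = ir_b * 2^ir_u * 8 / (tick * 2^ir_s) * 1000 */
static unsigned long long shaper_rate_mbps(unsigned int ir_b, unsigned int ir_u,
					   unsigned int ir_s, unsigned int tick)
{
	return ((unsigned long long)ir_b << ir_u) * 8 * 1000 /
	       ((unsigned long long)tick << ir_s);
}

/* Priority level: tick = 6 * 256 = 1536, so the default ir_b = 126 with
 * ir_u = ir_s = 0 gives 126 * 8 * 1000 / 1536 = 656 Mbps. That is why
 * hclge_shaper_para_calc() grows ir_s for lower requested rates (each
 * step halves the rate), grows ir_u for higher ones (each step doubles
 * it), and then re-rounds ir_b to hit the requested rate.
 */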
desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_QOS_MAP, false); + + /* The low 32 dscp setting use bd0, high 32 dscp setting use bd1 */ + for (i = 0; i < HNAE3_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM; i++) { + pri_id = hdev->vport[0].nic.kinfo.dscp_prio[i]; + pri_id = pri_id == HNAE3_PRIO_ID_INVALID ? 0 : pri_id; + tc_id = hdev->tm_info.prio_tc[pri_id]; + /* Each dscp setting has 4 bits, so each byte saves two dscp + * setting + */ + req0[i >> 1] |= tc_id << HCLGE_DSCP_TC_SHIFT(i); + + j = i + HNAE3_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM; + pri_id = hdev->vport[0].nic.kinfo.dscp_prio[j]; + pri_id = pri_id == HNAE3_PRIO_ID_INVALID ? 0 : pri_id; + tc_id = hdev->tm_info.prio_tc[pri_id]; + req1[i >> 1] |= tc_id << HCLGE_DSCP_TC_SHIFT(i); + } + + return hclge_cmd_send(&hdev->hw, desc, HCLGE_DSCP_MAP_TC_BD_NUM); +} + +static int hclge_tm_pg_to_pri_map_cfg(struct hclge_dev *hdev, + u8 pg_id, u8 pri_bit_map) +{ + struct hclge_pg_to_pri_link_cmd *map; + struct hclge_desc desc; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_TO_PRI_LINK, false); + + map = (struct hclge_pg_to_pri_link_cmd *)desc.data; + + map->pg_id = pg_id; + map->pri_bit_map = pri_bit_map; + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev, u16 qs_id, u8 pri, + bool link_vld) +{ + struct hclge_qs_to_pri_link_cmd *map; + struct hclge_desc desc; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_TO_PRI_LINK, false); + + map = (struct hclge_qs_to_pri_link_cmd *)desc.data; + + map->qs_id = cpu_to_le16(qs_id); + map->priority = pri; + map->link_vld = link_vld ? HCLGE_TM_QS_PRI_LINK_VLD_MSK : 0; + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int hclge_tm_q_to_qs_map_cfg(struct hclge_dev *hdev, + u16 q_id, u16 qs_id) +{ + struct hclge_nq_to_qs_link_cmd *map; + struct hclge_desc desc; + u16 qs_id_l; + u16 qs_id_h; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NQ_TO_QS_LINK, false); + + map = (struct hclge_nq_to_qs_link_cmd *)desc.data; + + map->nq_id = cpu_to_le16(q_id); + + /* convert qs_id to the following format to support qset_id >= 1024 + * qs_id: | 15 | 14 ~ 10 | 9 ~ 0 | + * / / \ \ + * / / \ \ + * qset_id: | 15 ~ 11 | 10 | 9 ~ 0 | + * | qs_id_h | vld | qs_id_l | + */ + qs_id_l = hnae3_get_field(qs_id, HCLGE_TM_QS_ID_L_MSK, + HCLGE_TM_QS_ID_L_S); + qs_id_h = hnae3_get_field(qs_id, HCLGE_TM_QS_ID_H_MSK, + HCLGE_TM_QS_ID_H_S); + hnae3_set_field(qs_id, HCLGE_TM_QS_ID_L_MSK, HCLGE_TM_QS_ID_L_S, + qs_id_l); + hnae3_set_field(qs_id, HCLGE_TM_QS_ID_H_EXT_MSK, HCLGE_TM_QS_ID_H_EXT_S, + qs_id_h); + map->qset_id = cpu_to_le16(qs_id | HCLGE_TM_Q_QS_LINK_VLD_MSK); + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int hclge_tm_pg_weight_cfg(struct hclge_dev *hdev, u8 pg_id, + u8 dwrr) +{ + struct hclge_pg_weight_cmd *weight; + struct hclge_desc desc; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_WEIGHT, false); + + weight = (struct hclge_pg_weight_cmd *)desc.data; + + weight->pg_id = pg_id; + weight->dwrr = dwrr; + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int hclge_tm_pri_weight_cfg(struct hclge_dev *hdev, u8 pri_id, + u8 dwrr) +{ + struct hclge_priority_weight_cmd *weight; + struct hclge_desc desc; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_WEIGHT, false); + + weight = (struct hclge_priority_weight_cmd *)desc.data; + + weight->pri_id = pri_id; + weight->dwrr = dwrr; + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int 
hclge_tm_qs_weight_cfg(struct hclge_dev *hdev, u16 qs_id, + u8 dwrr) +{ + struct hclge_qs_weight_cmd *weight; + struct hclge_desc desc; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_WEIGHT, false); + + weight = (struct hclge_qs_weight_cmd *)desc.data; + + weight->qs_id = cpu_to_le16(qs_id); + weight->dwrr = dwrr; + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static u32 hclge_tm_get_shapping_para(u8 ir_b, u8 ir_u, u8 ir_s, + u8 bs_b, u8 bs_s) +{ + u32 shapping_para = 0; + + hclge_tm_set_field(shapping_para, IR_B, ir_b); + hclge_tm_set_field(shapping_para, IR_U, ir_u); + hclge_tm_set_field(shapping_para, IR_S, ir_s); + hclge_tm_set_field(shapping_para, BS_B, bs_b); + hclge_tm_set_field(shapping_para, BS_S, bs_s); + + return shapping_para; +} + +static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev, + enum hclge_shap_bucket bucket, u8 pg_id, + u32 shapping_para, u32 rate) +{ + struct hclge_pg_shapping_cmd *shap_cfg_cmd; + enum hclge_opcode_type opcode; + struct hclge_desc desc; + + opcode = bucket ? HCLGE_OPC_TM_PG_P_SHAPPING : + HCLGE_OPC_TM_PG_C_SHAPPING; + hclge_cmd_setup_basic_desc(&desc, opcode, false); + + shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data; + + shap_cfg_cmd->pg_id = pg_id; + + shap_cfg_cmd->pg_shapping_para = cpu_to_le32(shapping_para); + + hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1); + + shap_cfg_cmd->pg_rate = cpu_to_le32(rate); + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev) +{ + struct hclge_port_shapping_cmd *shap_cfg_cmd; + struct hclge_shaper_ir_para ir_para; + struct hclge_desc desc; + u32 shapping_para; + int ret; + + ret = hclge_shaper_para_calc(hdev->hw.mac.speed, HCLGE_SHAPER_LVL_PORT, + &ir_para, + hdev->ae_dev->dev_specs.max_tm_rate); + if (ret) + return ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, false); + shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data; + + shapping_para = hclge_tm_get_shapping_para(ir_para.ir_b, ir_para.ir_u, + ir_para.ir_s, + HCLGE_SHAPER_BS_U_DEF, + HCLGE_SHAPER_BS_S_DEF); + + shap_cfg_cmd->port_shapping_para = cpu_to_le32(shapping_para); + + hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1); + + shap_cfg_cmd->port_rate = cpu_to_le32(hdev->hw.mac.speed); + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev, + enum hclge_shap_bucket bucket, u8 pri_id, + u32 shapping_para, u32 rate) +{ + struct hclge_pri_shapping_cmd *shap_cfg_cmd; + enum hclge_opcode_type opcode; + struct hclge_desc desc; + + opcode = bucket ? 
HCLGE_OPC_TM_PRI_P_SHAPPING : + HCLGE_OPC_TM_PRI_C_SHAPPING; + + hclge_cmd_setup_basic_desc(&desc, opcode, false); + + shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data; + + shap_cfg_cmd->pri_id = pri_id; + + shap_cfg_cmd->pri_shapping_para = cpu_to_le32(shapping_para); + + hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1); + + shap_cfg_cmd->pri_rate = cpu_to_le32(rate); + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int hclge_tm_pg_schd_mode_cfg(struct hclge_dev *hdev, u8 pg_id) +{ + struct hclge_desc desc; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_SCH_MODE_CFG, false); + + if (hdev->tm_info.pg_info[pg_id].pg_sch_mode == HCLGE_SCH_MODE_DWRR) + desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK); + else + desc.data[1] = 0; + + desc.data[0] = cpu_to_le32(pg_id); + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int hclge_tm_pri_schd_mode_cfg(struct hclge_dev *hdev, u8 pri_id) +{ + struct hclge_desc desc; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_SCH_MODE_CFG, false); + + if (hdev->tm_info.tc_info[pri_id].tc_sch_mode == HCLGE_SCH_MODE_DWRR) + desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK); + else + desc.data[1] = 0; + + desc.data[0] = cpu_to_le32(pri_id); + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int hclge_tm_qs_schd_mode_cfg(struct hclge_dev *hdev, u16 qs_id, u8 mode) +{ + struct hclge_desc desc; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_SCH_MODE_CFG, false); + + if (mode == HCLGE_SCH_MODE_DWRR) + desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK); + else + desc.data[1] = 0; + + desc.data[0] = cpu_to_le32(qs_id); + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc, u8 grp_id, + u32 bit_map) +{ + struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd; + struct hclge_desc desc; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_BP_TO_QSET_MAPPING, + false); + + bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data; + + bp_to_qs_map_cmd->tc_id = tc; + bp_to_qs_map_cmd->qs_group_id = grp_id; + bp_to_qs_map_cmd->qs_bit_map = cpu_to_le32(bit_map); + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate) +{ + struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; + struct hclge_qs_shapping_cmd *shap_cfg_cmd; + struct hclge_shaper_ir_para ir_para; + struct hclge_dev *hdev = vport->back; + struct hclge_desc desc; + u32 shaper_para; + int ret, i; + + if (!max_tx_rate) + max_tx_rate = hdev->ae_dev->dev_specs.max_tm_rate; + + ret = hclge_shaper_para_calc(max_tx_rate, HCLGE_SHAPER_LVL_QSET, + &ir_para, + hdev->ae_dev->dev_specs.max_tm_rate); + if (ret) + return ret; + + shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b, ir_para.ir_u, + ir_para.ir_s, + HCLGE_SHAPER_BS_U_DEF, + HCLGE_SHAPER_BS_S_DEF); + + for (i = 0; i < kinfo->tc_info.num_tc; i++) { + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QCN_SHAPPING_CFG, + false); + + shap_cfg_cmd = (struct hclge_qs_shapping_cmd *)desc.data; + shap_cfg_cmd->qs_id = cpu_to_le16(vport->qs_offset + i); + shap_cfg_cmd->qs_shapping_para = cpu_to_le32(shaper_para); + + hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1); + shap_cfg_cmd->qs_rate = cpu_to_le32(max_tx_rate); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "vport%u, qs%u failed to set tx_rate:%d, ret=%d\n", + vport->vport_id, shap_cfg_cmd->qs_id, + max_tx_rate, ret); + return ret; + } + } + + return 0; +} + +static u16 
hclge_vport_get_max_rss_size(struct hclge_vport *vport) +{ + struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; + struct hnae3_tc_info *tc_info = &kinfo->tc_info; + struct hclge_dev *hdev = vport->back; + u16 max_rss_size = 0; + int i; + + if (!tc_info->mqprio_active) + return vport->alloc_tqps / tc_info->num_tc; + + for (i = 0; i < HNAE3_MAX_TC; i++) { + if (!(hdev->hw_tc_map & BIT(i)) || i >= tc_info->num_tc) + continue; + if (max_rss_size < tc_info->tqp_count[i]) + max_rss_size = tc_info->tqp_count[i]; + } + + return max_rss_size; +} + +static u16 hclge_vport_get_tqp_num(struct hclge_vport *vport) +{ + struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; + struct hnae3_tc_info *tc_info = &kinfo->tc_info; + struct hclge_dev *hdev = vport->back; + int sum = 0; + int i; + + if (!tc_info->mqprio_active) + return kinfo->rss_size * tc_info->num_tc; + + for (i = 0; i < HNAE3_MAX_TC; i++) { + if (hdev->hw_tc_map & BIT(i) && i < tc_info->num_tc) + sum += tc_info->tqp_count[i]; + } + + return sum; +} + +static void hclge_tm_update_kinfo_rss_size(struct hclge_vport *vport) +{ + struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; + struct hclge_dev *hdev = vport->back; + u16 vport_max_rss_size; + u16 max_rss_size; + + /* TC configuration is shared by PF/VF in one port, only allow + * one tc for VF for simplicity. VF's vport_id is non zero. + */ + if (vport->vport_id) { + kinfo->tc_info.max_tc = 1; + kinfo->tc_info.num_tc = 1; + vport->qs_offset = HNAE3_MAX_TC + + vport->vport_id - HCLGE_VF_VPORT_START_NUM; + vport_max_rss_size = hdev->vf_rss_size_max; + } else { + kinfo->tc_info.max_tc = hdev->tc_max; + kinfo->tc_info.num_tc = + min_t(u16, vport->alloc_tqps, hdev->tm_info.num_tc); + vport->qs_offset = 0; + vport_max_rss_size = hdev->pf_rss_size_max; + } + + max_rss_size = min_t(u16, vport_max_rss_size, + hclge_vport_get_max_rss_size(vport)); + + /* Set to user value, no larger than max_rss_size. */ + if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size && + kinfo->req_rss_size <= max_rss_size) { + dev_info(&hdev->pdev->dev, "rss changes from %u to %u\n", + kinfo->rss_size, kinfo->req_rss_size); + kinfo->rss_size = kinfo->req_rss_size; + } else if (kinfo->rss_size > max_rss_size || + (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) { + /* Set to the maximum specification value (max_rss_size). */ + kinfo->rss_size = max_rss_size; + } +} + +static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport) +{ + struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; + struct hclge_dev *hdev = vport->back; + u8 i; + + hclge_tm_update_kinfo_rss_size(vport); + kinfo->num_tqps = hclge_vport_get_tqp_num(vport); + vport->dwrr = 100; /* 100 percent as init */ + vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit; + + if (vport->vport_id == PF_VPORT_ID) + hdev->rss_cfg.rss_size = kinfo->rss_size; + + /* when enable mqprio, the tc_info has been updated. 
*/ + if (kinfo->tc_info.mqprio_active) + return; + + for (i = 0; i < HNAE3_MAX_TC; i++) { + if (hdev->hw_tc_map & BIT(i) && i < kinfo->tc_info.num_tc) { + kinfo->tc_info.tqp_offset[i] = i * kinfo->rss_size; + kinfo->tc_info.tqp_count[i] = kinfo->rss_size; + } else { + /* Set to default queue if TC is disable */ + kinfo->tc_info.tqp_offset[i] = 0; + kinfo->tc_info.tqp_count[i] = 1; + } + } + + memcpy(kinfo->tc_info.prio_tc, hdev->tm_info.prio_tc, + sizeof_field(struct hnae3_tc_info, prio_tc)); +} + +static void hclge_tm_vport_info_update(struct hclge_dev *hdev) +{ + struct hclge_vport *vport = hdev->vport; + u32 i; + + for (i = 0; i < hdev->num_alloc_vport; i++) { + hclge_tm_vport_tc_info_update(vport); + + vport++; + } +} + +static void hclge_tm_tc_info_init(struct hclge_dev *hdev) +{ + u8 i, tc_sch_mode; + u32 bw_limit; + + for (i = 0; i < hdev->tc_max; i++) { + if (i < hdev->tm_info.num_tc) { + tc_sch_mode = HCLGE_SCH_MODE_DWRR; + bw_limit = hdev->tm_info.pg_info[0].bw_limit; + } else { + tc_sch_mode = HCLGE_SCH_MODE_SP; + bw_limit = 0; + } + + hdev->tm_info.tc_info[i].tc_id = i; + hdev->tm_info.tc_info[i].tc_sch_mode = tc_sch_mode; + hdev->tm_info.tc_info[i].pgid = 0; + hdev->tm_info.tc_info[i].bw_limit = bw_limit; + } + + for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) + hdev->tm_info.prio_tc[i] = + (i >= hdev->tm_info.num_tc) ? 0 : i; +} + +static void hclge_tm_pg_info_init(struct hclge_dev *hdev) +{ +#define BW_PERCENT 100 +#define DEFAULT_BW_WEIGHT 1 + + u8 i; + + for (i = 0; i < hdev->tm_info.num_pg; i++) { + int k; + + hdev->tm_info.pg_dwrr[i] = i ? 0 : BW_PERCENT; + + hdev->tm_info.pg_info[i].pg_id = i; + hdev->tm_info.pg_info[i].pg_sch_mode = HCLGE_SCH_MODE_DWRR; + + hdev->tm_info.pg_info[i].bw_limit = + hdev->ae_dev->dev_specs.max_tm_rate; + + if (i != 0) + continue; + + hdev->tm_info.pg_info[i].tc_bit_map = hdev->hw_tc_map; + for (k = 0; k < hdev->tm_info.num_tc; k++) + hdev->tm_info.pg_info[i].tc_dwrr[k] = BW_PERCENT; + for (; k < HNAE3_MAX_TC; k++) + hdev->tm_info.pg_info[i].tc_dwrr[k] = DEFAULT_BW_WEIGHT; + } +} + +static void hclge_update_fc_mode_by_dcb_flag(struct hclge_dev *hdev) +{ + if (hdev->tm_info.num_tc == 1 && !hdev->tm_info.pfc_en) { + if (hdev->fc_mode_last_time == HCLGE_FC_PFC) + dev_warn(&hdev->pdev->dev, + "Only 1 tc used, but last mode is FC_PFC\n"); + + hdev->tm_info.fc_mode = hdev->fc_mode_last_time; + } else if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) { + /* fc_mode_last_time record the last fc_mode when + * DCB is enabled, so that fc_mode can be set to + * the correct value when DCB is disabled. 
+ */ + hdev->fc_mode_last_time = hdev->tm_info.fc_mode; + hdev->tm_info.fc_mode = HCLGE_FC_PFC; + } +} + +static void hclge_update_fc_mode(struct hclge_dev *hdev) +{ + if (!hdev->tm_info.pfc_en) { + hdev->tm_info.fc_mode = hdev->fc_mode_last_time; + return; + } + + if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) { + hdev->fc_mode_last_time = hdev->tm_info.fc_mode; + hdev->tm_info.fc_mode = HCLGE_FC_PFC; + } +} + +void hclge_tm_pfc_info_update(struct hclge_dev *hdev) +{ + if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) + hclge_update_fc_mode(hdev); + else + hclge_update_fc_mode_by_dcb_flag(hdev); +} + +static void hclge_tm_schd_info_init(struct hclge_dev *hdev) +{ + hclge_tm_pg_info_init(hdev); + + hclge_tm_tc_info_init(hdev); + + hclge_tm_vport_info_update(hdev); + + hclge_tm_pfc_info_update(hdev); +} + +static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev) +{ + int ret; + u32 i; + + if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) + return 0; + + for (i = 0; i < hdev->tm_info.num_pg; i++) { + /* Cfg mapping */ + ret = hclge_tm_pg_to_pri_map_cfg( + hdev, i, hdev->tm_info.pg_info[i].tc_bit_map); + if (ret) + return ret; + } + + return 0; +} + +static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev) +{ + u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate; + struct hclge_shaper_ir_para ir_para; + u32 shaper_para; + int ret; + u32 i; + + /* Cfg pg schd */ + if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) + return 0; + + /* Pg to pri */ + for (i = 0; i < hdev->tm_info.num_pg; i++) { + u32 rate = hdev->tm_info.pg_info[i].bw_limit; + + /* Calc shaper para */ + ret = hclge_shaper_para_calc(rate, HCLGE_SHAPER_LVL_PG, + &ir_para, max_tm_rate); + if (ret) + return ret; + + shaper_para = hclge_tm_get_shapping_para(0, 0, 0, + HCLGE_SHAPER_BS_U_DEF, + HCLGE_SHAPER_BS_S_DEF); + ret = hclge_tm_pg_shapping_cfg(hdev, + HCLGE_TM_SHAP_C_BUCKET, i, + shaper_para, rate); + if (ret) + return ret; + + shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b, + ir_para.ir_u, + ir_para.ir_s, + HCLGE_SHAPER_BS_U_DEF, + HCLGE_SHAPER_BS_S_DEF); + ret = hclge_tm_pg_shapping_cfg(hdev, + HCLGE_TM_SHAP_P_BUCKET, i, + shaper_para, rate); + if (ret) + return ret; + } + + return 0; +} + +static int hclge_tm_pg_dwrr_cfg(struct hclge_dev *hdev) +{ + int ret; + u32 i; + + /* cfg pg schd */ + if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) + return 0; + + /* pg to prio */ + for (i = 0; i < hdev->tm_info.num_pg; i++) { + /* Cfg dwrr */ + ret = hclge_tm_pg_weight_cfg(hdev, i, hdev->tm_info.pg_dwrr[i]); + if (ret) + return ret; + } + + return 0; +} + +static int hclge_vport_q_to_qs_map(struct hclge_dev *hdev, + struct hclge_vport *vport) +{ + struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; + struct hnae3_tc_info *tc_info = &kinfo->tc_info; + struct hnae3_queue **tqp = kinfo->tqp; + u32 i, j; + int ret; + + for (i = 0; i < tc_info->num_tc; i++) { + for (j = 0; j < tc_info->tqp_count[i]; j++) { + struct hnae3_queue *q = tqp[tc_info->tqp_offset[i] + j]; + + ret = hclge_tm_q_to_qs_map_cfg(hdev, + hclge_get_queue_id(q), + vport->qs_offset + i); + if (ret) + return ret; + } + } + + return 0; +} + +static int hclge_tm_pri_q_qs_cfg_tc_base(struct hclge_dev *hdev) +{ + struct hclge_vport *vport = hdev->vport; + u16 i, k; + int ret; + + /* Cfg qs -> pri mapping, one by one mapping */ + for (k = 0; k < hdev->num_alloc_vport; k++) { + struct hnae3_knic_private_info *kinfo = &vport[k].nic.kinfo; + + for (i = 0; i < kinfo->tc_info.max_tc; i++) { + u8 pri = i < kinfo->tc_info.num_tc ? 
i : 0; + bool link_vld = i < kinfo->tc_info.num_tc; + + ret = hclge_tm_qs_to_pri_map_cfg(hdev, + vport[k].qs_offset + i, + pri, link_vld); + if (ret) + return ret; + } + } + + return 0; +} + +static int hclge_tm_pri_q_qs_cfg_vnet_base(struct hclge_dev *hdev) +{ + struct hclge_vport *vport = hdev->vport; + u16 i, k; + int ret; + + /* Cfg qs -> pri mapping, qs = tc, pri = vf, 8 qs -> 1 pri */ + for (k = 0; k < hdev->num_alloc_vport; k++) + for (i = 0; i < HNAE3_MAX_TC; i++) { + ret = hclge_tm_qs_to_pri_map_cfg(hdev, + vport[k].qs_offset + i, + k, true); + if (ret) + return ret; + } + + return 0; +} + +static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev) +{ + struct hclge_vport *vport = hdev->vport; + int ret; + u32 i; + + if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) + ret = hclge_tm_pri_q_qs_cfg_tc_base(hdev); + else if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE) + ret = hclge_tm_pri_q_qs_cfg_vnet_base(hdev); + else + return -EINVAL; + + if (ret) + return ret; + + /* Cfg q -> qs mapping */ + for (i = 0; i < hdev->num_alloc_vport; i++) { + ret = hclge_vport_q_to_qs_map(hdev, vport); + if (ret) + return ret; + + vport++; + } + + return 0; +} + +static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev) +{ + u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate; + struct hclge_shaper_ir_para ir_para; + u32 shaper_para_c, shaper_para_p; + int ret; + u32 i; + + for (i = 0; i < hdev->tc_max; i++) { + u32 rate = hdev->tm_info.tc_info[i].bw_limit; + + if (rate) { + ret = hclge_shaper_para_calc(rate, HCLGE_SHAPER_LVL_PRI, + &ir_para, max_tm_rate); + if (ret) + return ret; + + shaper_para_c = hclge_tm_get_shapping_para(0, 0, 0, + HCLGE_SHAPER_BS_U_DEF, + HCLGE_SHAPER_BS_S_DEF); + shaper_para_p = hclge_tm_get_shapping_para(ir_para.ir_b, + ir_para.ir_u, + ir_para.ir_s, + HCLGE_SHAPER_BS_U_DEF, + HCLGE_SHAPER_BS_S_DEF); + } else { + shaper_para_c = 0; + shaper_para_p = 0; + } + + ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET, i, + shaper_para_c, rate); + if (ret) + return ret; + + ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET, i, + shaper_para_p, rate); + if (ret) + return ret; + } + + return 0; +} + +static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport) +{ + struct hclge_dev *hdev = vport->back; + struct hclge_shaper_ir_para ir_para; + u32 shaper_para; + int ret; + + ret = hclge_shaper_para_calc(vport->bw_limit, HCLGE_SHAPER_LVL_VF, + &ir_para, + hdev->ae_dev->dev_specs.max_tm_rate); + if (ret) + return ret; + + shaper_para = hclge_tm_get_shapping_para(0, 0, 0, + HCLGE_SHAPER_BS_U_DEF, + HCLGE_SHAPER_BS_S_DEF); + ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET, + vport->vport_id, shaper_para, + vport->bw_limit); + if (ret) + return ret; + + shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b, ir_para.ir_u, + ir_para.ir_s, + HCLGE_SHAPER_BS_U_DEF, + HCLGE_SHAPER_BS_S_DEF); + ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET, + vport->vport_id, shaper_para, + vport->bw_limit); + if (ret) + return ret; + + return 0; +} + +static int hclge_tm_pri_vnet_base_shaper_qs_cfg(struct hclge_vport *vport) +{ + struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; + struct hclge_dev *hdev = vport->back; + u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate; + struct hclge_shaper_ir_para ir_para; + u32 i; + int ret; + + for (i = 0; i < kinfo->tc_info.num_tc; i++) { + ret = hclge_shaper_para_calc(hdev->tm_info.tc_info[i].bw_limit, + HCLGE_SHAPER_LVL_QSET, + &ir_para, max_tm_rate); + if (ret) + return ret; 
+ } + + return 0; +} + +static int hclge_tm_pri_vnet_base_shaper_cfg(struct hclge_dev *hdev) +{ + struct hclge_vport *vport = hdev->vport; + int ret; + u32 i; + + /* Need config vport shaper */ + for (i = 0; i < hdev->num_alloc_vport; i++) { + ret = hclge_tm_pri_vnet_base_shaper_pri_cfg(vport); + if (ret) + return ret; + + ret = hclge_tm_pri_vnet_base_shaper_qs_cfg(vport); + if (ret) + return ret; + + vport++; + } + + return 0; +} + +static int hclge_tm_pri_shaper_cfg(struct hclge_dev *hdev) +{ + int ret; + + if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) { + ret = hclge_tm_pri_tc_base_shaper_cfg(hdev); + if (ret) + return ret; + } else { + ret = hclge_tm_pri_vnet_base_shaper_cfg(hdev); + if (ret) + return ret; + } + + return 0; +} + +static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev) +{ + struct hclge_vport *vport = hdev->vport; + struct hclge_pg_info *pg_info; + u8 dwrr; + int ret; + u32 i, k; + + for (i = 0; i < hdev->tc_max; i++) { + pg_info = + &hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid]; + dwrr = pg_info->tc_dwrr[i]; + + ret = hclge_tm_pri_weight_cfg(hdev, i, dwrr); + if (ret) + return ret; + + for (k = 0; k < hdev->num_alloc_vport; k++) { + struct hnae3_knic_private_info *kinfo = &vport[k].nic.kinfo; + + if (i >= kinfo->tc_info.max_tc) + continue; + + dwrr = i < kinfo->tc_info.num_tc ? vport[k].dwrr : 0; + ret = hclge_tm_qs_weight_cfg( + hdev, vport[k].qs_offset + i, + dwrr); + if (ret) + return ret; + } + } + + return 0; +} + +static int hclge_tm_ets_tc_dwrr_cfg(struct hclge_dev *hdev) +{ +#define DEFAULT_TC_OFFSET 14 + + struct hclge_ets_tc_weight_cmd *ets_weight; + struct hclge_desc desc; + unsigned int i; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, false); + ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data; + + for (i = 0; i < HNAE3_MAX_TC; i++) { + struct hclge_pg_info *pg_info; + + pg_info = &hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid]; + ets_weight->tc_weight[i] = pg_info->tc_dwrr[i]; + } + + ets_weight->weight_offset = DEFAULT_TC_OFFSET; + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int hclge_tm_pri_vnet_base_dwrr_pri_cfg(struct hclge_vport *vport) +{ + struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; + struct hclge_dev *hdev = vport->back; + int ret; + u8 i; + + /* Vf dwrr */ + ret = hclge_tm_pri_weight_cfg(hdev, vport->vport_id, vport->dwrr); + if (ret) + return ret; + + /* Qset dwrr */ + for (i = 0; i < kinfo->tc_info.num_tc; i++) { + ret = hclge_tm_qs_weight_cfg( + hdev, vport->qs_offset + i, + hdev->tm_info.pg_info[0].tc_dwrr[i]); + if (ret) + return ret; + } + + return 0; +} + +static int hclge_tm_pri_vnet_base_dwrr_cfg(struct hclge_dev *hdev) +{ + struct hclge_vport *vport = hdev->vport; + int ret; + u32 i; + + for (i = 0; i < hdev->num_alloc_vport; i++) { + ret = hclge_tm_pri_vnet_base_dwrr_pri_cfg(vport); + if (ret) + return ret; + + vport++; + } + + return 0; +} + +static int hclge_tm_pri_dwrr_cfg(struct hclge_dev *hdev) +{ + int ret; + + if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) { + ret = hclge_tm_pri_tc_base_dwrr_cfg(hdev); + if (ret) + return ret; + + if (!hnae3_dev_dcb_supported(hdev)) + return 0; + + ret = hclge_tm_ets_tc_dwrr_cfg(hdev); + if (ret == -EOPNOTSUPP) { + dev_warn(&hdev->pdev->dev, + "fw %08x doesn't support ets tc weight cmd\n", + hdev->fw_version); + ret = 0; + } + + return ret; + } else { + ret = hclge_tm_pri_vnet_base_dwrr_cfg(hdev); + if (ret) + return ret; + } + + return 0; +} + +static int hclge_tm_map_cfg(struct hclge_dev *hdev) +{ + int 
ret; + + ret = hclge_up_to_tc_map(hdev); + if (ret) + return ret; + + if (hdev->vport[0].nic.kinfo.tc_map_mode == HNAE3_TC_MAP_MODE_DSCP) { + ret = hclge_dscp_to_tc_map(hdev); + if (ret) + return ret; + } + + ret = hclge_tm_pg_to_pri_map(hdev); + if (ret) + return ret; + + return hclge_tm_pri_q_qs_cfg(hdev); +} + +static int hclge_tm_shaper_cfg(struct hclge_dev *hdev) +{ + int ret; + + ret = hclge_tm_port_shaper_cfg(hdev); + if (ret) + return ret; + + ret = hclge_tm_pg_shaper_cfg(hdev); + if (ret) + return ret; + + return hclge_tm_pri_shaper_cfg(hdev); +} + +int hclge_tm_dwrr_cfg(struct hclge_dev *hdev) +{ + int ret; + + ret = hclge_tm_pg_dwrr_cfg(hdev); + if (ret) + return ret; + + return hclge_tm_pri_dwrr_cfg(hdev); +} + +static int hclge_tm_lvl2_schd_mode_cfg(struct hclge_dev *hdev) +{ + int ret; + u8 i; + + /* Only being config on TC-Based scheduler mode */ + if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE) + return 0; + + for (i = 0; i < hdev->tm_info.num_pg; i++) { + ret = hclge_tm_pg_schd_mode_cfg(hdev, i); + if (ret) + return ret; + } + + return 0; +} + +static int hclge_tm_schd_mode_tc_base_cfg(struct hclge_dev *hdev, u8 pri_id) +{ + struct hclge_vport *vport = hdev->vport; + int ret; + u8 mode; + u16 i; + + ret = hclge_tm_pri_schd_mode_cfg(hdev, pri_id); + if (ret) + return ret; + + for (i = 0; i < hdev->num_alloc_vport; i++) { + struct hnae3_knic_private_info *kinfo = &vport[i].nic.kinfo; + + if (pri_id >= kinfo->tc_info.max_tc) + continue; + + mode = pri_id < kinfo->tc_info.num_tc ? HCLGE_SCH_MODE_DWRR : + HCLGE_SCH_MODE_SP; + ret = hclge_tm_qs_schd_mode_cfg(hdev, + vport[i].qs_offset + pri_id, + mode); + if (ret) + return ret; + } + + return 0; +} + +static int hclge_tm_schd_mode_vnet_base_cfg(struct hclge_vport *vport) +{ + struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; + struct hclge_dev *hdev = vport->back; + int ret; + u8 i; + + if (vport->vport_id >= HNAE3_MAX_TC) + return -EINVAL; + + ret = hclge_tm_pri_schd_mode_cfg(hdev, vport->vport_id); + if (ret) + return ret; + + for (i = 0; i < kinfo->tc_info.num_tc; i++) { + u8 sch_mode = hdev->tm_info.tc_info[i].tc_sch_mode; + + ret = hclge_tm_qs_schd_mode_cfg(hdev, vport->qs_offset + i, + sch_mode); + if (ret) + return ret; + } + + return 0; +} + +static int hclge_tm_lvl34_schd_mode_cfg(struct hclge_dev *hdev) +{ + struct hclge_vport *vport = hdev->vport; + int ret; + u8 i; + + if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) { + for (i = 0; i < hdev->tc_max; i++) { + ret = hclge_tm_schd_mode_tc_base_cfg(hdev, i); + if (ret) + return ret; + } + } else { + for (i = 0; i < hdev->num_alloc_vport; i++) { + ret = hclge_tm_schd_mode_vnet_base_cfg(vport); + if (ret) + return ret; + + vport++; + } + } + + return 0; +} + +static int hclge_tm_schd_mode_hw(struct hclge_dev *hdev) +{ + int ret; + + ret = hclge_tm_lvl2_schd_mode_cfg(hdev); + if (ret) + return ret; + + return hclge_tm_lvl34_schd_mode_cfg(hdev); +} + +int hclge_tm_schd_setup_hw(struct hclge_dev *hdev) +{ + int ret; + + /* Cfg tm mapping */ + ret = hclge_tm_map_cfg(hdev); + if (ret) + return ret; + + /* Cfg tm shaper */ + ret = hclge_tm_shaper_cfg(hdev); + if (ret) + return ret; + + /* Cfg dwrr */ + ret = hclge_tm_dwrr_cfg(hdev); + if (ret) + return ret; + + /* Cfg schd mode for each level schd */ + return hclge_tm_schd_mode_hw(hdev); +} + +static int hclge_pause_param_setup_hw(struct hclge_dev *hdev) +{ + struct hclge_mac *mac = &hdev->hw.mac; + + return hclge_pause_param_cfg(hdev, mac->mac_addr, + HCLGE_DEFAULT_PAUSE_TRANS_GAP, + 
HCLGE_DEFAULT_PAUSE_TRANS_TIME); +} + +static int hclge_pfc_setup_hw(struct hclge_dev *hdev) +{ + u8 enable_bitmap = 0; + + if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) + enable_bitmap = HCLGE_TX_MAC_PAUSE_EN_MSK | + HCLGE_RX_MAC_PAUSE_EN_MSK; + + return hclge_pfc_pause_en_cfg(hdev, enable_bitmap, + hdev->tm_info.pfc_en); +} + +/* for the queues that use for backpress, divides to several groups, + * each group contains 32 queue sets, which can be represented by u32 bitmap. + */ +static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc) +{ + u16 grp_id_shift = HCLGE_BP_GRP_ID_S; + u16 grp_id_mask = HCLGE_BP_GRP_ID_M; + u8 grp_num = HCLGE_BP_GRP_NUM; + int i; + + if (hdev->num_tqps > HCLGE_TQP_MAX_SIZE_DEV_V2) { + grp_num = HCLGE_BP_EXT_GRP_NUM; + grp_id_mask = HCLGE_BP_EXT_GRP_ID_M; + grp_id_shift = HCLGE_BP_EXT_GRP_ID_S; + } + + for (i = 0; i < grp_num; i++) { + u32 qs_bitmap = 0; + int k, ret; + + for (k = 0; k < hdev->num_alloc_vport; k++) { + struct hclge_vport *vport = &hdev->vport[k]; + u16 qs_id = vport->qs_offset + tc; + u8 grp, sub_grp; + + grp = hnae3_get_field(qs_id, grp_id_mask, grp_id_shift); + sub_grp = hnae3_get_field(qs_id, HCLGE_BP_SUB_GRP_ID_M, + HCLGE_BP_SUB_GRP_ID_S); + if (i == grp) + qs_bitmap |= (1 << sub_grp); + } + + ret = hclge_tm_qs_bp_cfg(hdev, tc, i, qs_bitmap); + if (ret) + return ret; + } + + return 0; +} + +int hclge_mac_pause_setup_hw(struct hclge_dev *hdev) +{ + bool tx_en, rx_en; + + switch (hdev->tm_info.fc_mode) { + case HCLGE_FC_NONE: + tx_en = false; + rx_en = false; + break; + case HCLGE_FC_RX_PAUSE: + tx_en = false; + rx_en = true; + break; + case HCLGE_FC_TX_PAUSE: + tx_en = true; + rx_en = false; + break; + case HCLGE_FC_FULL: + tx_en = true; + rx_en = true; + break; + case HCLGE_FC_PFC: + tx_en = false; + rx_en = false; + break; + default: + tx_en = true; + rx_en = true; + } + + return hclge_mac_pause_en_cfg(hdev, tx_en, rx_en); +} + +static int hclge_tm_bp_setup(struct hclge_dev *hdev) +{ + int ret; + int i; + + for (i = 0; i < hdev->tm_info.num_tc; i++) { + ret = hclge_bp_setup_hw(hdev, i); + if (ret) + return ret; + } + + return 0; +} + +int hclge_pause_setup_hw(struct hclge_dev *hdev, bool init) +{ + int ret; + + ret = hclge_pause_param_setup_hw(hdev); + if (ret) + return ret; + + ret = hclge_mac_pause_setup_hw(hdev); + if (ret) + return ret; + + /* Only DCB-supported dev supports qset back pressure and pfc cmd */ + if (!hnae3_dev_dcb_supported(hdev)) + return 0; + + /* GE MAC does not support PFC, when driver is initializing and MAC + * is in GE Mode, ignore the error here, otherwise initialization + * will fail. + */ + ret = hclge_pfc_setup_hw(hdev); + if (init && ret == -EOPNOTSUPP) + dev_warn(&hdev->pdev->dev, "GE MAC does not support pfc\n"); + else if (ret) { + dev_err(&hdev->pdev->dev, "config pfc failed! 
ret = %d\n", + ret); + return ret; + } + + return hclge_tm_bp_setup(hdev); +} + +void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc) +{ + struct hclge_vport *vport = hdev->vport; + struct hnae3_knic_private_info *kinfo; + u32 i, k; + + for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) { + hdev->tm_info.prio_tc[i] = prio_tc[i]; + + for (k = 0; k < hdev->num_alloc_vport; k++) { + kinfo = &vport[k].nic.kinfo; + kinfo->tc_info.prio_tc[i] = prio_tc[i]; + } + } +} + +void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc) +{ + u8 bit_map = 0; + u8 i; + + hdev->tm_info.num_tc = num_tc; + + for (i = 0; i < hdev->tm_info.num_tc; i++) + bit_map |= BIT(i); + + if (!bit_map) { + bit_map = 1; + hdev->tm_info.num_tc = 1; + } + + hdev->hw_tc_map = bit_map; + + hclge_tm_schd_info_init(hdev); +} + +int hclge_tm_init_hw(struct hclge_dev *hdev, bool init) +{ + int ret; + + if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) && + (hdev->tx_sch_mode != HCLGE_FLAG_VNET_BASE_SCH_MODE)) + return -ENOTSUPP; + + ret = hclge_tm_schd_setup_hw(hdev); + if (ret) + return ret; + + ret = hclge_pause_setup_hw(hdev, init); + if (ret) + return ret; + + return 0; +} + +int hclge_tm_schd_init(struct hclge_dev *hdev) +{ + /* fc_mode is HCLGE_FC_FULL on reset */ + hdev->tm_info.fc_mode = HCLGE_FC_FULL; + hdev->fc_mode_last_time = hdev->tm_info.fc_mode; + + if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE && + hdev->tm_info.num_pg != 1) + return -EINVAL; + + hclge_tm_schd_info_init(hdev); + hclge_dscp_to_prio_map_init(hdev); + + return hclge_tm_init_hw(hdev, true); +} + +int hclge_tm_vport_map_update(struct hclge_dev *hdev) +{ + struct hclge_vport *vport = hdev->vport; + int ret; + + hclge_tm_vport_tc_info_update(vport); + + ret = hclge_vport_q_to_qs_map(hdev, vport); + if (ret) + return ret; + + if (hdev->tm_info.num_tc == 1 && !hdev->tm_info.pfc_en) + return 0; + + return hclge_tm_bp_setup(hdev); +} + +int hclge_tm_get_qset_num(struct hclge_dev *hdev, u16 *qset_num) +{ + struct hclge_tm_nodes_cmd *nodes; + struct hclge_desc desc; + int ret; + + if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) { + /* Each PF has 8 qsets and each VF has 1 qset */ + *qset_num = HCLGE_TM_PF_MAX_QSET_NUM + pci_num_vf(hdev->pdev); + return 0; + } + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NODES, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get qset num, ret = %d\n", ret); + return ret; + } + + nodes = (struct hclge_tm_nodes_cmd *)desc.data; + *qset_num = le16_to_cpu(nodes->qset_num); + return 0; +} + +int hclge_tm_get_pri_num(struct hclge_dev *hdev, u8 *pri_num) +{ + struct hclge_tm_nodes_cmd *nodes; + struct hclge_desc desc; + int ret; + + if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) { + *pri_num = HCLGE_TM_PF_MAX_PRI_NUM; + return 0; + } + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NODES, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get pri num, ret = %d\n", ret); + return ret; + } + + nodes = (struct hclge_tm_nodes_cmd *)desc.data; + *pri_num = nodes->pri_num; + return 0; +} + +int hclge_tm_get_qset_map_pri(struct hclge_dev *hdev, u16 qset_id, u8 *priority, + u8 *link_vld) +{ + struct hclge_qs_to_pri_link_cmd *map; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_TO_PRI_LINK, true); + map = (struct hclge_qs_to_pri_link_cmd *)desc.data; + map->qs_id = cpu_to_le16(qset_id); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if 
(ret) { + dev_err(&hdev->pdev->dev, + "failed to get qset map priority, ret = %d\n", ret); + return ret; + } + + *priority = map->priority; + *link_vld = map->link_vld; + return 0; +} + +int hclge_tm_get_qset_sch_mode(struct hclge_dev *hdev, u16 qset_id, u8 *mode) +{ + struct hclge_qs_sch_mode_cfg_cmd *qs_sch_mode; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_SCH_MODE_CFG, true); + qs_sch_mode = (struct hclge_qs_sch_mode_cfg_cmd *)desc.data; + qs_sch_mode->qs_id = cpu_to_le16(qset_id); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get qset sch mode, ret = %d\n", ret); + return ret; + } + + *mode = qs_sch_mode->sch_mode; + return 0; +} + +int hclge_tm_get_qset_weight(struct hclge_dev *hdev, u16 qset_id, u8 *weight) +{ + struct hclge_qs_weight_cmd *qs_weight; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_WEIGHT, true); + qs_weight = (struct hclge_qs_weight_cmd *)desc.data; + qs_weight->qs_id = cpu_to_le16(qset_id); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get qset weight, ret = %d\n", ret); + return ret; + } + + *weight = qs_weight->dwrr; + return 0; +} + +int hclge_tm_get_qset_shaper(struct hclge_dev *hdev, u16 qset_id, + struct hclge_tm_shaper_para *para) +{ + struct hclge_qs_shapping_cmd *shap_cfg_cmd; + struct hclge_desc desc; + u32 shapping_para; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QCN_SHAPPING_CFG, true); + shap_cfg_cmd = (struct hclge_qs_shapping_cmd *)desc.data; + shap_cfg_cmd->qs_id = cpu_to_le16(qset_id); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get qset %u shaper, ret = %d\n", qset_id, + ret); + return ret; + } + + shapping_para = le32_to_cpu(shap_cfg_cmd->qs_shapping_para); + para->ir_b = hclge_tm_get_field(shapping_para, IR_B); + para->ir_u = hclge_tm_get_field(shapping_para, IR_U); + para->ir_s = hclge_tm_get_field(shapping_para, IR_S); + para->bs_b = hclge_tm_get_field(shapping_para, BS_B); + para->bs_s = hclge_tm_get_field(shapping_para, BS_S); + para->flag = shap_cfg_cmd->flag; + para->rate = le32_to_cpu(shap_cfg_cmd->qs_rate); + return 0; +} + +int hclge_tm_get_pri_sch_mode(struct hclge_dev *hdev, u8 pri_id, u8 *mode) +{ + struct hclge_pri_sch_mode_cfg_cmd *pri_sch_mode; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_SCH_MODE_CFG, true); + pri_sch_mode = (struct hclge_pri_sch_mode_cfg_cmd *)desc.data; + pri_sch_mode->pri_id = pri_id; + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get priority sch mode, ret = %d\n", ret); + return ret; + } + + *mode = pri_sch_mode->sch_mode; + return 0; +} + +int hclge_tm_get_pri_weight(struct hclge_dev *hdev, u8 pri_id, u8 *weight) +{ + struct hclge_priority_weight_cmd *priority_weight; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_WEIGHT, true); + priority_weight = (struct hclge_priority_weight_cmd *)desc.data; + priority_weight->pri_id = pri_id; + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get priority weight, ret = %d\n", ret); + return ret; + } + + *weight = priority_weight->dwrr; + return 0; +} + +int hclge_tm_get_pri_shaper(struct hclge_dev *hdev, u8 pri_id, + enum hclge_opcode_type cmd, + struct hclge_tm_shaper_para *para) +{ + struct hclge_pri_shapping_cmd 
*shap_cfg_cmd; + struct hclge_desc desc; + u32 shapping_para; + int ret; + + if (cmd != HCLGE_OPC_TM_PRI_C_SHAPPING && + cmd != HCLGE_OPC_TM_PRI_P_SHAPPING) + return -EINVAL; + + hclge_cmd_setup_basic_desc(&desc, cmd, true); + shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data; + shap_cfg_cmd->pri_id = pri_id; + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get priority shaper(%#x), ret = %d\n", + cmd, ret); + return ret; + } + + shapping_para = le32_to_cpu(shap_cfg_cmd->pri_shapping_para); + para->ir_b = hclge_tm_get_field(shapping_para, IR_B); + para->ir_u = hclge_tm_get_field(shapping_para, IR_U); + para->ir_s = hclge_tm_get_field(shapping_para, IR_S); + para->bs_b = hclge_tm_get_field(shapping_para, BS_B); + para->bs_s = hclge_tm_get_field(shapping_para, BS_S); + para->flag = shap_cfg_cmd->flag; + para->rate = le32_to_cpu(shap_cfg_cmd->pri_rate); + return 0; +} + +int hclge_tm_get_q_to_qs_map(struct hclge_dev *hdev, u16 q_id, u16 *qset_id) +{ + struct hclge_nq_to_qs_link_cmd *map; + struct hclge_desc desc; + u16 qs_id_l; + u16 qs_id_h; + int ret; + + map = (struct hclge_nq_to_qs_link_cmd *)desc.data; + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NQ_TO_QS_LINK, true); + map->nq_id = cpu_to_le16(q_id); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get queue to qset map, ret = %d\n", ret); + return ret; + } + *qset_id = le16_to_cpu(map->qset_id); + + /* convert qset_id to the following format, drop the vld bit + * | qs_id_h | vld | qs_id_l | + * qset_id: | 15 ~ 11 | 10 | 9 ~ 0 | + * \ \ / / + * \ \ / / + * qset_id: | 15 | 14 ~ 10 | 9 ~ 0 | + */ + qs_id_l = hnae3_get_field(*qset_id, HCLGE_TM_QS_ID_L_MSK, + HCLGE_TM_QS_ID_L_S); + qs_id_h = hnae3_get_field(*qset_id, HCLGE_TM_QS_ID_H_EXT_MSK, + HCLGE_TM_QS_ID_H_EXT_S); + *qset_id = 0; + hnae3_set_field(*qset_id, HCLGE_TM_QS_ID_L_MSK, HCLGE_TM_QS_ID_L_S, + qs_id_l); + hnae3_set_field(*qset_id, HCLGE_TM_QS_ID_H_MSK, HCLGE_TM_QS_ID_H_S, + qs_id_h); + return 0; +} + +int hclge_tm_get_q_to_tc(struct hclge_dev *hdev, u16 q_id, u8 *tc_id) +{ +#define HCLGE_TM_TC_MASK 0x7 + + struct hclge_tqp_tx_queue_tc_cmd *tc; + struct hclge_desc desc; + int ret; + + tc = (struct hclge_tqp_tx_queue_tc_cmd *)desc.data; + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TQP_TX_QUEUE_TC, true); + tc->queue_id = cpu_to_le16(q_id); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get queue to tc map, ret = %d\n", ret); + return ret; + } + + *tc_id = tc->tc_id & HCLGE_TM_TC_MASK; + return 0; +} + +int hclge_tm_get_pg_to_pri_map(struct hclge_dev *hdev, u8 pg_id, + u8 *pri_bit_map) +{ + struct hclge_pg_to_pri_link_cmd *map; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_TO_PRI_LINK, true); + map = (struct hclge_pg_to_pri_link_cmd *)desc.data; + map->pg_id = pg_id; + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get pg to pri map, ret = %d\n", ret); + return ret; + } + + *pri_bit_map = map->pri_bit_map; + return 0; +} + +int hclge_tm_get_pg_weight(struct hclge_dev *hdev, u8 pg_id, u8 *weight) +{ + struct hclge_pg_weight_cmd *pg_weight_cmd; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_WEIGHT, true); + pg_weight_cmd = (struct hclge_pg_weight_cmd *)desc.data; + pg_weight_cmd->pg_id = pg_id; + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + 
"failed to get pg weight, ret = %d\n", ret); + return ret; + } + + *weight = pg_weight_cmd->dwrr; + return 0; +} + +int hclge_tm_get_pg_sch_mode(struct hclge_dev *hdev, u8 pg_id, u8 *mode) +{ + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_SCH_MODE_CFG, true); + desc.data[0] = cpu_to_le32(pg_id); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get pg sch mode, ret = %d\n", ret); + return ret; + } + + *mode = (u8)le32_to_cpu(desc.data[1]); + return 0; +} + +int hclge_tm_get_pg_shaper(struct hclge_dev *hdev, u8 pg_id, + enum hclge_opcode_type cmd, + struct hclge_tm_shaper_para *para) +{ + struct hclge_pg_shapping_cmd *shap_cfg_cmd; + struct hclge_desc desc; + u32 shapping_para; + int ret; + + if (cmd != HCLGE_OPC_TM_PG_C_SHAPPING && + cmd != HCLGE_OPC_TM_PG_P_SHAPPING) + return -EINVAL; + + hclge_cmd_setup_basic_desc(&desc, cmd, true); + shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data; + shap_cfg_cmd->pg_id = pg_id; + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get pg shaper(%#x), ret = %d\n", + cmd, ret); + return ret; + } + + shapping_para = le32_to_cpu(shap_cfg_cmd->pg_shapping_para); + para->ir_b = hclge_tm_get_field(shapping_para, IR_B); + para->ir_u = hclge_tm_get_field(shapping_para, IR_U); + para->ir_s = hclge_tm_get_field(shapping_para, IR_S); + para->bs_b = hclge_tm_get_field(shapping_para, BS_B); + para->bs_s = hclge_tm_get_field(shapping_para, BS_S); + para->flag = shap_cfg_cmd->flag; + para->rate = le32_to_cpu(shap_cfg_cmd->pg_rate); + return 0; +} + +int hclge_tm_get_port_shaper(struct hclge_dev *hdev, + struct hclge_tm_shaper_para *para) +{ + struct hclge_port_shapping_cmd *port_shap_cfg_cmd; + struct hclge_desc desc; + u32 shapping_para; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get port shaper, ret = %d\n", ret); + return ret; + } + + port_shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data; + shapping_para = le32_to_cpu(port_shap_cfg_cmd->port_shapping_para); + para->ir_b = hclge_tm_get_field(shapping_para, IR_B); + para->ir_u = hclge_tm_get_field(shapping_para, IR_U); + para->ir_s = hclge_tm_get_field(shapping_para, IR_S); + para->bs_b = hclge_tm_get_field(shapping_para, BS_B); + para->bs_s = hclge_tm_get_field(shapping_para, BS_S); + para->flag = port_shap_cfg_cmd->flag; + para->rate = le32_to_cpu(port_shap_cfg_cmd->port_rate); + + return 0; +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h new file mode 100644 index 000000000..251e80845 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h @@ -0,0 +1,276 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +// Copyright (c) 2016-2017 Hisilicon Limited. 
+ +#ifndef __HCLGE_TM_H +#define __HCLGE_TM_H + +#include <linux/types.h> + +#include "hnae3.h" + +struct hclge_dev; +struct hclge_vport; +enum hclge_opcode_type; + +/* MAC Pause */ +#define HCLGE_TX_MAC_PAUSE_EN_MSK BIT(0) +#define HCLGE_RX_MAC_PAUSE_EN_MSK BIT(1) + +#define HCLGE_TM_PORT_BASE_MODE_MSK BIT(0) + +#define HCLGE_DEFAULT_PAUSE_TRANS_GAP 0x7F +#define HCLGE_DEFAULT_PAUSE_TRANS_TIME 0xFFFF + +/* SP or DWRR */ +#define HCLGE_TM_TX_SCHD_DWRR_MSK BIT(0) +#define HCLGE_TM_TX_SCHD_SP_MSK 0xFE + +#define HCLGE_ETHER_MAX_RATE 100000 + +#define HCLGE_TM_PF_MAX_PRI_NUM 8 +#define HCLGE_TM_PF_MAX_QSET_NUM 8 + +#define HCLGE_DSCP_MAP_TC_BD_NUM 2 +#define HCLGE_DSCP_TC_SHIFT(n) (((n) & 1) * 4) + +struct hclge_pg_to_pri_link_cmd { + u8 pg_id; + u8 rsvd1[3]; + u8 pri_bit_map; +}; + +struct hclge_qs_to_pri_link_cmd { + __le16 qs_id; + __le16 rsvd; + u8 priority; +#define HCLGE_TM_QS_PRI_LINK_VLD_MSK BIT(0) + u8 link_vld; +}; + +struct hclge_nq_to_qs_link_cmd { + __le16 nq_id; + __le16 rsvd; +#define HCLGE_TM_Q_QS_LINK_VLD_MSK BIT(10) +#define HCLGE_TM_QS_ID_L_MSK GENMASK(9, 0) +#define HCLGE_TM_QS_ID_L_S 0 +#define HCLGE_TM_QS_ID_H_MSK GENMASK(14, 10) +#define HCLGE_TM_QS_ID_H_S 10 +#define HCLGE_TM_QS_ID_H_EXT_S 11 +#define HCLGE_TM_QS_ID_H_EXT_MSK GENMASK(15, 11) + __le16 qset_id; +}; + +struct hclge_tqp_tx_queue_tc_cmd { + __le16 queue_id; + __le16 rsvd; + u8 tc_id; + u8 rev[3]; +}; + +struct hclge_pg_weight_cmd { + u8 pg_id; + u8 dwrr; +}; + +struct hclge_priority_weight_cmd { + u8 pri_id; + u8 dwrr; +}; + +struct hclge_pri_sch_mode_cfg_cmd { + u8 pri_id; + u8 rsvd[3]; + u8 sch_mode; +}; + +struct hclge_qs_sch_mode_cfg_cmd { + __le16 qs_id; + u8 rsvd[2]; + u8 sch_mode; +}; + +struct hclge_qs_weight_cmd { + __le16 qs_id; + u8 dwrr; +}; + +struct hclge_ets_tc_weight_cmd { + u8 tc_weight[HNAE3_MAX_TC]; + u8 weight_offset; + u8 rsvd[15]; +}; + +#define HCLGE_TM_SHAP_IR_B_MSK GENMASK(7, 0) +#define HCLGE_TM_SHAP_IR_B_LSH 0 +#define HCLGE_TM_SHAP_IR_U_MSK GENMASK(11, 8) +#define HCLGE_TM_SHAP_IR_U_LSH 8 +#define HCLGE_TM_SHAP_IR_S_MSK GENMASK(15, 12) +#define HCLGE_TM_SHAP_IR_S_LSH 12 +#define HCLGE_TM_SHAP_BS_B_MSK GENMASK(20, 16) +#define HCLGE_TM_SHAP_BS_B_LSH 16 +#define HCLGE_TM_SHAP_BS_S_MSK GENMASK(25, 21) +#define HCLGE_TM_SHAP_BS_S_LSH 21 + +enum hclge_shap_bucket { + HCLGE_TM_SHAP_C_BUCKET = 0, + HCLGE_TM_SHAP_P_BUCKET, +}; + +/* set bit HCLGE_TM_RATE_VLD to 1 means use 'rate' to config shaping */ +#define HCLGE_TM_RATE_VLD 0 + +struct hclge_pri_shapping_cmd { + u8 pri_id; + u8 rsvd[3]; + __le32 pri_shapping_para; + u8 flag; + u8 rsvd1[3]; + __le32 pri_rate; +}; + +struct hclge_pg_shapping_cmd { + u8 pg_id; + u8 rsvd[3]; + __le32 pg_shapping_para; + u8 flag; + u8 rsvd1[3]; + __le32 pg_rate; +}; + +struct hclge_qs_shapping_cmd { + __le16 qs_id; + u8 rsvd[2]; + __le32 qs_shapping_para; + u8 flag; + u8 rsvd1[3]; + __le32 qs_rate; +}; + +#define HCLGE_BP_GRP_NUM 32 +#define HCLGE_BP_SUB_GRP_ID_S 0 +#define HCLGE_BP_SUB_GRP_ID_M GENMASK(4, 0) +#define HCLGE_BP_GRP_ID_S 5 +#define HCLGE_BP_GRP_ID_M GENMASK(9, 5) + +#define HCLGE_BP_EXT_GRP_NUM 40 +#define HCLGE_BP_EXT_GRP_ID_S 5 +#define HCLGE_BP_EXT_GRP_ID_M GENMASK(10, 5) + +struct hclge_bp_to_qs_map_cmd { + u8 tc_id; + u8 rsvd[2]; + u8 qs_group_id; + __le32 qs_bit_map; + u32 rsvd1; +}; + +#define HCLGE_PFC_DISABLE 0 +#define HCLGE_PFC_TX_RX_DISABLE 0 + +struct hclge_pfc_en_cmd { + u8 tx_rx_en_bitmap; + u8 pri_en_bitmap; +}; + +struct hclge_cfg_pause_param_cmd { + u8 mac_addr[ETH_ALEN]; + u8 pause_trans_gap; + u8 rsvd; + __le16 
pause_trans_time; + u8 rsvd1[6]; + /* extra mac address to do double check for pause frame */ + u8 mac_addr_extra[ETH_ALEN]; + u16 rsvd2; +}; + +struct hclge_pfc_stats_cmd { + __le64 pkt_num[3]; +}; + +struct hclge_port_shapping_cmd { + __le32 port_shapping_para; + u8 flag; + u8 rsvd[3]; + __le32 port_rate; +}; + +struct hclge_shaper_ir_para { + u8 ir_b; /* IR_B parameter of IR shaper */ + u8 ir_u; /* IR_U parameter of IR shaper */ + u8 ir_s; /* IR_S parameter of IR shaper */ +}; + +struct hclge_tm_nodes_cmd { + u8 pg_base_id; + u8 pri_base_id; + __le16 qset_base_id; + __le16 queue_base_id; + u8 pg_num; + u8 pri_num; + __le16 qset_num; + __le16 queue_num; +}; + +struct hclge_tm_shaper_para { + u32 rate; + u8 ir_b; + u8 ir_u; + u8 ir_s; + u8 bs_b; + u8 bs_s; + u8 flag; +}; + +#define hclge_tm_set_field(dest, string, val) \ + hnae3_set_field((dest), \ + (HCLGE_TM_SHAP_##string##_MSK), \ + (HCLGE_TM_SHAP_##string##_LSH), val) +#define hclge_tm_get_field(src, string) \ + hnae3_get_field((src), HCLGE_TM_SHAP_##string##_MSK, \ + HCLGE_TM_SHAP_##string##_LSH) + +int hclge_tm_schd_init(struct hclge_dev *hdev); +int hclge_tm_vport_map_update(struct hclge_dev *hdev); +int hclge_pause_setup_hw(struct hclge_dev *hdev, bool init); +int hclge_tm_schd_setup_hw(struct hclge_dev *hdev); +void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc); +void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc); +void hclge_tm_pfc_info_update(struct hclge_dev *hdev); +int hclge_tm_dwrr_cfg(struct hclge_dev *hdev); +int hclge_tm_init_hw(struct hclge_dev *hdev, bool init); +int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap, + u8 pfc_bitmap); +int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx); +int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr); +int hclge_mac_pause_setup_hw(struct hclge_dev *hdev); +void hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats); +void hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats); +int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate); +int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev); +int hclge_tm_get_qset_num(struct hclge_dev *hdev, u16 *qset_num); +int hclge_tm_get_pri_num(struct hclge_dev *hdev, u8 *pri_num); +int hclge_tm_get_qset_map_pri(struct hclge_dev *hdev, u16 qset_id, u8 *priority, + u8 *link_vld); +int hclge_tm_get_qset_sch_mode(struct hclge_dev *hdev, u16 qset_id, u8 *mode); +int hclge_tm_get_qset_weight(struct hclge_dev *hdev, u16 qset_id, u8 *weight); +int hclge_tm_get_qset_shaper(struct hclge_dev *hdev, u16 qset_id, + struct hclge_tm_shaper_para *para); +int hclge_tm_get_pri_sch_mode(struct hclge_dev *hdev, u8 pri_id, u8 *mode); +int hclge_tm_get_pri_weight(struct hclge_dev *hdev, u8 pri_id, u8 *weight); +int hclge_tm_get_pri_shaper(struct hclge_dev *hdev, u8 pri_id, + enum hclge_opcode_type cmd, + struct hclge_tm_shaper_para *para); +int hclge_tm_get_q_to_qs_map(struct hclge_dev *hdev, u16 q_id, u16 *qset_id); +int hclge_tm_get_q_to_tc(struct hclge_dev *hdev, u16 q_id, u8 *tc_id); +int hclge_tm_get_pg_to_pri_map(struct hclge_dev *hdev, u8 pg_id, + u8 *pri_bit_map); +int hclge_tm_get_pg_weight(struct hclge_dev *hdev, u8 pg_id, u8 *weight); +int hclge_tm_get_pg_sch_mode(struct hclge_dev *hdev, u8 pg_id, u8 *mode); +int hclge_tm_get_pg_shaper(struct hclge_dev *hdev, u8 pg_id, + enum hclge_opcode_type cmd, + struct hclge_tm_shaper_para *para); +int hclge_tm_get_port_shaper(struct hclge_dev *hdev, + struct hclge_tm_shaper_para *para); +int 
hclge_up_to_tc_map(struct hclge_dev *hdev); +int hclge_dscp_to_tc_map(struct hclge_dev *hdev); +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h new file mode 100644 index 000000000..8510b88d4 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h @@ -0,0 +1,87 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2018-2020 Hisilicon Limited. */ + +/* This must be outside ifdef _HCLGE_TRACE_H */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM hns3 + +#if !defined(_HCLGE_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ) +#define _HCLGE_TRACE_H_ + +#include <linux/tracepoint.h> + +#define PF_GET_MBX_LEN (sizeof(struct hclge_mbx_vf_to_pf_cmd) / sizeof(u32)) +#define PF_SEND_MBX_LEN (sizeof(struct hclge_mbx_pf_to_vf_cmd) / sizeof(u32)) + +TRACE_EVENT(hclge_pf_mbx_get, + TP_PROTO( + struct hclge_dev *hdev, + struct hclge_mbx_vf_to_pf_cmd *req), + TP_ARGS(hdev, req), + + TP_STRUCT__entry( + __field(u8, vfid) + __field(u8, code) + __field(u8, subcode) + __string(pciname, pci_name(hdev->pdev)) + __string(devname, &hdev->vport[0].nic.kinfo.netdev->name) + __array(u32, mbx_data, PF_GET_MBX_LEN) + ), + + TP_fast_assign( + __entry->vfid = req->mbx_src_vfid; + __entry->code = req->msg.code; + __entry->subcode = req->msg.subcode; + __assign_str(pciname, pci_name(hdev->pdev)); + __assign_str(devname, &hdev->vport[0].nic.kinfo.netdev->name); + memcpy(__entry->mbx_data, req, + sizeof(struct hclge_mbx_vf_to_pf_cmd)); + ), + + TP_printk( + "%s %s vfid:%u code:%u subcode:%u data:%s", + __get_str(pciname), __get_str(devname), __entry->vfid, + __entry->code, __entry->subcode, + __print_array(__entry->mbx_data, PF_GET_MBX_LEN, sizeof(u32)) + ) +); + +TRACE_EVENT(hclge_pf_mbx_send, + TP_PROTO( + struct hclge_dev *hdev, + struct hclge_mbx_pf_to_vf_cmd *req), + TP_ARGS(hdev, req), + + TP_STRUCT__entry( + __field(u8, vfid) + __field(u16, code) + __string(pciname, pci_name(hdev->pdev)) + __string(devname, &hdev->vport[0].nic.kinfo.netdev->name) + __array(u32, mbx_data, PF_SEND_MBX_LEN) + ), + + TP_fast_assign( + __entry->vfid = req->dest_vfid; + __entry->code = le16_to_cpu(req->msg.code); + __assign_str(pciname, pci_name(hdev->pdev)); + __assign_str(devname, &hdev->vport[0].nic.kinfo.netdev->name); + memcpy(__entry->mbx_data, req, + sizeof(struct hclge_mbx_pf_to_vf_cmd)); + ), + + TP_printk( + "%s %s vfid:%u code:%u data:%s", + __get_str(pciname), __get_str(devname), __entry->vfid, + __entry->code, + __print_array(__entry->mbx_data, PF_SEND_MBX_LEN, sizeof(u32)) + ) +); + +#endif /* _HCLGE_TRACE_H_ */ + +/* This must be outside ifdef _HCLGE_TRACE_H */ +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE hclge_trace +#include <trace/define_trace.h> |
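
Editor's note: the following user-space sketch is not part of the commit above. It only illustrates how the 32-bit *_shapping_para word written by hclge_tm_get_shapping_para() and read back through hclge_tm_get_field() is laid out, using the HCLGE_TM_SHAP_* masks and shifts defined in hclge_tm.h. GENMASK(), set_field(), get_field() and main() below are local stand-ins for the kernel's linux/bits.h and hnae3_set_field()/hnae3_get_field(), and the field values are arbitrary placeholders (real IR_B/IR_U/IR_S come from hclge_shaper_para_calc()).

/* Illustrative sketch of the shaping-parameter bit layout; compiles in user space. */
#include <stdio.h>

/* local stand-in for the kernel's GENMASK() */
#define GENMASK(h, l)	(((~0u) << (l)) & (~0u >> (31 - (h))))

#define SHAP_IR_B_MSK	GENMASK(7, 0)	/* bits  7..0 : IR_B */
#define SHAP_IR_B_LSH	0
#define SHAP_IR_U_MSK	GENMASK(11, 8)	/* bits 11..8 : IR_U */
#define SHAP_IR_U_LSH	8
#define SHAP_IR_S_MSK	GENMASK(15, 12)	/* bits 15..12: IR_S */
#define SHAP_IR_S_LSH	12
#define SHAP_BS_B_MSK	GENMASK(20, 16)	/* bits 20..16: BS_B */
#define SHAP_BS_B_LSH	16
#define SHAP_BS_S_MSK	GENMASK(25, 21)	/* bits 25..21: BS_S */
#define SHAP_BS_S_LSH	21

/* minimal equivalents of hnae3_set_field()/hnae3_get_field() */
static void set_field(unsigned int *word, unsigned int msk, unsigned int lsh,
		      unsigned int val)
{
	*word = (*word & ~msk) | ((val << lsh) & msk);
}

static unsigned int get_field(unsigned int word, unsigned int msk,
			      unsigned int lsh)
{
	return (word & msk) >> lsh;
}

int main(void)
{
	unsigned int para = 0;

	/* pack placeholder IR/BS values the way hclge_tm_get_shapping_para() does */
	set_field(&para, SHAP_IR_B_MSK, SHAP_IR_B_LSH, 0x7d);
	set_field(&para, SHAP_IR_U_MSK, SHAP_IR_U_LSH, 0x3);
	set_field(&para, SHAP_IR_S_MSK, SHAP_IR_S_LSH, 0x4);
	set_field(&para, SHAP_BS_B_MSK, SHAP_BS_B_LSH, 0x14);
	set_field(&para, SHAP_BS_S_MSK, SHAP_BS_S_LSH, 0x14);

	printf("packed shapping_para = %#010x\n", para);

	/* unpack the same word, mirroring hclge_tm_get_field(shapping_para, IR_B) etc. */
	printf("ir_b=%#x ir_u=%#x ir_s=%#x bs_b=%#x bs_s=%#x\n",
	       get_field(para, SHAP_IR_B_MSK, SHAP_IR_B_LSH),
	       get_field(para, SHAP_IR_U_MSK, SHAP_IR_U_LSH),
	       get_field(para, SHAP_IR_S_MSK, SHAP_IR_S_LSH),
	       get_field(para, SHAP_BS_B_MSK, SHAP_BS_B_LSH),
	       get_field(para, SHAP_BS_S_MSK, SHAP_BS_S_LSH));
	return 0;
}

The same packed word is what the driver stores in pri_shapping_para, pg_shapping_para, qs_shapping_para and port_shapping_para, and what the hclge_tm_get_*_shaper() query helpers decode back into struct hclge_tm_shaper_para.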