From 2c3c1048746a4622d8c89a29670120dc8fab93c4 Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Sun, 7 Apr 2024 20:49:45 +0200 Subject: Adding upstream version 6.1.76. Signed-off-by: Daniel Baumann --- drivers/net/ethernet/hisilicon/hns3/Makefile | 29 + drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h | 256 + drivers/net/ethernet/hisilicon/hns3/hnae3.c | 392 + drivers/net/ethernet/hisilicon/hns3/hnae3.h | 932 ++ .../hisilicon/hns3/hns3_common/hclge_comm_cmd.c | 652 + .../hisilicon/hns3/hns3_common/hclge_comm_cmd.h | 471 + .../hisilicon/hns3/hns3_common/hclge_comm_rss.c | 505 + .../hisilicon/hns3/hns3_common/hclge_comm_rss.h | 134 + .../hns3/hns3_common/hclge_comm_tqp_stats.c | 115 + .../hns3/hns3_common/hclge_comm_tqp_stats.h | 39 + drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c | 130 + drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c | 1443 ++ drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h | 67 + drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 6012 +++++++++ drivers/net/ethernet/hisilicon/hns3/hns3_enet.h | 751 ++ drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c | 2156 +++ drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.h | 37 + drivers/net/ethernet/hisilicon/hns3/hns3_trace.h | 138 + .../net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h | 881 ++ .../net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c | 656 + .../net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.h | 15 + .../ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c | 2587 ++++ .../ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h | 774 ++ .../ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c | 134 + .../ethernet/hisilicon/hns3/hns3pf/hclge_devlink.h | 15 + .../net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c | 2940 +++++ .../net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h | 231 + .../ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 13229 +++++++++++++++++++ .../ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 1149 ++ .../net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c | 1152 ++ .../ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c | 311 + .../ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h | 19 + .../net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c | 564 + .../net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h | 143 + .../net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c | 2116 +++ .../net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h | 276 + .../ethernet/hisilicon/hns3/hns3pf/hclge_trace.h | 87 + .../ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h | 136 + .../hisilicon/hns3/hns3vf/hclgevf_devlink.c | 136 + .../hisilicon/hns3/hns3vf/hclgevf_devlink.h | 15 + .../ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 3482 +++++ .../ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h | 298 + .../ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c | 388 + .../ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h | 87 + 44 files changed, 46080 insertions(+) create mode 100644 drivers/net/ethernet/hisilicon/hns3/Makefile create mode 100644 drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h create mode 100644 drivers/net/ethernet/hisilicon/hns3/hnae3.c create mode 100644 drivers/net/ethernet/hisilicon/hns3/hnae3.h create mode 100644 drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c create mode 100644 drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h create mode 100644 drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c create mode 100644 drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h create mode 100644 drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c create mode 100644 
drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.h create mode 100644 drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c create mode 100644 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c create mode 100644 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h create mode 100644 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c create mode 100644 drivers/net/ethernet/hisilicon/hns3/hns3_enet.h create mode 100644 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c create mode 100644 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.h create mode 100644 drivers/net/ethernet/hisilicon/hns3/hns3_trace.h create mode 100644 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h create mode 100644 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c create mode 100644 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.h create mode 100644 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c create mode 100644 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h create mode 100644 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c create mode 100644 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.h create mode 100644 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c create mode 100644 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h create mode 100644 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c create mode 100644 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h create mode 100644 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c create mode 100644 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c create mode 100644 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h create mode 100644 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c create mode 100644 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h create mode 100644 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c create mode 100644 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h create mode 100644 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h create mode 100644 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h create mode 100644 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c create mode 100644 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.h create mode 100644 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c create mode 100644 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h create mode 100644 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c create mode 100644 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h (limited to 'drivers/net/ethernet/hisilicon/hns3') diff --git a/drivers/net/ethernet/hisilicon/hns3/Makefile b/drivers/net/ethernet/hisilicon/hns3/Makefile new file mode 100644 index 000000000..6efea4662 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/Makefile @@ -0,0 +1,29 @@ +# SPDX-License-Identifier: GPL-2.0+ +# +# Makefile for the HISILICON network device drivers. 
+# + +ccflags-y += -I$(srctree)/$(src) +ccflags-y += -I$(srctree)/drivers/net/ethernet/hisilicon/hns3/hns3pf +ccflags-y += -I$(srctree)/drivers/net/ethernet/hisilicon/hns3/hns3vf +ccflags-y += -I$(srctree)/drivers/net/ethernet/hisilicon/hns3/hns3_common + +obj-$(CONFIG_HNS3) += hnae3.o + +obj-$(CONFIG_HNS3_ENET) += hns3.o +hns3-objs = hns3_enet.o hns3_ethtool.o hns3_debugfs.o + +hns3-$(CONFIG_HNS3_DCB) += hns3_dcbnl.o + +obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o + +hclgevf-objs = hns3vf/hclgevf_main.o hns3vf/hclgevf_mbx.o hns3vf/hclgevf_devlink.o \ + hns3_common/hclge_comm_cmd.o hns3_common/hclge_comm_rss.o hns3_common/hclge_comm_tqp_stats.o + +obj-$(CONFIG_HNS3_HCLGE) += hclge.o +hclge-objs = hns3pf/hclge_main.o hns3pf/hclge_mdio.o hns3pf/hclge_tm.o \ + hns3pf/hclge_mbx.o hns3pf/hclge_err.o hns3pf/hclge_debugfs.o hns3pf/hclge_ptp.o hns3pf/hclge_devlink.o \ + hns3_common/hclge_comm_cmd.o hns3_common/hclge_comm_rss.o hns3_common/hclge_comm_tqp_stats.o + + +hclge-$(CONFIG_HNS3_DCB) += hns3pf/hclge_dcb.o diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h new file mode 100644 index 000000000..abcd7877f --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h @@ -0,0 +1,256 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2016-2017 Hisilicon Limited. */ + +#ifndef __HCLGE_MBX_H +#define __HCLGE_MBX_H +#include +#include +#include + +enum HCLGE_MBX_OPCODE { + HCLGE_MBX_RESET = 0x01, /* (VF -> PF) assert reset */ + HCLGE_MBX_ASSERTING_RESET, /* (PF -> VF) PF is asserting reset */ + HCLGE_MBX_SET_UNICAST, /* (VF -> PF) set UC addr */ + HCLGE_MBX_SET_MULTICAST, /* (VF -> PF) set MC addr */ + HCLGE_MBX_SET_VLAN, /* (VF -> PF) set VLAN */ + HCLGE_MBX_MAP_RING_TO_VECTOR, /* (VF -> PF) map ring-to-vector */ + HCLGE_MBX_UNMAP_RING_TO_VECTOR, /* (VF -> PF) unamp ring-to-vector */ + HCLGE_MBX_SET_PROMISC_MODE, /* (VF -> PF) set promiscuous mode */ + HCLGE_MBX_SET_MACVLAN, /* (VF -> PF) set unicast filter */ + HCLGE_MBX_API_NEGOTIATE, /* (VF -> PF) negotiate API version */ + HCLGE_MBX_GET_QINFO, /* (VF -> PF) get queue config */ + HCLGE_MBX_GET_QDEPTH, /* (VF -> PF) get queue depth */ + HCLGE_MBX_GET_BASIC_INFO, /* (VF -> PF) get basic info */ + HCLGE_MBX_GET_RETA, /* (VF -> PF) get RETA */ + HCLGE_MBX_GET_RSS_KEY, /* (VF -> PF) get RSS key */ + HCLGE_MBX_GET_MAC_ADDR, /* (VF -> PF) get MAC addr */ + HCLGE_MBX_PF_VF_RESP, /* (PF -> VF) generate response to VF */ + HCLGE_MBX_GET_BDNUM, /* (VF -> PF) get BD num */ + HCLGE_MBX_GET_BUFSIZE, /* (VF -> PF) get buffer size */ + HCLGE_MBX_GET_STREAMID, /* (VF -> PF) get stream id */ + HCLGE_MBX_SET_AESTART, /* (VF -> PF) start ae */ + HCLGE_MBX_SET_TSOSTATS, /* (VF -> PF) get tso stats */ + HCLGE_MBX_LINK_STAT_CHANGE, /* (PF -> VF) link status has changed */ + HCLGE_MBX_GET_BASE_CONFIG, /* (VF -> PF) get config */ + HCLGE_MBX_BIND_FUNC_QUEUE, /* (VF -> PF) bind function and queue */ + HCLGE_MBX_GET_LINK_STATUS, /* (VF -> PF) get link status */ + HCLGE_MBX_QUEUE_RESET, /* (VF -> PF) reset queue */ + HCLGE_MBX_KEEP_ALIVE, /* (VF -> PF) send keep alive cmd */ + HCLGE_MBX_SET_ALIVE, /* (VF -> PF) set alive state */ + HCLGE_MBX_SET_MTU, /* (VF -> PF) set mtu */ + HCLGE_MBX_GET_QID_IN_PF, /* (VF -> PF) get queue id in pf */ + HCLGE_MBX_LINK_STAT_MODE, /* (PF -> VF) link mode has changed */ + HCLGE_MBX_GET_LINK_MODE, /* (VF -> PF) get the link mode of pf */ + HCLGE_MBX_PUSH_VLAN_INFO, /* (PF -> VF) push port base vlan */ + HCLGE_MBX_GET_MEDIA_TYPE, /* (VF -> PF) get media type */ + 
HCLGE_MBX_PUSH_PROMISC_INFO, /* (PF -> VF) push vf promisc info */ + HCLGE_MBX_VF_UNINIT, /* (VF -> PF) vf is unintializing */ + HCLGE_MBX_HANDLE_VF_TBL, /* (VF -> PF) store/clear hw table */ + HCLGE_MBX_GET_RING_VECTOR_MAP, /* (VF -> PF) get ring-to-vector map */ + + HCLGE_MBX_GET_VF_FLR_STATUS = 200, /* (M7 -> PF) get vf flr status */ + HCLGE_MBX_PUSH_LINK_STATUS, /* (M7 -> PF) get port link status */ + HCLGE_MBX_NCSI_ERROR, /* (M7 -> PF) receive a NCSI error */ +}; + +/* below are per-VF mac-vlan subcodes */ +enum hclge_mbx_mac_vlan_subcode { + HCLGE_MBX_MAC_VLAN_UC_MODIFY = 0, /* modify UC mac addr */ + HCLGE_MBX_MAC_VLAN_UC_ADD, /* add a new UC mac addr */ + HCLGE_MBX_MAC_VLAN_UC_REMOVE, /* remove a new UC mac addr */ + HCLGE_MBX_MAC_VLAN_MC_MODIFY, /* modify MC mac addr */ + HCLGE_MBX_MAC_VLAN_MC_ADD, /* add new MC mac addr */ + HCLGE_MBX_MAC_VLAN_MC_REMOVE, /* remove MC mac addr */ +}; + +/* below are per-VF vlan cfg subcodes */ +enum hclge_mbx_vlan_cfg_subcode { + HCLGE_MBX_VLAN_FILTER = 0, /* set vlan filter */ + HCLGE_MBX_VLAN_TX_OFF_CFG, /* set tx side vlan offload */ + HCLGE_MBX_VLAN_RX_OFF_CFG, /* set rx side vlan offload */ + HCLGE_MBX_PORT_BASE_VLAN_CFG, /* set port based vlan configuration */ + HCLGE_MBX_GET_PORT_BASE_VLAN_STATE, /* get port based vlan state */ + HCLGE_MBX_ENABLE_VLAN_FILTER, +}; + +enum hclge_mbx_tbl_cfg_subcode { + HCLGE_MBX_VPORT_LIST_CLEAR, +}; + +#define HCLGE_MBX_MAX_MSG_SIZE 14 +#define HCLGE_MBX_MAX_RESP_DATA_SIZE 8U +#define HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM 4 + +#define HCLGE_RESET_SCHED_TIMEOUT (3 * HZ) +#define HCLGE_MBX_SCHED_TIMEOUT (HZ / 2) + +struct hclge_ring_chain_param { + u8 ring_type; + u8 tqp_index; + u8 int_gl_index; +}; + +struct hclge_basic_info { + u8 hw_tc_map; + u8 rsv; + __le16 mbx_api_version; + __le32 pf_caps; +}; + +struct hclgevf_mbx_resp_status { + struct mutex mbx_mutex; /* protects against contending sync cmd resp */ + u32 origin_mbx_msg; + bool received_resp; + int resp_status; + u16 match_id; + u8 additional_info[HCLGE_MBX_MAX_RESP_DATA_SIZE]; +}; + +struct hclge_respond_to_vf_msg { + int status; + u8 data[HCLGE_MBX_MAX_RESP_DATA_SIZE]; + u16 len; +}; + +struct hclge_vf_to_pf_msg { + u8 code; + union { + struct { + u8 subcode; + u8 data[HCLGE_MBX_MAX_MSG_SIZE]; + }; + struct { + u8 en_bc; + u8 en_uc; + u8 en_mc; + u8 en_limit_promisc; + }; + struct { + u8 vector_id; + u8 ring_num; + struct hclge_ring_chain_param + param[HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM]; + }; + }; +}; + +struct hclge_pf_to_vf_msg { + __le16 code; + union { + /* used for mbx response */ + struct { + __le16 vf_mbx_msg_code; + __le16 vf_mbx_msg_subcode; + __le16 resp_status; + u8 resp_data[HCLGE_MBX_MAX_RESP_DATA_SIZE]; + }; + /* used for general mbx */ + struct { + u8 msg_data[HCLGE_MBX_MAX_MSG_SIZE]; + }; + }; +}; + +struct hclge_mbx_vf_to_pf_cmd { + u8 rsv; + u8 mbx_src_vfid; /* Auto filled by IMP */ + u8 mbx_need_resp; + u8 rsv1[1]; + u8 msg_len; + u8 rsv2; + __le16 match_id; + struct hclge_vf_to_pf_msg msg; +}; + +#define HCLGE_MBX_NEED_RESP_B 0 + +struct hclge_mbx_pf_to_vf_cmd { + u8 dest_vfid; + u8 rsv[3]; + u8 msg_len; + u8 rsv1; + __le16 match_id; + struct hclge_pf_to_vf_msg msg; +}; + +struct hclge_vf_rst_cmd { + u8 dest_vfid; + u8 vf_rst; + u8 rsv[22]; +}; + +#pragma pack(1) +struct hclge_mbx_link_status { + __le16 link_status; + __le32 speed; + __le16 duplex; + u8 flag; +}; + +struct hclge_mbx_link_mode { + __le16 idx; + __le64 link_mode; +}; + +struct hclge_mbx_port_base_vlan { + __le16 state; + __le16 vlan_proto; + __le16 qos; + __le16 
vlan_tag; +}; + +struct hclge_mbx_vf_queue_info { + __le16 num_tqps; + __le16 rss_size; + __le16 rx_buf_len; +}; + +struct hclge_mbx_vf_queue_depth { + __le16 num_tx_desc; + __le16 num_rx_desc; +}; + +struct hclge_mbx_vlan_filter { + u8 is_kill; + __le16 vlan_id; + __le16 proto; +}; + +struct hclge_mbx_mtu_info { + __le32 mtu; +}; + +#pragma pack() + +/* used by VF to store the received Async responses from PF */ +struct hclgevf_mbx_arq_ring { +#define HCLGE_MBX_MAX_ARQ_MSG_SIZE 8 +#define HCLGE_MBX_MAX_ARQ_MSG_NUM 1024 + struct hclgevf_dev *hdev; + u32 head; + u32 tail; + atomic_t count; + __le16 msg_q[HCLGE_MBX_MAX_ARQ_MSG_NUM][HCLGE_MBX_MAX_ARQ_MSG_SIZE]; +}; + +struct hclge_dev; + +#define HCLGE_MBX_OPCODE_MAX 256 +struct hclge_mbx_ops_param { + struct hclge_vport *vport; + struct hclge_mbx_vf_to_pf_cmd *req; + struct hclge_respond_to_vf_msg *resp_msg; +}; + +typedef int (*hclge_mbx_ops_fn)(struct hclge_mbx_ops_param *param); + +#define hclge_mbx_ring_ptr_move_crq(crq) \ + (crq->next_to_use = (crq->next_to_use + 1) % crq->desc_num) +#define hclge_mbx_tail_ptr_move_arq(arq) \ + (arq.tail = (arq.tail + 1) % HCLGE_MBX_MAX_ARQ_MSG_NUM) +#define hclge_mbx_head_ptr_move_arq(arq) \ + (arq.head = (arq.head + 1) % HCLGE_MBX_MAX_ARQ_MSG_NUM) + +/* PF immediately push link status to VFs when link status changed */ +#define HCLGE_MBX_PUSH_LINK_STATUS_EN BIT(0) +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c new file mode 100644 index 000000000..67b0bf310 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c @@ -0,0 +1,392 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. + +#include +#include + +#include "hnae3.h" + +static LIST_HEAD(hnae3_ae_algo_list); +static LIST_HEAD(hnae3_client_list); +static LIST_HEAD(hnae3_ae_dev_list); + +void hnae3_unregister_ae_algo_prepare(struct hnae3_ae_algo *ae_algo) +{ + const struct pci_device_id *pci_id; + struct hnae3_ae_dev *ae_dev; + + if (!ae_algo) + return; + + list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) { + if (!hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B)) + continue; + + pci_id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev); + if (!pci_id) + continue; + if (IS_ENABLED(CONFIG_PCI_IOV)) + pci_disable_sriov(ae_dev->pdev); + } +} +EXPORT_SYMBOL(hnae3_unregister_ae_algo_prepare); + +/* we are keeping things simple and using single lock for all the + * list. This is a non-critical code so other updations, if happen + * in parallel, can wait. 
+ */ +static DEFINE_MUTEX(hnae3_common_lock); + +static bool hnae3_client_match(enum hnae3_client_type client_type) +{ + if (client_type == HNAE3_CLIENT_KNIC || + client_type == HNAE3_CLIENT_ROCE) + return true; + + return false; +} + +void hnae3_set_client_init_flag(struct hnae3_client *client, + struct hnae3_ae_dev *ae_dev, + unsigned int inited) +{ + if (!client || !ae_dev) + return; + + switch (client->type) { + case HNAE3_CLIENT_KNIC: + hnae3_set_bit(ae_dev->flag, HNAE3_KNIC_CLIENT_INITED_B, inited); + break; + case HNAE3_CLIENT_ROCE: + hnae3_set_bit(ae_dev->flag, HNAE3_ROCE_CLIENT_INITED_B, inited); + break; + default: + break; + } +} +EXPORT_SYMBOL(hnae3_set_client_init_flag); + +static int hnae3_get_client_init_flag(struct hnae3_client *client, + struct hnae3_ae_dev *ae_dev) +{ + int inited = 0; + + switch (client->type) { + case HNAE3_CLIENT_KNIC: + inited = hnae3_get_bit(ae_dev->flag, + HNAE3_KNIC_CLIENT_INITED_B); + break; + case HNAE3_CLIENT_ROCE: + inited = hnae3_get_bit(ae_dev->flag, + HNAE3_ROCE_CLIENT_INITED_B); + break; + default: + break; + } + + return inited; +} + +static int hnae3_init_client_instance(struct hnae3_client *client, + struct hnae3_ae_dev *ae_dev) +{ + int ret; + + /* check if this client matches the type of ae_dev */ + if (!(hnae3_client_match(client->type) && + hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))) { + return 0; + } + + ret = ae_dev->ops->init_client_instance(client, ae_dev); + if (ret) + dev_err(&ae_dev->pdev->dev, + "fail to instantiate client, ret = %d\n", ret); + + return ret; +} + +static void hnae3_uninit_client_instance(struct hnae3_client *client, + struct hnae3_ae_dev *ae_dev) +{ + /* check if this client matches the type of ae_dev */ + if (!(hnae3_client_match(client->type) && + hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))) + return; + + if (hnae3_get_client_init_flag(client, ae_dev)) { + ae_dev->ops->uninit_client_instance(client, ae_dev); + + hnae3_set_client_init_flag(client, ae_dev, 0); + } +} + +int hnae3_register_client(struct hnae3_client *client) +{ + struct hnae3_client *client_tmp; + struct hnae3_ae_dev *ae_dev; + + if (!client) + return -ENODEV; + + mutex_lock(&hnae3_common_lock); + /* one system should only have one client for every type */ + list_for_each_entry(client_tmp, &hnae3_client_list, node) { + if (client_tmp->type == client->type) + goto exit; + } + + list_add_tail(&client->node, &hnae3_client_list); + + /* initialize the client on every matched port */ + list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) { + /* if the client could not be initialized on current port, for + * any error reasons, move on to next available port + */ + int ret = hnae3_init_client_instance(client, ae_dev); + if (ret) + dev_err(&ae_dev->pdev->dev, + "match and instantiation failed for port, ret = %d\n", + ret); + } + +exit: + mutex_unlock(&hnae3_common_lock); + + return 0; +} +EXPORT_SYMBOL(hnae3_register_client); + +void hnae3_unregister_client(struct hnae3_client *client) +{ + struct hnae3_client *client_tmp; + struct hnae3_ae_dev *ae_dev; + bool existed = false; + + if (!client) + return; + + mutex_lock(&hnae3_common_lock); + /* one system should only have one client for every type */ + list_for_each_entry(client_tmp, &hnae3_client_list, node) { + if (client_tmp->type == client->type) { + existed = true; + break; + } + } + + if (!existed) { + mutex_unlock(&hnae3_common_lock); + pr_err("client %s does not exist!\n", client->name); + return; + } + + /* un-initialize the client on every matched port */ + 
list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) { + hnae3_uninit_client_instance(client, ae_dev); + } + + list_del(&client->node); + mutex_unlock(&hnae3_common_lock); +} +EXPORT_SYMBOL(hnae3_unregister_client); + +/* hnae3_register_ae_algo - register a AE algorithm to hnae3 framework + * @ae_algo: AE algorithm + * NOTE: the duplicated name will not be checked + */ +void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo) +{ + const struct pci_device_id *id; + struct hnae3_ae_dev *ae_dev; + struct hnae3_client *client; + int ret; + + if (!ae_algo) + return; + + mutex_lock(&hnae3_common_lock); + + list_add_tail(&ae_algo->node, &hnae3_ae_algo_list); + + /* Check if this algo/ops matches the list of ae_devs */ + list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) { + id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev); + if (!id) + continue; + + if (!ae_algo->ops) { + dev_err(&ae_dev->pdev->dev, "ae_algo ops are null\n"); + continue; + } + ae_dev->ops = ae_algo->ops; + + ret = ae_algo->ops->init_ae_dev(ae_dev); + if (ret) { + dev_err(&ae_dev->pdev->dev, + "init ae_dev error, ret = %d\n", ret); + continue; + } + + /* ae_dev init should set flag */ + hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1); + + /* check the client list for the match with this ae_dev type and + * initialize the figure out client instance + */ + list_for_each_entry(client, &hnae3_client_list, node) { + ret = hnae3_init_client_instance(client, ae_dev); + if (ret) + dev_err(&ae_dev->pdev->dev, + "match and instantiation failed, ret = %d\n", + ret); + } + } + + mutex_unlock(&hnae3_common_lock); +} +EXPORT_SYMBOL(hnae3_register_ae_algo); + +/* hnae3_unregister_ae_algo - unregisters a AE algorithm + * @ae_algo: the AE algorithm to unregister + */ +void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo) +{ + const struct pci_device_id *id; + struct hnae3_ae_dev *ae_dev; + struct hnae3_client *client; + + if (!ae_algo) + return; + + mutex_lock(&hnae3_common_lock); + /* Check if there are matched ae_dev */ + list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) { + if (!hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B)) + continue; + + id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev); + if (!id) + continue; + + /* check the client list for the match with this ae_dev type and + * un-initialize the figure out client instance + */ + list_for_each_entry(client, &hnae3_client_list, node) + hnae3_uninit_client_instance(client, ae_dev); + + ae_algo->ops->uninit_ae_dev(ae_dev); + hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0); + ae_dev->ops = NULL; + } + + list_del(&ae_algo->node); + mutex_unlock(&hnae3_common_lock); +} +EXPORT_SYMBOL(hnae3_unregister_ae_algo); + +/* hnae3_register_ae_dev - registers a AE device to hnae3 framework + * @ae_dev: the AE device + * NOTE: the duplicated name will not be checked + */ +int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev) +{ + const struct pci_device_id *id; + struct hnae3_ae_algo *ae_algo; + struct hnae3_client *client; + int ret; + + if (!ae_dev) + return -ENODEV; + + mutex_lock(&hnae3_common_lock); + + list_add_tail(&ae_dev->node, &hnae3_ae_dev_list); + + /* Check if there are matched ae_algo */ + list_for_each_entry(ae_algo, &hnae3_ae_algo_list, node) { + id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev); + if (!id) + continue; + + if (!ae_algo->ops) { + dev_err(&ae_dev->pdev->dev, "ae_algo ops are null\n"); + ret = -EOPNOTSUPP; + goto out_err; + } + ae_dev->ops = ae_algo->ops; + + ret = ae_dev->ops->init_ae_dev(ae_dev); + if (ret) { + 
dev_err(&ae_dev->pdev->dev, + "init ae_dev error, ret = %d\n", ret); + goto out_err; + } + + /* ae_dev init should set flag */ + hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1); + break; + } + + /* check the client list for the match with this ae_dev type and + * initialize the figure out client instance + */ + list_for_each_entry(client, &hnae3_client_list, node) { + ret = hnae3_init_client_instance(client, ae_dev); + if (ret) + dev_err(&ae_dev->pdev->dev, + "match and instantiation failed, ret = %d\n", + ret); + } + + mutex_unlock(&hnae3_common_lock); + + return 0; + +out_err: + list_del(&ae_dev->node); + mutex_unlock(&hnae3_common_lock); + + return ret; +} +EXPORT_SYMBOL(hnae3_register_ae_dev); + +/* hnae3_unregister_ae_dev - unregisters a AE device + * @ae_dev: the AE device to unregister + */ +void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev) +{ + const struct pci_device_id *id; + struct hnae3_ae_algo *ae_algo; + struct hnae3_client *client; + + if (!ae_dev) + return; + + mutex_lock(&hnae3_common_lock); + /* Check if there are matched ae_algo */ + list_for_each_entry(ae_algo, &hnae3_ae_algo_list, node) { + if (!hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B)) + continue; + + id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev); + if (!id) + continue; + + list_for_each_entry(client, &hnae3_client_list, node) + hnae3_uninit_client_instance(client, ae_dev); + + ae_algo->ops->uninit_ae_dev(ae_dev); + hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0); + ae_dev->ops = NULL; + } + + list_del(&ae_dev->node); + mutex_unlock(&hnae3_common_lock); +} +EXPORT_SYMBOL(hnae3_unregister_ae_dev); + +MODULE_AUTHOR("Huawei Tech. Co., Ltd."); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("HNAE3(Hisilicon Network Acceleration Engine) Framework"); +MODULE_VERSION(HNAE3_MOD_VERSION); diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h new file mode 100644 index 000000000..c693bb701 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -0,0 +1,932 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +// Copyright (c) 2016-2017 Hisilicon Limited. 
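/*
 * Illustrative sketch only, not part of the upstream patch: how a client
 * (for example the KNIC client in hns3_enet.c) plugs into the framework
 * implemented in hnae3.c above. All example_* names here are hypothetical.
 * hnae3_register_client() walks the ae_dev list and calls
 * ops->init_instance() on every device that matches the client type;
 * hnae3_unregister_client() undoes that on module exit.
 */
#include <linux/module.h>
#include "hnae3.h"

static int example_init_instance(struct hnae3_handle *handle)
{
	/* allocate netdev/rings for this handle; return 0 on success */
	return 0;
}

static void example_uninit_instance(struct hnae3_handle *handle, bool reset)
{
	/* release whatever example_init_instance() set up */
}

static const struct hnae3_client_ops example_client_ops = {
	.init_instance	 = example_init_instance,
	.uninit_instance = example_uninit_instance,
};

static struct hnae3_client example_client = {
	.name = "example_knic",
	.type = HNAE3_CLIENT_KNIC,
	.ops  = &example_client_ops,
};

static int __init example_client_init(void)
{
	return hnae3_register_client(&example_client);
}
module_init(example_client_init);

static void __exit example_client_exit(void)
{
	hnae3_unregister_client(&example_client);
}
module_exit(example_client_exit);

MODULE_LICENSE("GPL");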
+ +#ifndef __HNAE3_H +#define __HNAE3_H + +/* Names used in this framework: + * ae handle (handle): + * a set of queues provided by AE + * ring buffer queue (rbq): + * the channel between upper layer and the AE, can do tx and rx + * ring: + * a tx or rx channel within a rbq + * ring description (desc): + * an element in the ring with packet information + * buffer: + * a memory region referred by desc with the full packet payload + * + * "num" means a static number set as a parameter, "count" mean a dynamic + * number set while running + * "cb" means control block + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define HNAE3_MOD_VERSION "1.0" + +#define HNAE3_MIN_VECTOR_NUM 2 /* first one for misc, another for IO */ + +/* Device version */ +#define HNAE3_DEVICE_VERSION_V1 0x00020 +#define HNAE3_DEVICE_VERSION_V2 0x00021 +#define HNAE3_DEVICE_VERSION_V3 0x00030 + +#define HNAE3_PCI_REVISION_BIT_SIZE 8 + +/* Device IDs */ +#define HNAE3_DEV_ID_GE 0xA220 +#define HNAE3_DEV_ID_25GE 0xA221 +#define HNAE3_DEV_ID_25GE_RDMA 0xA222 +#define HNAE3_DEV_ID_25GE_RDMA_MACSEC 0xA223 +#define HNAE3_DEV_ID_50GE_RDMA 0xA224 +#define HNAE3_DEV_ID_50GE_RDMA_MACSEC 0xA225 +#define HNAE3_DEV_ID_100G_RDMA_MACSEC 0xA226 +#define HNAE3_DEV_ID_200G_RDMA 0xA228 +#define HNAE3_DEV_ID_VF 0xA22E +#define HNAE3_DEV_ID_RDMA_DCB_PFC_VF 0xA22F + +#define HNAE3_CLASS_NAME_SIZE 16 + +#define HNAE3_DEV_INITED_B 0x0 +#define HNAE3_DEV_SUPPORT_ROCE_B 0x1 +#define HNAE3_DEV_SUPPORT_DCB_B 0x2 +#define HNAE3_KNIC_CLIENT_INITED_B 0x3 +#define HNAE3_UNIC_CLIENT_INITED_B 0x4 +#define HNAE3_ROCE_CLIENT_INITED_B 0x5 + +#define HNAE3_DEV_SUPPORT_ROCE_DCB_BITS (BIT(HNAE3_DEV_SUPPORT_DCB_B) | \ + BIT(HNAE3_DEV_SUPPORT_ROCE_B)) + +#define hnae3_dev_roce_supported(hdev) \ + hnae3_get_bit((hdev)->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B) + +#define hnae3_dev_dcb_supported(hdev) \ + hnae3_get_bit((hdev)->ae_dev->flag, HNAE3_DEV_SUPPORT_DCB_B) + +enum HNAE3_DEV_CAP_BITS { + HNAE3_DEV_SUPPORT_FD_B, + HNAE3_DEV_SUPPORT_GRO_B, + HNAE3_DEV_SUPPORT_FEC_B, + HNAE3_DEV_SUPPORT_UDP_GSO_B, + HNAE3_DEV_SUPPORT_QB_B, + HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, + HNAE3_DEV_SUPPORT_PTP_B, + HNAE3_DEV_SUPPORT_INT_QL_B, + HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, + HNAE3_DEV_SUPPORT_TX_PUSH_B, + HNAE3_DEV_SUPPORT_PHY_IMP_B, + HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B, + HNAE3_DEV_SUPPORT_HW_PAD_B, + HNAE3_DEV_SUPPORT_STASH_B, + HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B, + HNAE3_DEV_SUPPORT_PAUSE_B, + HNAE3_DEV_SUPPORT_RAS_IMP_B, + HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B, + HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, + HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, + HNAE3_DEV_SUPPORT_MC_MAC_MNG_B, + HNAE3_DEV_SUPPORT_CQ_B, + HNAE3_DEV_SUPPORT_FEC_STATS_B, + HNAE3_DEV_SUPPORT_LANE_NUM_B, +}; + +#define hnae3_ae_dev_fd_supported(ae_dev) \ + test_bit(HNAE3_DEV_SUPPORT_FD_B, (ae_dev)->caps) + +#define hnae3_ae_dev_gro_supported(ae_dev) \ + test_bit(HNAE3_DEV_SUPPORT_GRO_B, (ae_dev)->caps) + +#define hnae3_dev_fec_supported(hdev) \ + test_bit(HNAE3_DEV_SUPPORT_FEC_B, (hdev)->ae_dev->caps) + +#define hnae3_dev_udp_gso_supported(hdev) \ + test_bit(HNAE3_DEV_SUPPORT_UDP_GSO_B, (hdev)->ae_dev->caps) + +#define hnae3_dev_qb_supported(hdev) \ + test_bit(HNAE3_DEV_SUPPORT_QB_B, (hdev)->ae_dev->caps) + +#define hnae3_dev_fd_forward_tc_supported(hdev) \ + test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, (hdev)->ae_dev->caps) + +#define hnae3_dev_ptp_supported(hdev) \ + test_bit(HNAE3_DEV_SUPPORT_PTP_B, (hdev)->ae_dev->caps) + +#define 
hnae3_dev_int_ql_supported(hdev) \ + test_bit(HNAE3_DEV_SUPPORT_INT_QL_B, (hdev)->ae_dev->caps) + +#define hnae3_dev_hw_csum_supported(hdev) \ + test_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, (hdev)->ae_dev->caps) + +#define hnae3_dev_tx_push_supported(hdev) \ + test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, (hdev)->ae_dev->caps) + +#define hnae3_dev_phy_imp_supported(hdev) \ + test_bit(HNAE3_DEV_SUPPORT_PHY_IMP_B, (hdev)->ae_dev->caps) + +#define hnae3_dev_ras_imp_supported(hdev) \ + test_bit(HNAE3_DEV_SUPPORT_RAS_IMP_B, (hdev)->ae_dev->caps) + +#define hnae3_dev_tqp_txrx_indep_supported(hdev) \ + test_bit(HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B, (hdev)->ae_dev->caps) + +#define hnae3_dev_hw_pad_supported(hdev) \ + test_bit(HNAE3_DEV_SUPPORT_HW_PAD_B, (hdev)->ae_dev->caps) + +#define hnae3_dev_stash_supported(hdev) \ + test_bit(HNAE3_DEV_SUPPORT_STASH_B, (hdev)->ae_dev->caps) + +#define hnae3_dev_pause_supported(hdev) \ + test_bit(HNAE3_DEV_SUPPORT_PAUSE_B, (hdev)->ae_dev->caps) + +#define hnae3_ae_dev_tqp_txrx_indep_supported(ae_dev) \ + test_bit(HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B, (ae_dev)->caps) + +#define hnae3_ae_dev_rxd_adv_layout_supported(ae_dev) \ + test_bit(HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B, (ae_dev)->caps) + +#define hnae3_ae_dev_mc_mac_mng_supported(ae_dev) \ + test_bit(HNAE3_DEV_SUPPORT_MC_MAC_MNG_B, (ae_dev)->caps) + +#define hnae3_ae_dev_cq_supported(ae_dev) \ + test_bit(HNAE3_DEV_SUPPORT_CQ_B, (ae_dev)->caps) + +#define hnae3_ae_dev_fec_stats_supported(ae_dev) \ + test_bit(HNAE3_DEV_SUPPORT_FEC_STATS_B, (ae_dev)->caps) + +#define hnae3_ae_dev_lane_num_supported(ae_dev) \ + test_bit(HNAE3_DEV_SUPPORT_LANE_NUM_B, (ae_dev)->caps) + +enum HNAE3_PF_CAP_BITS { + HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B = 0, +}; +#define ring_ptr_move_fw(ring, p) \ + ((ring)->p = ((ring)->p + 1) % (ring)->desc_num) +#define ring_ptr_move_bw(ring, p) \ + ((ring)->p = ((ring)->p - 1 + (ring)->desc_num) % (ring)->desc_num) + +struct hnae3_handle; + +struct hnae3_queue { + void __iomem *io_base; + void __iomem *mem_base; + struct hnae3_ae_algo *ae_algo; + struct hnae3_handle *handle; + int tqp_index; /* index in a handle */ + u32 buf_size; /* size for hnae_desc->addr, preset by AE */ + u16 tx_desc_num; /* total number of tx desc */ + u16 rx_desc_num; /* total number of rx desc */ +}; + +struct hns3_mac_stats { + u64 tx_pause_cnt; + u64 rx_pause_cnt; +}; + +/* hnae3 loop mode */ +enum hnae3_loop { + HNAE3_LOOP_EXTERNAL, + HNAE3_LOOP_APP, + HNAE3_LOOP_SERIAL_SERDES, + HNAE3_LOOP_PARALLEL_SERDES, + HNAE3_LOOP_PHY, + HNAE3_LOOP_NONE, +}; + +enum hnae3_client_type { + HNAE3_CLIENT_KNIC, + HNAE3_CLIENT_ROCE, +}; + +/* mac media type */ +enum hnae3_media_type { + HNAE3_MEDIA_TYPE_UNKNOWN, + HNAE3_MEDIA_TYPE_FIBER, + HNAE3_MEDIA_TYPE_COPPER, + HNAE3_MEDIA_TYPE_BACKPLANE, + HNAE3_MEDIA_TYPE_NONE, +}; + +/* must be consistent with definition in firmware */ +enum hnae3_module_type { + HNAE3_MODULE_TYPE_UNKNOWN = 0x00, + HNAE3_MODULE_TYPE_FIBRE_LR = 0x01, + HNAE3_MODULE_TYPE_FIBRE_SR = 0x02, + HNAE3_MODULE_TYPE_AOC = 0x03, + HNAE3_MODULE_TYPE_CR = 0x04, + HNAE3_MODULE_TYPE_KR = 0x05, + HNAE3_MODULE_TYPE_TP = 0x06, +}; + +enum hnae3_fec_mode { + HNAE3_FEC_AUTO = 0, + HNAE3_FEC_BASER, + HNAE3_FEC_RS, + HNAE3_FEC_LLRS, + HNAE3_FEC_NONE, + HNAE3_FEC_USER_DEF, +}; + +enum hnae3_reset_notify_type { + HNAE3_UP_CLIENT, + HNAE3_DOWN_CLIENT, + HNAE3_INIT_CLIENT, + HNAE3_UNINIT_CLIENT, +}; + +enum hnae3_hw_error_type { + HNAE3_PPU_POISON_ERROR, + HNAE3_CMDQ_ECC_ERROR, + HNAE3_IMP_RD_POISON_ERROR, + HNAE3_ROCEE_AXI_RESP_ERROR, +}; + +enum 
hnae3_reset_type { + HNAE3_VF_RESET, + HNAE3_VF_FUNC_RESET, + HNAE3_VF_PF_FUNC_RESET, + HNAE3_VF_FULL_RESET, + HNAE3_FLR_RESET, + HNAE3_FUNC_RESET, + HNAE3_GLOBAL_RESET, + HNAE3_IMP_RESET, + HNAE3_NONE_RESET, + HNAE3_MAX_RESET, +}; + +enum hnae3_port_base_vlan_state { + HNAE3_PORT_BASE_VLAN_DISABLE, + HNAE3_PORT_BASE_VLAN_ENABLE, + HNAE3_PORT_BASE_VLAN_MODIFY, + HNAE3_PORT_BASE_VLAN_NOCHANGE, +}; + +enum hnae3_dbg_cmd { + HNAE3_DBG_CMD_TM_NODES, + HNAE3_DBG_CMD_TM_PRI, + HNAE3_DBG_CMD_TM_QSET, + HNAE3_DBG_CMD_TM_MAP, + HNAE3_DBG_CMD_TM_PG, + HNAE3_DBG_CMD_TM_PORT, + HNAE3_DBG_CMD_TC_SCH_INFO, + HNAE3_DBG_CMD_QOS_PAUSE_CFG, + HNAE3_DBG_CMD_QOS_PRI_MAP, + HNAE3_DBG_CMD_QOS_DSCP_MAP, + HNAE3_DBG_CMD_QOS_BUF_CFG, + HNAE3_DBG_CMD_DEV_INFO, + HNAE3_DBG_CMD_TX_BD, + HNAE3_DBG_CMD_RX_BD, + HNAE3_DBG_CMD_MAC_UC, + HNAE3_DBG_CMD_MAC_MC, + HNAE3_DBG_CMD_MNG_TBL, + HNAE3_DBG_CMD_LOOPBACK, + HNAE3_DBG_CMD_PTP_INFO, + HNAE3_DBG_CMD_INTERRUPT_INFO, + HNAE3_DBG_CMD_RESET_INFO, + HNAE3_DBG_CMD_IMP_INFO, + HNAE3_DBG_CMD_NCL_CONFIG, + HNAE3_DBG_CMD_REG_BIOS_COMMON, + HNAE3_DBG_CMD_REG_SSU, + HNAE3_DBG_CMD_REG_IGU_EGU, + HNAE3_DBG_CMD_REG_RPU, + HNAE3_DBG_CMD_REG_NCSI, + HNAE3_DBG_CMD_REG_RTC, + HNAE3_DBG_CMD_REG_PPP, + HNAE3_DBG_CMD_REG_RCB, + HNAE3_DBG_CMD_REG_TQP, + HNAE3_DBG_CMD_REG_MAC, + HNAE3_DBG_CMD_REG_DCB, + HNAE3_DBG_CMD_VLAN_CONFIG, + HNAE3_DBG_CMD_QUEUE_MAP, + HNAE3_DBG_CMD_RX_QUEUE_INFO, + HNAE3_DBG_CMD_TX_QUEUE_INFO, + HNAE3_DBG_CMD_FD_TCAM, + HNAE3_DBG_CMD_FD_COUNTER, + HNAE3_DBG_CMD_MAC_TNL_STATUS, + HNAE3_DBG_CMD_SERV_INFO, + HNAE3_DBG_CMD_UMV_INFO, + HNAE3_DBG_CMD_PAGE_POOL_INFO, + HNAE3_DBG_CMD_COAL_INFO, + HNAE3_DBG_CMD_UNKNOWN, +}; + +enum hnae3_tc_map_mode { + HNAE3_TC_MAP_MODE_PRIO, + HNAE3_TC_MAP_MODE_DSCP, +}; + +struct hnae3_vector_info { + u8 __iomem *io_addr; + int vector; +}; + +#define HNAE3_RING_TYPE_B 0 +#define HNAE3_RING_TYPE_TX 0 +#define HNAE3_RING_TYPE_RX 1 +#define HNAE3_RING_GL_IDX_S 0 +#define HNAE3_RING_GL_IDX_M GENMASK(1, 0) +#define HNAE3_RING_GL_RX 0 +#define HNAE3_RING_GL_TX 1 + +#define HNAE3_FW_VERSION_BYTE3_SHIFT 24 +#define HNAE3_FW_VERSION_BYTE3_MASK GENMASK(31, 24) +#define HNAE3_FW_VERSION_BYTE2_SHIFT 16 +#define HNAE3_FW_VERSION_BYTE2_MASK GENMASK(23, 16) +#define HNAE3_FW_VERSION_BYTE1_SHIFT 8 +#define HNAE3_FW_VERSION_BYTE1_MASK GENMASK(15, 8) +#define HNAE3_FW_VERSION_BYTE0_SHIFT 0 +#define HNAE3_FW_VERSION_BYTE0_MASK GENMASK(7, 0) + +struct hnae3_ring_chain_node { + struct hnae3_ring_chain_node *next; + u32 tqp_index; + u32 flag; + u32 int_gl_idx; +}; + +#define HNAE3_IS_TX_RING(node) \ + (((node)->flag & 1 << HNAE3_RING_TYPE_B) == HNAE3_RING_TYPE_TX) + +/* device specification info from firmware */ +struct hnae3_dev_specs { + u32 mac_entry_num; /* number of mac-vlan table entry */ + u32 mng_entry_num; /* number of manager table entry */ + u32 max_tm_rate; + u16 rss_ind_tbl_size; + u16 rss_key_size; + u16 int_ql_max; /* max value of interrupt coalesce based on INT_QL */ + u16 max_int_gl; /* max value of interrupt coalesce based on INT_GL */ + u8 max_non_tso_bd_num; /* max BD number of one non-TSO packet */ + u16 max_frm_size; + u16 max_qset_num; + u16 umv_size; + u16 mc_mac_size; + u32 mac_stats_num; +}; + +struct hnae3_client_ops { + int (*init_instance)(struct hnae3_handle *handle); + void (*uninit_instance)(struct hnae3_handle *handle, bool reset); + void (*link_status_change)(struct hnae3_handle *handle, bool state); + int (*reset_notify)(struct hnae3_handle *handle, + enum hnae3_reset_notify_type type); + void (*process_hw_error)(struct 
hnae3_handle *handle, + enum hnae3_hw_error_type); +}; + +#define HNAE3_CLIENT_NAME_LENGTH 16 +struct hnae3_client { + char name[HNAE3_CLIENT_NAME_LENGTH]; + unsigned long state; + enum hnae3_client_type type; + const struct hnae3_client_ops *ops; + struct list_head node; +}; + +#define HNAE3_DEV_CAPS_MAX_NUM 96 +struct hnae3_ae_dev { + struct pci_dev *pdev; + const struct hnae3_ae_ops *ops; + struct list_head node; + u32 flag; + unsigned long hw_err_reset_req; + struct hnae3_dev_specs dev_specs; + u32 dev_version; + DECLARE_BITMAP(caps, HNAE3_DEV_CAPS_MAX_NUM); + void *priv; +}; + +/* This struct defines the operation on the handle. + * + * init_ae_dev(): (mandatory) + * Get PF configure from pci_dev and initialize PF hardware + * uninit_ae_dev() + * Disable PF device and release PF resource + * register_client + * Register client to ae_dev + * unregister_client() + * Unregister client from ae_dev + * start() + * Enable the hardware + * stop() + * Disable the hardware + * start_client() + * Inform the hclge that client has been started + * stop_client() + * Inform the hclge that client has been stopped + * get_status() + * Get the carrier state of the back channel of the handle, 1 for ok, 0 for + * non-ok + * get_ksettings_an_result() + * Get negotiation status,speed and duplex + * get_media_type() + * Get media type of MAC + * check_port_speed() + * Check target speed whether is supported + * adjust_link() + * Adjust link status + * set_loopback() + * Set loopback + * set_promisc_mode + * Set promisc mode + * request_update_promisc_mode + * request to hclge(vf) to update promisc mode + * set_mtu() + * set mtu + * get_pauseparam() + * get tx and rx of pause frame use + * set_pauseparam() + * set tx and rx of pause frame use + * set_autoneg() + * set auto autonegotiation of pause frame use + * get_autoneg() + * get auto autonegotiation of pause frame use + * restart_autoneg() + * restart autonegotiation + * halt_autoneg() + * halt/resume autonegotiation when autonegotiation on + * get_coalesce_usecs() + * get usecs to delay a TX interrupt after a packet is sent + * get_rx_max_coalesced_frames() + * get Maximum number of packets to be sent before a TX interrupt. + * set_coalesce_usecs() + * set usecs to delay a TX interrupt after a packet is sent + * set_coalesce_frames() + * set Maximum number of packets to be sent before a TX interrupt. 
+ * get_mac_addr() + * get mac address + * set_mac_addr() + * set mac address + * add_uc_addr + * Add unicast addr to mac table + * rm_uc_addr + * Remove unicast addr from mac table + * set_mc_addr() + * Set multicast address + * add_mc_addr + * Add multicast address to mac table + * rm_mc_addr + * Remove multicast address from mac table + * update_stats() + * Update Old network device statistics + * get_mac_stats() + * get mac pause statistics including tx_cnt and rx_cnt + * get_ethtool_stats() + * Get ethtool network device statistics + * get_strings() + * Get a set of strings that describe the requested objects + * get_sset_count() + * Get number of strings that @get_strings will write + * update_led_status() + * Update the led status + * set_led_id() + * Set led id + * get_regs() + * Get regs dump + * get_regs_len() + * Get the len of the regs dump + * get_rss_key_size() + * Get rss key size + * get_rss() + * Get rss table + * set_rss() + * Set rss table + * get_tc_size() + * Get tc size of handle + * get_vector() + * Get vector number and vector information + * put_vector() + * Put the vector in hdev + * map_ring_to_vector() + * Map rings to vector + * unmap_ring_from_vector() + * Unmap rings from vector + * reset_queue() + * Reset queue + * get_fw_version() + * Get firmware version + * get_mdix_mode() + * Get media typr of phy + * enable_vlan_filter() + * Enable vlan filter + * set_vlan_filter() + * Set vlan filter config of Ports + * set_vf_vlan_filter() + * Set vlan filter config of vf + * enable_hw_strip_rxvtag() + * Enable/disable hardware strip vlan tag of packets received + * set_gro_en + * Enable/disable HW GRO + * add_arfs_entry + * Check the 5-tuples of flow, and create flow director rule + * get_vf_config + * Get the VF configuration setting by the host + * set_vf_link_state + * Set VF link status + * set_vf_spoofchk + * Enable/disable spoof check for specified vf + * set_vf_trust + * Enable/disable trust for specified vf, if the vf being trusted, then + * it can enable promisc mode + * set_vf_rate + * Set the max tx rate of specified vf. + * set_vf_mac + * Configure the default MAC for specified VF + * get_module_eeprom + * Get the optical module eeprom info. + * add_cls_flower + * Add clsflower rule + * del_cls_flower + * Delete clsflower rule + * cls_flower_active + * Check if any cls flower rule exist + * dbg_read_cmd + * Execute debugfs read command. 
+ * set_tx_hwts_info + * Save information for 1588 tx packet + * get_rx_hwts + * Get 1588 rx hwstamp + * get_ts_info + * Get phc info + * clean_vf_config + * Clean residual vf info after disable sriov + */ +struct hnae3_ae_ops { + int (*init_ae_dev)(struct hnae3_ae_dev *ae_dev); + void (*uninit_ae_dev)(struct hnae3_ae_dev *ae_dev); + void (*reset_prepare)(struct hnae3_ae_dev *ae_dev, + enum hnae3_reset_type rst_type); + void (*reset_done)(struct hnae3_ae_dev *ae_dev); + int (*init_client_instance)(struct hnae3_client *client, + struct hnae3_ae_dev *ae_dev); + void (*uninit_client_instance)(struct hnae3_client *client, + struct hnae3_ae_dev *ae_dev); + int (*start)(struct hnae3_handle *handle); + void (*stop)(struct hnae3_handle *handle); + int (*client_start)(struct hnae3_handle *handle); + void (*client_stop)(struct hnae3_handle *handle); + int (*get_status)(struct hnae3_handle *handle); + void (*get_ksettings_an_result)(struct hnae3_handle *handle, + u8 *auto_neg, u32 *speed, u8 *duplex, + u32 *lane_num); + + int (*cfg_mac_speed_dup_h)(struct hnae3_handle *handle, int speed, + u8 duplex, u8 lane_num); + + void (*get_media_type)(struct hnae3_handle *handle, u8 *media_type, + u8 *module_type); + int (*check_port_speed)(struct hnae3_handle *handle, u32 speed); + void (*get_fec_stats)(struct hnae3_handle *handle, + struct ethtool_fec_stats *fec_stats); + void (*get_fec)(struct hnae3_handle *handle, u8 *fec_ability, + u8 *fec_mode); + int (*set_fec)(struct hnae3_handle *handle, u32 fec_mode); + void (*adjust_link)(struct hnae3_handle *handle, int speed, int duplex); + int (*set_loopback)(struct hnae3_handle *handle, + enum hnae3_loop loop_mode, bool en); + + int (*set_promisc_mode)(struct hnae3_handle *handle, bool en_uc_pmc, + bool en_mc_pmc); + void (*request_update_promisc_mode)(struct hnae3_handle *handle); + int (*set_mtu)(struct hnae3_handle *handle, int new_mtu); + + void (*get_pauseparam)(struct hnae3_handle *handle, + u32 *auto_neg, u32 *rx_en, u32 *tx_en); + int (*set_pauseparam)(struct hnae3_handle *handle, + u32 auto_neg, u32 rx_en, u32 tx_en); + + int (*set_autoneg)(struct hnae3_handle *handle, bool enable); + int (*get_autoneg)(struct hnae3_handle *handle); + int (*restart_autoneg)(struct hnae3_handle *handle); + int (*halt_autoneg)(struct hnae3_handle *handle, bool halt); + + void (*get_coalesce_usecs)(struct hnae3_handle *handle, + u32 *tx_usecs, u32 *rx_usecs); + void (*get_rx_max_coalesced_frames)(struct hnae3_handle *handle, + u32 *tx_frames, u32 *rx_frames); + int (*set_coalesce_usecs)(struct hnae3_handle *handle, u32 timeout); + int (*set_coalesce_frames)(struct hnae3_handle *handle, + u32 coalesce_frames); + void (*get_coalesce_range)(struct hnae3_handle *handle, + u32 *tx_frames_low, u32 *rx_frames_low, + u32 *tx_frames_high, u32 *rx_frames_high, + u32 *tx_usecs_low, u32 *rx_usecs_low, + u32 *tx_usecs_high, u32 *rx_usecs_high); + + void (*get_mac_addr)(struct hnae3_handle *handle, u8 *p); + int (*set_mac_addr)(struct hnae3_handle *handle, const void *p, + bool is_first); + int (*do_ioctl)(struct hnae3_handle *handle, + struct ifreq *ifr, int cmd); + int (*add_uc_addr)(struct hnae3_handle *handle, + const unsigned char *addr); + int (*rm_uc_addr)(struct hnae3_handle *handle, + const unsigned char *addr); + int (*set_mc_addr)(struct hnae3_handle *handle, void *addr); + int (*add_mc_addr)(struct hnae3_handle *handle, + const unsigned char *addr); + int (*rm_mc_addr)(struct hnae3_handle *handle, + const unsigned char *addr); + void (*set_tso_stats)(struct hnae3_handle 
*handle, int enable); + void (*update_stats)(struct hnae3_handle *handle, + struct net_device_stats *net_stats); + void (*get_stats)(struct hnae3_handle *handle, u64 *data); + void (*get_mac_stats)(struct hnae3_handle *handle, + struct hns3_mac_stats *mac_stats); + void (*get_strings)(struct hnae3_handle *handle, + u32 stringset, u8 *data); + int (*get_sset_count)(struct hnae3_handle *handle, int stringset); + + void (*get_regs)(struct hnae3_handle *handle, u32 *version, + void *data); + int (*get_regs_len)(struct hnae3_handle *handle); + + u32 (*get_rss_key_size)(struct hnae3_handle *handle); + int (*get_rss)(struct hnae3_handle *handle, u32 *indir, u8 *key, + u8 *hfunc); + int (*set_rss)(struct hnae3_handle *handle, const u32 *indir, + const u8 *key, const u8 hfunc); + int (*set_rss_tuple)(struct hnae3_handle *handle, + struct ethtool_rxnfc *cmd); + int (*get_rss_tuple)(struct hnae3_handle *handle, + struct ethtool_rxnfc *cmd); + + int (*get_tc_size)(struct hnae3_handle *handle); + + int (*get_vector)(struct hnae3_handle *handle, u16 vector_num, + struct hnae3_vector_info *vector_info); + int (*put_vector)(struct hnae3_handle *handle, int vector_num); + int (*map_ring_to_vector)(struct hnae3_handle *handle, + int vector_num, + struct hnae3_ring_chain_node *vr_chain); + int (*unmap_ring_from_vector)(struct hnae3_handle *handle, + int vector_num, + struct hnae3_ring_chain_node *vr_chain); + + int (*reset_queue)(struct hnae3_handle *handle); + u32 (*get_fw_version)(struct hnae3_handle *handle); + void (*get_mdix_mode)(struct hnae3_handle *handle, + u8 *tp_mdix_ctrl, u8 *tp_mdix); + + int (*enable_vlan_filter)(struct hnae3_handle *handle, bool enable); + int (*set_vlan_filter)(struct hnae3_handle *handle, __be16 proto, + u16 vlan_id, bool is_kill); + int (*set_vf_vlan_filter)(struct hnae3_handle *handle, int vfid, + u16 vlan, u8 qos, __be16 proto); + int (*enable_hw_strip_rxvtag)(struct hnae3_handle *handle, bool enable); + void (*reset_event)(struct pci_dev *pdev, struct hnae3_handle *handle); + enum hnae3_reset_type (*get_reset_level)(struct hnae3_ae_dev *ae_dev, + unsigned long *addr); + void (*set_default_reset_request)(struct hnae3_ae_dev *ae_dev, + enum hnae3_reset_type rst_type); + void (*get_channels)(struct hnae3_handle *handle, + struct ethtool_channels *ch); + void (*get_tqps_and_rss_info)(struct hnae3_handle *h, + u16 *alloc_tqps, u16 *max_rss_size); + int (*set_channels)(struct hnae3_handle *handle, u32 new_tqps_num, + bool rxfh_configured); + void (*get_flowctrl_adv)(struct hnae3_handle *handle, + u32 *flowctrl_adv); + int (*set_led_id)(struct hnae3_handle *handle, + enum ethtool_phys_id_state status); + void (*get_link_mode)(struct hnae3_handle *handle, + unsigned long *supported, + unsigned long *advertising); + int (*add_fd_entry)(struct hnae3_handle *handle, + struct ethtool_rxnfc *cmd); + int (*del_fd_entry)(struct hnae3_handle *handle, + struct ethtool_rxnfc *cmd); + int (*get_fd_rule_cnt)(struct hnae3_handle *handle, + struct ethtool_rxnfc *cmd); + int (*get_fd_rule_info)(struct hnae3_handle *handle, + struct ethtool_rxnfc *cmd); + int (*get_fd_all_rules)(struct hnae3_handle *handle, + struct ethtool_rxnfc *cmd, u32 *rule_locs); + void (*enable_fd)(struct hnae3_handle *handle, bool enable); + int (*add_arfs_entry)(struct hnae3_handle *handle, u16 queue_id, + u16 flow_id, struct flow_keys *fkeys); + int (*dbg_read_cmd)(struct hnae3_handle *handle, enum hnae3_dbg_cmd cmd, + char *buf, int len); + pci_ers_result_t (*handle_hw_ras_error)(struct hnae3_ae_dev *ae_dev); + bool 
(*get_hw_reset_stat)(struct hnae3_handle *handle); + bool (*ae_dev_resetting)(struct hnae3_handle *handle); + unsigned long (*ae_dev_reset_cnt)(struct hnae3_handle *handle); + int (*set_gro_en)(struct hnae3_handle *handle, bool enable); + u16 (*get_global_queue_id)(struct hnae3_handle *handle, u16 queue_id); + void (*set_timer_task)(struct hnae3_handle *handle, bool enable); + int (*mac_connect_phy)(struct hnae3_handle *handle); + void (*mac_disconnect_phy)(struct hnae3_handle *handle); + int (*get_vf_config)(struct hnae3_handle *handle, int vf, + struct ifla_vf_info *ivf); + int (*set_vf_link_state)(struct hnae3_handle *handle, int vf, + int link_state); + int (*set_vf_spoofchk)(struct hnae3_handle *handle, int vf, + bool enable); + int (*set_vf_trust)(struct hnae3_handle *handle, int vf, bool enable); + int (*set_vf_rate)(struct hnae3_handle *handle, int vf, + int min_tx_rate, int max_tx_rate, bool force); + int (*set_vf_mac)(struct hnae3_handle *handle, int vf, u8 *p); + int (*get_module_eeprom)(struct hnae3_handle *handle, u32 offset, + u32 len, u8 *data); + bool (*get_cmdq_stat)(struct hnae3_handle *handle); + int (*add_cls_flower)(struct hnae3_handle *handle, + struct flow_cls_offload *cls_flower, int tc); + int (*del_cls_flower)(struct hnae3_handle *handle, + struct flow_cls_offload *cls_flower); + bool (*cls_flower_active)(struct hnae3_handle *handle); + int (*get_phy_link_ksettings)(struct hnae3_handle *handle, + struct ethtool_link_ksettings *cmd); + int (*set_phy_link_ksettings)(struct hnae3_handle *handle, + const struct ethtool_link_ksettings *cmd); + bool (*set_tx_hwts_info)(struct hnae3_handle *handle, + struct sk_buff *skb); + void (*get_rx_hwts)(struct hnae3_handle *handle, struct sk_buff *skb, + u32 nsec, u32 sec); + int (*get_ts_info)(struct hnae3_handle *handle, + struct ethtool_ts_info *info); + int (*get_link_diagnosis_info)(struct hnae3_handle *handle, + u32 *status_code); + void (*clean_vf_config)(struct hnae3_ae_dev *ae_dev, int num_vfs); + int (*get_dscp_prio)(struct hnae3_handle *handle, u8 dscp, + u8 *tc_map_mode, u8 *priority); +}; + +struct hnae3_dcb_ops { + /* IEEE 802.1Qaz std */ + int (*ieee_getets)(struct hnae3_handle *, struct ieee_ets *); + int (*ieee_setets)(struct hnae3_handle *, struct ieee_ets *); + int (*ieee_getpfc)(struct hnae3_handle *, struct ieee_pfc *); + int (*ieee_setpfc)(struct hnae3_handle *, struct ieee_pfc *); + int (*ieee_setapp)(struct hnae3_handle *h, struct dcb_app *app); + int (*ieee_delapp)(struct hnae3_handle *h, struct dcb_app *app); + + /* DCBX configuration */ + u8 (*getdcbx)(struct hnae3_handle *); + u8 (*setdcbx)(struct hnae3_handle *, u8); + + int (*setup_tc)(struct hnae3_handle *handle, + struct tc_mqprio_qopt_offload *mqprio_qopt); +}; + +struct hnae3_ae_algo { + const struct hnae3_ae_ops *ops; + struct list_head node; + const struct pci_device_id *pdev_id_table; +}; + +#define HNAE3_INT_NAME_LEN 32 +#define HNAE3_ITR_COUNTDOWN_START 100 + +#define HNAE3_MAX_TC 8 +#define HNAE3_MAX_USER_PRIO 8 +struct hnae3_tc_info { + u8 prio_tc[HNAE3_MAX_USER_PRIO]; /* TC indexed by prio */ + u16 tqp_count[HNAE3_MAX_TC]; + u16 tqp_offset[HNAE3_MAX_TC]; + u8 max_tc; /* Total number of TCs */ + u8 num_tc; /* Total number of enabled TCs */ + bool mqprio_active; + bool dcb_ets_active; +}; + +#define HNAE3_MAX_DSCP 64 +#define HNAE3_PRIO_ID_INVALID 0xff +struct hnae3_knic_private_info { + struct net_device *netdev; /* Set by KNIC client when init instance */ + u16 rss_size; /* Allocated RSS queues */ + u16 req_rss_size; + u16 rx_buf_len; + 
u16 num_tx_desc; + u16 num_rx_desc; + u32 tx_spare_buf_size; + + struct hnae3_tc_info tc_info; + u8 tc_map_mode; + u8 dscp_app_cnt; + u8 dscp_prio[HNAE3_MAX_DSCP]; + + u16 num_tqps; /* total number of TQPs in this handle */ + struct hnae3_queue **tqp; /* array base of all TQPs in this instance */ + const struct hnae3_dcb_ops *dcb_ops; + + u16 int_rl_setting; + void __iomem *io_base; +}; + +struct hnae3_roce_private_info { + struct net_device *netdev; + void __iomem *roce_io_base; + void __iomem *roce_mem_base; + int base_vector; + int num_vectors; + + /* The below attributes defined for RoCE client, hnae3 gives + * initial values to them, and RoCE client can modify and use + * them. + */ + unsigned long reset_state; + unsigned long instance_state; + unsigned long state; +}; + +#define HNAE3_SUPPORT_APP_LOOPBACK BIT(0) +#define HNAE3_SUPPORT_PHY_LOOPBACK BIT(1) +#define HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK BIT(2) +#define HNAE3_SUPPORT_VF BIT(3) +#define HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK BIT(4) +#define HNAE3_SUPPORT_EXTERNAL_LOOPBACK BIT(5) + +#define HNAE3_USER_UPE BIT(0) /* unicast promisc enabled by user */ +#define HNAE3_USER_MPE BIT(1) /* mulitcast promisc enabled by user */ +#define HNAE3_BPE BIT(2) /* broadcast promisc enable */ +#define HNAE3_OVERFLOW_UPE BIT(3) /* unicast mac vlan overflow */ +#define HNAE3_OVERFLOW_MPE BIT(4) /* multicast mac vlan overflow */ +#define HNAE3_UPE (HNAE3_USER_UPE | HNAE3_OVERFLOW_UPE) +#define HNAE3_MPE (HNAE3_USER_MPE | HNAE3_OVERFLOW_MPE) + +enum hnae3_pflag { + HNAE3_PFLAG_LIMIT_PROMISC, + HNAE3_PFLAG_MAX +}; + +struct hnae3_handle { + struct hnae3_client *client; + struct pci_dev *pdev; + void *priv; + struct hnae3_ae_algo *ae_algo; /* the class who provides this handle */ + u64 flags; /* Indicate the capabilities for this handle */ + + union { + struct net_device *netdev; /* first member */ + struct hnae3_knic_private_info kinfo; + struct hnae3_roce_private_info rinfo; + }; + + u32 numa_node_mask; /* for multi-chip support */ + + enum hnae3_port_base_vlan_state port_base_vlan_state; + + u8 netdev_flags; + struct dentry *hnae3_dbgfs; + /* protects concurrent contention between debugfs commands */ + struct mutex dbgfs_lock; + char **dbgfs_buf; + + /* Network interface message level enabled bits */ + u32 msg_enable; + + unsigned long supported_pflags; + unsigned long priv_flags; +}; + +#define hnae3_set_field(origin, mask, shift, val) \ + do { \ + (origin) &= (~(mask)); \ + (origin) |= ((val) << (shift)) & (mask); \ + } while (0) +#define hnae3_get_field(origin, mask, shift) (((origin) & (mask)) >> (shift)) + +#define hnae3_set_bit(origin, shift, val) \ + hnae3_set_field(origin, 0x1 << (shift), shift, val) +#define hnae3_get_bit(origin, shift) \ + hnae3_get_field(origin, 0x1 << (shift), shift) + +#define HNAE3_FORMAT_MAC_ADDR_LEN 18 +#define HNAE3_FORMAT_MAC_ADDR_OFFSET_0 0 +#define HNAE3_FORMAT_MAC_ADDR_OFFSET_4 4 +#define HNAE3_FORMAT_MAC_ADDR_OFFSET_5 5 + +static inline void hnae3_format_mac_addr(char *format_mac_addr, + const u8 *mac_addr) +{ + snprintf(format_mac_addr, HNAE3_FORMAT_MAC_ADDR_LEN, "%02x:**:**:**:%02x:%02x", + mac_addr[HNAE3_FORMAT_MAC_ADDR_OFFSET_0], + mac_addr[HNAE3_FORMAT_MAC_ADDR_OFFSET_4], + mac_addr[HNAE3_FORMAT_MAC_ADDR_OFFSET_5]); +} + +int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev); +void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev); + +void hnae3_unregister_ae_algo_prepare(struct hnae3_ae_algo *ae_algo); +void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo); +void 
hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo); + +void hnae3_unregister_client(struct hnae3_client *client); +int hnae3_register_client(struct hnae3_client *client); + +void hnae3_set_client_init_flag(struct hnae3_client *client, + struct hnae3_ae_dev *ae_dev, + unsigned int inited); +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c new file mode 100644 index 000000000..2ccb0f546 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c @@ -0,0 +1,652 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2021-2021 Hisilicon Limited. + +#include "hnae3.h" +#include "hclge_comm_cmd.h" + +static void hclge_comm_cmd_config_regs(struct hclge_comm_hw *hw, + struct hclge_comm_cmq_ring *ring) +{ + dma_addr_t dma = ring->desc_dma_addr; + u32 reg_val; + + if (ring->ring_type == HCLGE_COMM_TYPE_CSQ) { + hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG, + lower_32_bits(dma)); + hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG, + upper_32_bits(dma)); + reg_val = hclge_comm_read_dev(hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG); + reg_val &= HCLGE_COMM_NIC_SW_RST_RDY; + reg_val |= ring->desc_num >> HCLGE_COMM_NIC_CMQ_DESC_NUM_S; + hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG, reg_val); + hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_HEAD_REG, 0); + hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_TAIL_REG, 0); + } else { + hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG, + lower_32_bits(dma)); + hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG, + upper_32_bits(dma)); + reg_val = ring->desc_num >> HCLGE_COMM_NIC_CMQ_DESC_NUM_S; + hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_DEPTH_REG, reg_val); + hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_HEAD_REG, 0); + hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_TAIL_REG, 0); + } +} + +void hclge_comm_cmd_init_regs(struct hclge_comm_hw *hw) +{ + hclge_comm_cmd_config_regs(hw, &hw->cmq.csq); + hclge_comm_cmd_config_regs(hw, &hw->cmq.crq); +} + +void hclge_comm_cmd_reuse_desc(struct hclge_desc *desc, bool is_read) +{ + desc->flag = cpu_to_le16(HCLGE_COMM_CMD_FLAG_NO_INTR | + HCLGE_COMM_CMD_FLAG_IN); + if (is_read) + desc->flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_WR); + else + desc->flag &= cpu_to_le16(~HCLGE_COMM_CMD_FLAG_WR); +} + +static void hclge_comm_set_default_capability(struct hnae3_ae_dev *ae_dev, + bool is_pf) +{ + set_bit(HNAE3_DEV_SUPPORT_GRO_B, ae_dev->caps); + if (is_pf) { + set_bit(HNAE3_DEV_SUPPORT_FD_B, ae_dev->caps); + set_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps); + set_bit(HNAE3_DEV_SUPPORT_PAUSE_B, ae_dev->caps); + } +} + +void hclge_comm_cmd_setup_basic_desc(struct hclge_desc *desc, + enum hclge_opcode_type opcode, + bool is_read) +{ + memset((void *)desc, 0, sizeof(struct hclge_desc)); + desc->opcode = cpu_to_le16(opcode); + desc->flag = cpu_to_le16(HCLGE_COMM_CMD_FLAG_NO_INTR | + HCLGE_COMM_CMD_FLAG_IN); + + if (is_read) + desc->flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_WR); +} + +int hclge_comm_firmware_compat_config(struct hnae3_ae_dev *ae_dev, + struct hclge_comm_hw *hw, bool en) +{ + struct hclge_comm_firmware_compat_cmd *req; + struct hclge_desc desc; + u32 compat = 0; + + hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_IMP_COMPAT_CFG, false); + + if (en) { + req = (struct hclge_comm_firmware_compat_cmd *)desc.data; + + hnae3_set_bit(compat, HCLGE_COMM_LINK_EVENT_REPORT_EN_B, 1); + hnae3_set_bit(compat, HCLGE_COMM_NCSI_ERROR_REPORT_EN_B, 1); + if 
(hclge_comm_dev_phy_imp_supported(ae_dev)) + hnae3_set_bit(compat, HCLGE_COMM_PHY_IMP_EN_B, 1); + hnae3_set_bit(compat, HCLGE_COMM_MAC_STATS_EXT_EN_B, 1); + hnae3_set_bit(compat, HCLGE_COMM_SYNC_RX_RING_HEAD_EN_B, 1); + hnae3_set_bit(compat, HCLGE_COMM_LLRS_FEC_EN_B, 1); + + req->compat = cpu_to_le32(compat); + } + + return hclge_comm_cmd_send(hw, &desc, 1); +} + +void hclge_comm_free_cmd_desc(struct hclge_comm_cmq_ring *ring) +{ + int size = ring->desc_num * sizeof(struct hclge_desc); + + if (!ring->desc) + return; + + dma_free_coherent(&ring->pdev->dev, size, + ring->desc, ring->desc_dma_addr); + ring->desc = NULL; +} + +static int hclge_comm_alloc_cmd_desc(struct hclge_comm_cmq_ring *ring) +{ + int size = ring->desc_num * sizeof(struct hclge_desc); + + ring->desc = dma_alloc_coherent(&ring->pdev->dev, + size, &ring->desc_dma_addr, GFP_KERNEL); + if (!ring->desc) + return -ENOMEM; + + return 0; +} + +static __le32 hclge_comm_build_api_caps(void) +{ + u32 api_caps = 0; + + hnae3_set_bit(api_caps, HCLGE_COMM_API_CAP_FLEX_RSS_TBL_B, 1); + + return cpu_to_le32(api_caps); +} + +static const struct hclge_comm_caps_bit_map hclge_pf_cmd_caps[] = { + {HCLGE_COMM_CAP_UDP_GSO_B, HNAE3_DEV_SUPPORT_UDP_GSO_B}, + {HCLGE_COMM_CAP_PTP_B, HNAE3_DEV_SUPPORT_PTP_B}, + {HCLGE_COMM_CAP_INT_QL_B, HNAE3_DEV_SUPPORT_INT_QL_B}, + {HCLGE_COMM_CAP_TQP_TXRX_INDEP_B, HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B}, + {HCLGE_COMM_CAP_HW_TX_CSUM_B, HNAE3_DEV_SUPPORT_HW_TX_CSUM_B}, + {HCLGE_COMM_CAP_UDP_TUNNEL_CSUM_B, HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B}, + {HCLGE_COMM_CAP_FD_FORWARD_TC_B, HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B}, + {HCLGE_COMM_CAP_FEC_B, HNAE3_DEV_SUPPORT_FEC_B}, + {HCLGE_COMM_CAP_PAUSE_B, HNAE3_DEV_SUPPORT_PAUSE_B}, + {HCLGE_COMM_CAP_PHY_IMP_B, HNAE3_DEV_SUPPORT_PHY_IMP_B}, + {HCLGE_COMM_CAP_QB_B, HNAE3_DEV_SUPPORT_QB_B}, + {HCLGE_COMM_CAP_TX_PUSH_B, HNAE3_DEV_SUPPORT_TX_PUSH_B}, + {HCLGE_COMM_CAP_RAS_IMP_B, HNAE3_DEV_SUPPORT_RAS_IMP_B}, + {HCLGE_COMM_CAP_RXD_ADV_LAYOUT_B, HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B}, + {HCLGE_COMM_CAP_PORT_VLAN_BYPASS_B, + HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B}, + {HCLGE_COMM_CAP_PORT_VLAN_BYPASS_B, HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B}, + {HCLGE_COMM_CAP_CQ_B, HNAE3_DEV_SUPPORT_CQ_B}, + {HCLGE_COMM_CAP_GRO_B, HNAE3_DEV_SUPPORT_GRO_B}, + {HCLGE_COMM_CAP_FD_B, HNAE3_DEV_SUPPORT_FD_B}, + {HCLGE_COMM_CAP_FEC_STATS_B, HNAE3_DEV_SUPPORT_FEC_STATS_B}, + {HCLGE_COMM_CAP_LANE_NUM_B, HNAE3_DEV_SUPPORT_LANE_NUM_B}, +}; + +static const struct hclge_comm_caps_bit_map hclge_vf_cmd_caps[] = { + {HCLGE_COMM_CAP_UDP_GSO_B, HNAE3_DEV_SUPPORT_UDP_GSO_B}, + {HCLGE_COMM_CAP_INT_QL_B, HNAE3_DEV_SUPPORT_INT_QL_B}, + {HCLGE_COMM_CAP_TQP_TXRX_INDEP_B, HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B}, + {HCLGE_COMM_CAP_HW_TX_CSUM_B, HNAE3_DEV_SUPPORT_HW_TX_CSUM_B}, + {HCLGE_COMM_CAP_UDP_TUNNEL_CSUM_B, HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B}, + {HCLGE_COMM_CAP_QB_B, HNAE3_DEV_SUPPORT_QB_B}, + {HCLGE_COMM_CAP_TX_PUSH_B, HNAE3_DEV_SUPPORT_TX_PUSH_B}, + {HCLGE_COMM_CAP_RXD_ADV_LAYOUT_B, HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B}, + {HCLGE_COMM_CAP_CQ_B, HNAE3_DEV_SUPPORT_CQ_B}, + {HCLGE_COMM_CAP_GRO_B, HNAE3_DEV_SUPPORT_GRO_B}, +}; + +static void +hclge_comm_capability_to_bitmap(unsigned long *bitmap, __le32 *caps) +{ + const unsigned int words = HCLGE_COMM_QUERY_CAP_LENGTH; + u32 val[HCLGE_COMM_QUERY_CAP_LENGTH]; + unsigned int i; + + for (i = 0; i < words; i++) + val[i] = __le32_to_cpu(caps[i]); + + bitmap_from_arr32(bitmap, val, + HCLGE_COMM_QUERY_CAP_LENGTH * BITS_PER_TYPE(u32)); +} + +static void +hclge_comm_parse_capability(struct 
hnae3_ae_dev *ae_dev, bool is_pf, + struct hclge_comm_query_version_cmd *cmd) +{ + const struct hclge_comm_caps_bit_map *caps_map = + is_pf ? hclge_pf_cmd_caps : hclge_vf_cmd_caps; + u32 size = is_pf ? ARRAY_SIZE(hclge_pf_cmd_caps) : + ARRAY_SIZE(hclge_vf_cmd_caps); + DECLARE_BITMAP(caps, HCLGE_COMM_QUERY_CAP_LENGTH * BITS_PER_TYPE(u32)); + u32 i; + + hclge_comm_capability_to_bitmap(caps, cmd->caps); + for (i = 0; i < size; i++) + if (test_bit(caps_map[i].imp_bit, caps)) + set_bit(caps_map[i].local_bit, ae_dev->caps); +} + +int hclge_comm_alloc_cmd_queue(struct hclge_comm_hw *hw, int ring_type) +{ + struct hclge_comm_cmq_ring *ring = + (ring_type == HCLGE_COMM_TYPE_CSQ) ? &hw->cmq.csq : + &hw->cmq.crq; + int ret; + + ring->ring_type = ring_type; + + ret = hclge_comm_alloc_cmd_desc(ring); + if (ret) + dev_err(&ring->pdev->dev, "descriptor %s alloc error %d\n", + (ring_type == HCLGE_COMM_TYPE_CSQ) ? "CSQ" : "CRQ", + ret); + + return ret; +} + +int hclge_comm_cmd_query_version_and_capability(struct hnae3_ae_dev *ae_dev, + struct hclge_comm_hw *hw, + u32 *fw_version, bool is_pf) +{ + struct hclge_comm_query_version_cmd *resp; + struct hclge_desc desc; + int ret; + + hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FW_VER, 1); + resp = (struct hclge_comm_query_version_cmd *)desc.data; + resp->api_caps = hclge_comm_build_api_caps(); + + ret = hclge_comm_cmd_send(hw, &desc, 1); + if (ret) + return ret; + + *fw_version = le32_to_cpu(resp->firmware); + + ae_dev->dev_version = le32_to_cpu(resp->hardware) << + HNAE3_PCI_REVISION_BIT_SIZE; + ae_dev->dev_version |= ae_dev->pdev->revision; + + if (ae_dev->dev_version == HNAE3_DEVICE_VERSION_V2) { + hclge_comm_set_default_capability(ae_dev, is_pf); + return 0; + } + + hclge_comm_parse_capability(ae_dev, is_pf, resp); + + return ret; +} + +static const u16 spec_opcode[] = { HCLGE_OPC_STATS_64_BIT, + HCLGE_OPC_STATS_32_BIT, + HCLGE_OPC_STATS_MAC, + HCLGE_OPC_STATS_MAC_ALL, + HCLGE_OPC_QUERY_32_BIT_REG, + HCLGE_OPC_QUERY_64_BIT_REG, + HCLGE_QUERY_CLEAR_MPF_RAS_INT, + HCLGE_QUERY_CLEAR_PF_RAS_INT, + HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT, + HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT, + HCLGE_QUERY_ALL_ERR_INFO }; + +static bool hclge_comm_is_special_opcode(u16 opcode) +{ + /* these commands have several descriptors, + * and use the first one to save opcode and return value + */ + u32 i; + + for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) + if (spec_opcode[i] == opcode) + return true; + + return false; +} + +static int hclge_comm_ring_space(struct hclge_comm_cmq_ring *ring) +{ + int ntc = ring->next_to_clean; + int ntu = ring->next_to_use; + int used = (ntu - ntc + ring->desc_num) % ring->desc_num; + + return ring->desc_num - used - 1; +} + +static void hclge_comm_cmd_copy_desc(struct hclge_comm_hw *hw, + struct hclge_desc *desc, int num) +{ + struct hclge_desc *desc_to_use; + int handle = 0; + + while (handle < num) { + desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use]; + *desc_to_use = desc[handle]; + (hw->cmq.csq.next_to_use)++; + if (hw->cmq.csq.next_to_use >= hw->cmq.csq.desc_num) + hw->cmq.csq.next_to_use = 0; + handle++; + } +} + +static int hclge_comm_is_valid_csq_clean_head(struct hclge_comm_cmq_ring *ring, + int head) +{ + int ntc = ring->next_to_clean; + int ntu = ring->next_to_use; + + if (ntu > ntc) + return head >= ntc && head <= ntu; + + return head >= ntc || head <= ntu; +} + +static int hclge_comm_cmd_csq_clean(struct hclge_comm_hw *hw) +{ + struct hclge_comm_cmq_ring *csq = &hw->cmq.csq; + int clean; + u32 head; + + head = 
hclge_comm_read_dev(hw, HCLGE_COMM_NIC_CSQ_HEAD_REG); + rmb(); /* Make sure head is ready before touch any data */ + + if (!hclge_comm_is_valid_csq_clean_head(csq, head)) { + dev_warn(&hw->cmq.csq.pdev->dev, "wrong cmd head (%u, %d-%d)\n", + head, csq->next_to_use, csq->next_to_clean); + dev_warn(&hw->cmq.csq.pdev->dev, + "Disabling any further commands to IMP firmware\n"); + set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state); + dev_warn(&hw->cmq.csq.pdev->dev, + "IMP firmware watchdog reset soon expected!\n"); + return -EIO; + } + + clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num; + csq->next_to_clean = head; + return clean; +} + +static int hclge_comm_cmd_csq_done(struct hclge_comm_hw *hw) +{ + u32 head = hclge_comm_read_dev(hw, HCLGE_COMM_NIC_CSQ_HEAD_REG); + return head == hw->cmq.csq.next_to_use; +} + +static u32 hclge_get_cmdq_tx_timeout(u16 opcode, u32 tx_timeout) +{ + static const struct hclge_cmdq_tx_timeout_map cmdq_tx_timeout_map[] = { + {HCLGE_OPC_CFG_RST_TRIGGER, HCLGE_COMM_CMDQ_TX_TIMEOUT_500MS}, + }; + u32 i; + + for (i = 0; i < ARRAY_SIZE(cmdq_tx_timeout_map); i++) + if (cmdq_tx_timeout_map[i].opcode == opcode) + return cmdq_tx_timeout_map[i].tx_timeout; + + return tx_timeout; +} + +static void hclge_comm_wait_for_resp(struct hclge_comm_hw *hw, u16 opcode, + bool *is_completed) +{ + u32 cmdq_tx_timeout = hclge_get_cmdq_tx_timeout(opcode, + hw->cmq.tx_timeout); + u32 timeout = 0; + + do { + if (hclge_comm_cmd_csq_done(hw)) { + *is_completed = true; + break; + } + udelay(1); + timeout++; + } while (timeout < cmdq_tx_timeout); +} + +static int hclge_comm_cmd_convert_err_code(u16 desc_ret) +{ + struct hclge_comm_errcode hclge_comm_cmd_errcode[] = { + { HCLGE_COMM_CMD_EXEC_SUCCESS, 0 }, + { HCLGE_COMM_CMD_NO_AUTH, -EPERM }, + { HCLGE_COMM_CMD_NOT_SUPPORTED, -EOPNOTSUPP }, + { HCLGE_COMM_CMD_QUEUE_FULL, -EXFULL }, + { HCLGE_COMM_CMD_NEXT_ERR, -ENOSR }, + { HCLGE_COMM_CMD_UNEXE_ERR, -ENOTBLK }, + { HCLGE_COMM_CMD_PARA_ERR, -EINVAL }, + { HCLGE_COMM_CMD_RESULT_ERR, -ERANGE }, + { HCLGE_COMM_CMD_TIMEOUT, -ETIME }, + { HCLGE_COMM_CMD_HILINK_ERR, -ENOLINK }, + { HCLGE_COMM_CMD_QUEUE_ILLEGAL, -ENXIO }, + { HCLGE_COMM_CMD_INVALID, -EBADR }, + }; + u32 errcode_count = ARRAY_SIZE(hclge_comm_cmd_errcode); + u32 i; + + for (i = 0; i < errcode_count; i++) + if (hclge_comm_cmd_errcode[i].imp_errcode == desc_ret) + return hclge_comm_cmd_errcode[i].common_errno; + + return -EIO; +} + +static int hclge_comm_cmd_check_retval(struct hclge_comm_hw *hw, + struct hclge_desc *desc, int num, + int ntc) +{ + u16 opcode, desc_ret; + int handle; + + opcode = le16_to_cpu(desc[0].opcode); + for (handle = 0; handle < num; handle++) { + desc[handle] = hw->cmq.csq.desc[ntc]; + ntc++; + if (ntc >= hw->cmq.csq.desc_num) + ntc = 0; + } + if (likely(!hclge_comm_is_special_opcode(opcode))) + desc_ret = le16_to_cpu(desc[num - 1].retval); + else + desc_ret = le16_to_cpu(desc[0].retval); + + hw->cmq.last_status = desc_ret; + + return hclge_comm_cmd_convert_err_code(desc_ret); +} + +static int hclge_comm_cmd_check_result(struct hclge_comm_hw *hw, + struct hclge_desc *desc, + int num, int ntc) +{ + bool is_completed = false; + int handle, ret; + + /* If the command is sync, wait for the firmware to write back, + * if multi descriptors to be sent, use the first one to check + */ + if (HCLGE_COMM_SEND_SYNC(le16_to_cpu(desc->flag))) + hclge_comm_wait_for_resp(hw, le16_to_cpu(desc->opcode), + &is_completed); + + if (!is_completed) + ret = -EBADE; + else + ret = hclge_comm_cmd_check_retval(hw, desc, 
num, ntc); + + /* Clean the command send queue */ + handle = hclge_comm_cmd_csq_clean(hw); + if (handle < 0) + ret = handle; + else if (handle != num) + dev_warn(&hw->cmq.csq.pdev->dev, + "cleaned %d, need to clean %d\n", handle, num); + return ret; +} + +/** + * hclge_comm_cmd_send - send command to command queue + * @hw: pointer to the hw struct + * @desc: prefilled descriptor for describing the command + * @num : the number of descriptors to be sent + * + * This is the main send command for command queue, it + * sends the queue, cleans the queue, etc + **/ +int hclge_comm_cmd_send(struct hclge_comm_hw *hw, struct hclge_desc *desc, + int num) +{ + struct hclge_comm_cmq_ring *csq = &hw->cmq.csq; + int ret; + int ntc; + + spin_lock_bh(&hw->cmq.csq.lock); + + if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state)) { + spin_unlock_bh(&hw->cmq.csq.lock); + return -EBUSY; + } + + if (num > hclge_comm_ring_space(&hw->cmq.csq)) { + /* If CMDQ ring is full, SW HEAD and HW HEAD may be different, + * need update the SW HEAD pointer csq->next_to_clean + */ + csq->next_to_clean = + hclge_comm_read_dev(hw, HCLGE_COMM_NIC_CSQ_HEAD_REG); + spin_unlock_bh(&hw->cmq.csq.lock); + return -EBUSY; + } + + /** + * Record the location of desc in the ring for this time + * which will be use for hardware to write back + */ + ntc = hw->cmq.csq.next_to_use; + + hclge_comm_cmd_copy_desc(hw, desc, num); + + /* Write to hardware */ + hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_TAIL_REG, + hw->cmq.csq.next_to_use); + + ret = hclge_comm_cmd_check_result(hw, desc, num, ntc); + + spin_unlock_bh(&hw->cmq.csq.lock); + + return ret; +} + +static void hclge_comm_cmd_uninit_regs(struct hclge_comm_hw *hw) +{ + hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG, 0); + hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG, 0); + hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG, 0); + hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_HEAD_REG, 0); + hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_TAIL_REG, 0); + hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG, 0); + hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG, 0); + hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_DEPTH_REG, 0); + hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_HEAD_REG, 0); + hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_TAIL_REG, 0); +} + +void hclge_comm_cmd_uninit(struct hnae3_ae_dev *ae_dev, + struct hclge_comm_hw *hw) +{ + struct hclge_comm_cmq *cmdq = &hw->cmq; + + hclge_comm_firmware_compat_config(ae_dev, hw, false); + set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state); + + /* wait to ensure that the firmware completes the possible left + * over commands. 
+ */ + msleep(HCLGE_COMM_CMDQ_CLEAR_WAIT_TIME); + spin_lock_bh(&cmdq->csq.lock); + spin_lock(&cmdq->crq.lock); + hclge_comm_cmd_uninit_regs(hw); + spin_unlock(&cmdq->crq.lock); + spin_unlock_bh(&cmdq->csq.lock); + + hclge_comm_free_cmd_desc(&cmdq->csq); + hclge_comm_free_cmd_desc(&cmdq->crq); +} + +int hclge_comm_cmd_queue_init(struct pci_dev *pdev, struct hclge_comm_hw *hw) +{ + struct hclge_comm_cmq *cmdq = &hw->cmq; + int ret; + + /* Setup the lock for command queue */ + spin_lock_init(&cmdq->csq.lock); + spin_lock_init(&cmdq->crq.lock); + + cmdq->csq.pdev = pdev; + cmdq->crq.pdev = pdev; + + /* Setup the queue entries for use cmd queue */ + cmdq->csq.desc_num = HCLGE_COMM_NIC_CMQ_DESC_NUM; + cmdq->crq.desc_num = HCLGE_COMM_NIC_CMQ_DESC_NUM; + + /* Setup Tx write back timeout */ + cmdq->tx_timeout = HCLGE_COMM_CMDQ_TX_TIMEOUT_DEFAULT; + + /* Setup queue rings */ + ret = hclge_comm_alloc_cmd_queue(hw, HCLGE_COMM_TYPE_CSQ); + if (ret) { + dev_err(&pdev->dev, "CSQ ring setup error %d\n", ret); + return ret; + } + + ret = hclge_comm_alloc_cmd_queue(hw, HCLGE_COMM_TYPE_CRQ); + if (ret) { + dev_err(&pdev->dev, "CRQ ring setup error %d\n", ret); + goto err_csq; + } + + return 0; +err_csq: + hclge_comm_free_cmd_desc(&hw->cmq.csq); + return ret; +} + +int hclge_comm_cmd_init(struct hnae3_ae_dev *ae_dev, struct hclge_comm_hw *hw, + u32 *fw_version, bool is_pf, + unsigned long reset_pending) +{ + struct hclge_comm_cmq *cmdq = &hw->cmq; + int ret; + + spin_lock_bh(&cmdq->csq.lock); + spin_lock(&cmdq->crq.lock); + + cmdq->csq.next_to_clean = 0; + cmdq->csq.next_to_use = 0; + cmdq->crq.next_to_clean = 0; + cmdq->crq.next_to_use = 0; + + hclge_comm_cmd_init_regs(hw); + + spin_unlock(&cmdq->crq.lock); + spin_unlock_bh(&cmdq->csq.lock); + + clear_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state); + + /* Check if there is new reset pending, because the higher level + * reset may happen when lower level reset is being processed. + */ + if (reset_pending) { + ret = -EBUSY; + goto err_cmd_init; + } + + /* get version and device capabilities */ + ret = hclge_comm_cmd_query_version_and_capability(ae_dev, hw, + fw_version, is_pf); + if (ret) { + dev_err(&ae_dev->pdev->dev, + "failed to query version and capabilities, ret = %d\n", + ret); + goto err_cmd_init; + } + + dev_info(&ae_dev->pdev->dev, + "The firmware version is %lu.%lu.%lu.%lu\n", + hnae3_get_field(*fw_version, HNAE3_FW_VERSION_BYTE3_MASK, + HNAE3_FW_VERSION_BYTE3_SHIFT), + hnae3_get_field(*fw_version, HNAE3_FW_VERSION_BYTE2_MASK, + HNAE3_FW_VERSION_BYTE2_SHIFT), + hnae3_get_field(*fw_version, HNAE3_FW_VERSION_BYTE1_MASK, + HNAE3_FW_VERSION_BYTE1_SHIFT), + hnae3_get_field(*fw_version, HNAE3_FW_VERSION_BYTE0_MASK, + HNAE3_FW_VERSION_BYTE0_SHIFT)); + + if (!is_pf && ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) + return 0; + + /* ask the firmware to enable some features, driver can work without + * it. + */ + ret = hclge_comm_firmware_compat_config(ae_dev, hw, true); + if (ret) + dev_warn(&ae_dev->pdev->dev, + "Firmware compatible features not enabled(%d).\n", + ret); + return 0; + +err_cmd_init: + set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state); + + return ret; +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h new file mode 100644 index 000000000..2b2928c6d --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h @@ -0,0 +1,471 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +// Copyright (c) 2021-2021 Hisilicon Limited. 
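The firmware version that hclge_comm_cmd_init() prints above is a single packed 32-bit word that is unpacked with the hnae3_get_field() mask/shift helper from hnae3.h. The following standalone sketch mirrors that unpacking; the concrete mask and shift values are assumptions standing in for HNAE3_FW_VERSION_BYTE*_MASK/_SHIFT, which are defined in hnae3.h and not shown in this hunk.

/* Sketch: unpack a packed firmware version the way hclge_comm_cmd_init()
 * prints it. fw_get_field() mirrors hnae3_get_field(); the byte-per-field
 * masks/shifts below are assumed values, not the driver's definitions.
 */
#include <stdio.h>

#define fw_get_field(origin, mask, shift) (((origin) & (mask)) >> (shift))

int main(void)
{
	unsigned int fw_version = 0x01020304;	/* example packed value */

	printf("The firmware version is %u.%u.%u.%u\n",
	       fw_get_field(fw_version, 0xff000000u, 24),	/* byte 3 */
	       fw_get_field(fw_version, 0x00ff0000u, 16),	/* byte 2 */
	       fw_get_field(fw_version, 0x0000ff00u, 8),	/* byte 1 */
	       fw_get_field(fw_version, 0x000000ffu, 0));	/* byte 0 */
	return 0;
}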
+ +#ifndef __HCLGE_COMM_CMD_H +#define __HCLGE_COMM_CMD_H +#include + +#include "hnae3.h" + +#define HCLGE_COMM_CMD_FLAG_IN BIT(0) +#define HCLGE_COMM_CMD_FLAG_NEXT BIT(2) +#define HCLGE_COMM_CMD_FLAG_WR BIT(3) +#define HCLGE_COMM_CMD_FLAG_NO_INTR BIT(4) + +#define HCLGE_COMM_SEND_SYNC(flag) \ + ((flag) & HCLGE_COMM_CMD_FLAG_NO_INTR) + +#define HCLGE_COMM_LINK_EVENT_REPORT_EN_B 0 +#define HCLGE_COMM_NCSI_ERROR_REPORT_EN_B 1 +#define HCLGE_COMM_PHY_IMP_EN_B 2 +#define HCLGE_COMM_MAC_STATS_EXT_EN_B 3 +#define HCLGE_COMM_SYNC_RX_RING_HEAD_EN_B 4 +#define HCLGE_COMM_LLRS_FEC_EN_B 5 + +#define hclge_comm_dev_phy_imp_supported(ae_dev) \ + test_bit(HNAE3_DEV_SUPPORT_PHY_IMP_B, (ae_dev)->caps) + +#define HCLGE_COMM_TYPE_CRQ 0 +#define HCLGE_COMM_TYPE_CSQ 1 + +#define HCLGE_COMM_CMDQ_CLEAR_WAIT_TIME 200 + +/* bar registers for cmdq */ +#define HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG 0x27000 +#define HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG 0x27004 +#define HCLGE_COMM_NIC_CSQ_DEPTH_REG 0x27008 +#define HCLGE_COMM_NIC_CSQ_TAIL_REG 0x27010 +#define HCLGE_COMM_NIC_CSQ_HEAD_REG 0x27014 +#define HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG 0x27018 +#define HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG 0x2701C +#define HCLGE_COMM_NIC_CRQ_DEPTH_REG 0x27020 +#define HCLGE_COMM_NIC_CRQ_TAIL_REG 0x27024 +#define HCLGE_COMM_NIC_CRQ_HEAD_REG 0x27028 +/* Vector0 interrupt CMDQ event source register(RW) */ +#define HCLGE_COMM_VECTOR0_CMDQ_SRC_REG 0x27100 +/* Vector0 interrupt CMDQ event status register(RO) */ +#define HCLGE_COMM_VECTOR0_CMDQ_STATE_REG 0x27104 +#define HCLGE_COMM_CMDQ_INTR_EN_REG 0x27108 +#define HCLGE_COMM_CMDQ_INTR_GEN_REG 0x2710C +#define HCLGE_COMM_CMDQ_INTR_STS_REG 0x27104 + +/* this bit indicates that the driver is ready for hardware reset */ +#define HCLGE_COMM_NIC_SW_RST_RDY_B 16 +#define HCLGE_COMM_NIC_SW_RST_RDY BIT(HCLGE_COMM_NIC_SW_RST_RDY_B) +#define HCLGE_COMM_NIC_CMQ_DESC_NUM_S 3 +#define HCLGE_COMM_NIC_CMQ_DESC_NUM 1024 +#define HCLGE_COMM_CMDQ_TX_TIMEOUT_DEFAULT 30000 +#define HCLGE_COMM_CMDQ_TX_TIMEOUT_500MS 500000 + +enum hclge_opcode_type { + /* Generic commands */ + HCLGE_OPC_QUERY_FW_VER = 0x0001, + HCLGE_OPC_CFG_RST_TRIGGER = 0x0020, + HCLGE_OPC_GBL_RST_STATUS = 0x0021, + HCLGE_OPC_QUERY_FUNC_STATUS = 0x0022, + HCLGE_OPC_QUERY_PF_RSRC = 0x0023, + HCLGE_OPC_QUERY_VF_RSRC = 0x0024, + HCLGE_OPC_GET_CFG_PARAM = 0x0025, + HCLGE_OPC_PF_RST_DONE = 0x0026, + HCLGE_OPC_QUERY_VF_RST_RDY = 0x0027, + + HCLGE_OPC_STATS_64_BIT = 0x0030, + HCLGE_OPC_STATS_32_BIT = 0x0031, + HCLGE_OPC_STATS_MAC = 0x0032, + HCLGE_OPC_QUERY_MAC_REG_NUM = 0x0033, + HCLGE_OPC_STATS_MAC_ALL = 0x0034, + + HCLGE_OPC_QUERY_REG_NUM = 0x0040, + HCLGE_OPC_QUERY_32_BIT_REG = 0x0041, + HCLGE_OPC_QUERY_64_BIT_REG = 0x0042, + HCLGE_OPC_DFX_BD_NUM = 0x0043, + HCLGE_OPC_DFX_BIOS_COMMON_REG = 0x0044, + HCLGE_OPC_DFX_SSU_REG_0 = 0x0045, + HCLGE_OPC_DFX_SSU_REG_1 = 0x0046, + HCLGE_OPC_DFX_IGU_EGU_REG = 0x0047, + HCLGE_OPC_DFX_RPU_REG_0 = 0x0048, + HCLGE_OPC_DFX_RPU_REG_1 = 0x0049, + HCLGE_OPC_DFX_NCSI_REG = 0x004A, + HCLGE_OPC_DFX_RTC_REG = 0x004B, + HCLGE_OPC_DFX_PPP_REG = 0x004C, + HCLGE_OPC_DFX_RCB_REG = 0x004D, + HCLGE_OPC_DFX_TQP_REG = 0x004E, + HCLGE_OPC_DFX_SSU_REG_2 = 0x004F, + + HCLGE_OPC_QUERY_DEV_SPECS = 0x0050, + + /* MAC command */ + HCLGE_OPC_CONFIG_MAC_MODE = 0x0301, + HCLGE_OPC_CONFIG_AN_MODE = 0x0304, + HCLGE_OPC_QUERY_LINK_STATUS = 0x0307, + HCLGE_OPC_CONFIG_MAX_FRM_SIZE = 0x0308, + HCLGE_OPC_CONFIG_SPEED_DUP = 0x0309, + HCLGE_OPC_QUERY_MAC_TNL_INT = 0x0310, + HCLGE_OPC_MAC_TNL_INT_EN = 0x0311, + HCLGE_OPC_CLEAR_MAC_TNL_INT = 0x0312, + 
HCLGE_OPC_COMMON_LOOPBACK = 0x0315, + HCLGE_OPC_QUERY_FEC_STATS = 0x0316, + HCLGE_OPC_CONFIG_FEC_MODE = 0x031A, + HCLGE_OPC_QUERY_ROH_TYPE_INFO = 0x0389, + + /* PTP commands */ + HCLGE_OPC_PTP_INT_EN = 0x0501, + HCLGE_OPC_PTP_MODE_CFG = 0x0507, + + /* PFC/Pause commands */ + HCLGE_OPC_CFG_MAC_PAUSE_EN = 0x0701, + HCLGE_OPC_CFG_PFC_PAUSE_EN = 0x0702, + HCLGE_OPC_CFG_MAC_PARA = 0x0703, + HCLGE_OPC_CFG_PFC_PARA = 0x0704, + HCLGE_OPC_QUERY_MAC_TX_PKT_CNT = 0x0705, + HCLGE_OPC_QUERY_MAC_RX_PKT_CNT = 0x0706, + HCLGE_OPC_QUERY_PFC_TX_PKT_CNT = 0x0707, + HCLGE_OPC_QUERY_PFC_RX_PKT_CNT = 0x0708, + HCLGE_OPC_PRI_TO_TC_MAPPING = 0x0709, + HCLGE_OPC_QOS_MAP = 0x070A, + + /* ETS/scheduler commands */ + HCLGE_OPC_TM_PG_TO_PRI_LINK = 0x0804, + HCLGE_OPC_TM_QS_TO_PRI_LINK = 0x0805, + HCLGE_OPC_TM_NQ_TO_QS_LINK = 0x0806, + HCLGE_OPC_TM_RQ_TO_QS_LINK = 0x0807, + HCLGE_OPC_TM_PORT_WEIGHT = 0x0808, + HCLGE_OPC_TM_PG_WEIGHT = 0x0809, + HCLGE_OPC_TM_QS_WEIGHT = 0x080A, + HCLGE_OPC_TM_PRI_WEIGHT = 0x080B, + HCLGE_OPC_TM_PRI_C_SHAPPING = 0x080C, + HCLGE_OPC_TM_PRI_P_SHAPPING = 0x080D, + HCLGE_OPC_TM_PG_C_SHAPPING = 0x080E, + HCLGE_OPC_TM_PG_P_SHAPPING = 0x080F, + HCLGE_OPC_TM_PORT_SHAPPING = 0x0810, + HCLGE_OPC_TM_PG_SCH_MODE_CFG = 0x0812, + HCLGE_OPC_TM_PRI_SCH_MODE_CFG = 0x0813, + HCLGE_OPC_TM_QS_SCH_MODE_CFG = 0x0814, + HCLGE_OPC_TM_BP_TO_QSET_MAPPING = 0x0815, + HCLGE_OPC_TM_NODES = 0x0816, + HCLGE_OPC_ETS_TC_WEIGHT = 0x0843, + HCLGE_OPC_QSET_DFX_STS = 0x0844, + HCLGE_OPC_PRI_DFX_STS = 0x0845, + HCLGE_OPC_PG_DFX_STS = 0x0846, + HCLGE_OPC_PORT_DFX_STS = 0x0847, + HCLGE_OPC_SCH_NQ_CNT = 0x0848, + HCLGE_OPC_SCH_RQ_CNT = 0x0849, + HCLGE_OPC_TM_INTERNAL_STS = 0x0850, + HCLGE_OPC_TM_INTERNAL_CNT = 0x0851, + HCLGE_OPC_TM_INTERNAL_STS_1 = 0x0852, + + /* Packet buffer allocate commands */ + HCLGE_OPC_TX_BUFF_ALLOC = 0x0901, + HCLGE_OPC_RX_PRIV_BUFF_ALLOC = 0x0902, + HCLGE_OPC_RX_PRIV_WL_ALLOC = 0x0903, + HCLGE_OPC_RX_COM_THRD_ALLOC = 0x0904, + HCLGE_OPC_RX_COM_WL_ALLOC = 0x0905, + HCLGE_OPC_RX_GBL_PKT_CNT = 0x0906, + + /* TQP management command */ + HCLGE_OPC_SET_TQP_MAP = 0x0A01, + + /* TQP commands */ + HCLGE_OPC_CFG_TX_QUEUE = 0x0B01, + HCLGE_OPC_QUERY_TX_POINTER = 0x0B02, + HCLGE_OPC_QUERY_TX_STATS = 0x0B03, + HCLGE_OPC_TQP_TX_QUEUE_TC = 0x0B04, + HCLGE_OPC_CFG_RX_QUEUE = 0x0B11, + HCLGE_OPC_QUERY_RX_POINTER = 0x0B12, + HCLGE_OPC_QUERY_RX_STATS = 0x0B13, + HCLGE_OPC_STASH_RX_QUEUE_LRO = 0x0B16, + HCLGE_OPC_CFG_RX_QUEUE_LRO = 0x0B17, + HCLGE_OPC_CFG_COM_TQP_QUEUE = 0x0B20, + HCLGE_OPC_RESET_TQP_QUEUE = 0x0B22, + + /* PPU commands */ + HCLGE_OPC_PPU_PF_OTHER_INT_DFX = 0x0B4A, + + /* TSO command */ + HCLGE_OPC_TSO_GENERIC_CONFIG = 0x0C01, + HCLGE_OPC_GRO_GENERIC_CONFIG = 0x0C10, + + /* RSS commands */ + HCLGE_OPC_RSS_GENERIC_CONFIG = 0x0D01, + HCLGE_OPC_RSS_INDIR_TABLE = 0x0D07, + HCLGE_OPC_RSS_TC_MODE = 0x0D08, + HCLGE_OPC_RSS_INPUT_TUPLE = 0x0D02, + + /* Promisuous mode command */ + HCLGE_OPC_CFG_PROMISC_MODE = 0x0E01, + + /* Vlan offload commands */ + HCLGE_OPC_VLAN_PORT_TX_CFG = 0x0F01, + HCLGE_OPC_VLAN_PORT_RX_CFG = 0x0F02, + + /* Interrupts commands */ + HCLGE_OPC_ADD_RING_TO_VECTOR = 0x1503, + HCLGE_OPC_DEL_RING_TO_VECTOR = 0x1504, + + /* MAC commands */ + HCLGE_OPC_MAC_VLAN_ADD = 0x1000, + HCLGE_OPC_MAC_VLAN_REMOVE = 0x1001, + HCLGE_OPC_MAC_VLAN_TYPE_ID = 0x1002, + HCLGE_OPC_MAC_VLAN_INSERT = 0x1003, + HCLGE_OPC_MAC_VLAN_ALLOCATE = 0x1004, + HCLGE_OPC_MAC_ETHTYPE_ADD = 0x1010, + HCLGE_OPC_MAC_ETHTYPE_REMOVE = 0x1011, + + /* MAC VLAN commands */ + HCLGE_OPC_MAC_VLAN_SWITCH_PARAM = 0x1033, + + /* VLAN commands 
*/ + HCLGE_OPC_VLAN_FILTER_CTRL = 0x1100, + HCLGE_OPC_VLAN_FILTER_PF_CFG = 0x1101, + HCLGE_OPC_VLAN_FILTER_VF_CFG = 0x1102, + HCLGE_OPC_PORT_VLAN_BYPASS = 0x1103, + + /* Flow Director commands */ + HCLGE_OPC_FD_MODE_CTRL = 0x1200, + HCLGE_OPC_FD_GET_ALLOCATION = 0x1201, + HCLGE_OPC_FD_KEY_CONFIG = 0x1202, + HCLGE_OPC_FD_TCAM_OP = 0x1203, + HCLGE_OPC_FD_AD_OP = 0x1204, + HCLGE_OPC_FD_CNT_OP = 0x1205, + HCLGE_OPC_FD_USER_DEF_OP = 0x1207, + HCLGE_OPC_FD_QB_CTRL = 0x1210, + HCLGE_OPC_FD_QB_AD_OP = 0x1211, + + /* MDIO command */ + HCLGE_OPC_MDIO_CONFIG = 0x1900, + + /* QCN commands */ + HCLGE_OPC_QCN_MOD_CFG = 0x1A01, + HCLGE_OPC_QCN_GRP_TMPLT_CFG = 0x1A02, + HCLGE_OPC_QCN_SHAPPING_CFG = 0x1A03, + HCLGE_OPC_QCN_SHAPPING_BS_CFG = 0x1A04, + HCLGE_OPC_QCN_QSET_LINK_CFG = 0x1A05, + HCLGE_OPC_QCN_RP_STATUS_GET = 0x1A06, + HCLGE_OPC_QCN_AJUST_INIT = 0x1A07, + HCLGE_OPC_QCN_DFX_CNT_STATUS = 0x1A08, + + /* Mailbox command */ + HCLGEVF_OPC_MBX_PF_TO_VF = 0x2000, + HCLGEVF_OPC_MBX_VF_TO_PF = 0x2001, + + /* Led command */ + HCLGE_OPC_LED_STATUS_CFG = 0xB000, + + /* clear hardware resource command */ + HCLGE_OPC_CLEAR_HW_RESOURCE = 0x700B, + + /* NCL config command */ + HCLGE_OPC_QUERY_NCL_CONFIG = 0x7011, + + /* IMP stats command */ + HCLGE_OPC_IMP_STATS_BD = 0x7012, + HCLGE_OPC_IMP_STATS_INFO = 0x7013, + HCLGE_OPC_IMP_COMPAT_CFG = 0x701A, + + /* SFP command */ + HCLGE_OPC_GET_SFP_EEPROM = 0x7100, + HCLGE_OPC_GET_SFP_EXIST = 0x7101, + HCLGE_OPC_GET_SFP_INFO = 0x7104, + + /* Error INT commands */ + HCLGE_MAC_COMMON_INT_EN = 0x030E, + HCLGE_TM_SCH_ECC_INT_EN = 0x0829, + HCLGE_SSU_ECC_INT_CMD = 0x0989, + HCLGE_SSU_COMMON_INT_CMD = 0x098C, + HCLGE_PPU_MPF_ECC_INT_CMD = 0x0B40, + HCLGE_PPU_MPF_OTHER_INT_CMD = 0x0B41, + HCLGE_PPU_PF_OTHER_INT_CMD = 0x0B42, + HCLGE_COMMON_ECC_INT_CFG = 0x1505, + HCLGE_QUERY_RAS_INT_STS_BD_NUM = 0x1510, + HCLGE_QUERY_CLEAR_MPF_RAS_INT = 0x1511, + HCLGE_QUERY_CLEAR_PF_RAS_INT = 0x1512, + HCLGE_QUERY_MSIX_INT_STS_BD_NUM = 0x1513, + HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT = 0x1514, + HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT = 0x1515, + HCLGE_QUERY_ALL_ERR_BD_NUM = 0x1516, + HCLGE_QUERY_ALL_ERR_INFO = 0x1517, + HCLGE_CONFIG_ROCEE_RAS_INT_EN = 0x1580, + HCLGE_QUERY_CLEAR_ROCEE_RAS_INT = 0x1581, + HCLGE_ROCEE_PF_RAS_INT_CMD = 0x1584, + HCLGE_QUERY_ROCEE_ECC_RAS_INFO_CMD = 0x1585, + HCLGE_QUERY_ROCEE_AXI_RAS_INFO_CMD = 0x1586, + HCLGE_IGU_EGU_TNL_INT_EN = 0x1803, + HCLGE_IGU_COMMON_INT_EN = 0x1806, + HCLGE_TM_QCN_MEM_INT_CFG = 0x1A14, + HCLGE_PPP_CMD0_INT_CMD = 0x2100, + HCLGE_PPP_CMD1_INT_CMD = 0x2101, + HCLGE_MAC_ETHERTYPE_IDX_RD = 0x2105, + HCLGE_NCSI_INT_EN = 0x2401, + + /* ROH MAC commands */ + HCLGE_OPC_MAC_ADDR_CHECK = 0x9004, + + /* PHY command */ + HCLGE_OPC_PHY_LINK_KSETTING = 0x7025, + HCLGE_OPC_PHY_REG = 0x7026, + + /* Query link diagnosis info command */ + HCLGE_OPC_QUERY_LINK_DIAGNOSIS = 0x702A, +}; + +enum hclge_comm_cmd_return_status { + HCLGE_COMM_CMD_EXEC_SUCCESS = 0, + HCLGE_COMM_CMD_NO_AUTH = 1, + HCLGE_COMM_CMD_NOT_SUPPORTED = 2, + HCLGE_COMM_CMD_QUEUE_FULL = 3, + HCLGE_COMM_CMD_NEXT_ERR = 4, + HCLGE_COMM_CMD_UNEXE_ERR = 5, + HCLGE_COMM_CMD_PARA_ERR = 6, + HCLGE_COMM_CMD_RESULT_ERR = 7, + HCLGE_COMM_CMD_TIMEOUT = 8, + HCLGE_COMM_CMD_HILINK_ERR = 9, + HCLGE_COMM_CMD_QUEUE_ILLEGAL = 10, + HCLGE_COMM_CMD_INVALID = 11, +}; + +enum HCLGE_COMM_CAP_BITS { + HCLGE_COMM_CAP_UDP_GSO_B, + HCLGE_COMM_CAP_QB_B, + HCLGE_COMM_CAP_FD_FORWARD_TC_B, + HCLGE_COMM_CAP_PTP_B, + HCLGE_COMM_CAP_INT_QL_B, + HCLGE_COMM_CAP_HW_TX_CSUM_B, + HCLGE_COMM_CAP_TX_PUSH_B, + HCLGE_COMM_CAP_PHY_IMP_B, + 
HCLGE_COMM_CAP_TQP_TXRX_INDEP_B, + HCLGE_COMM_CAP_HW_PAD_B, + HCLGE_COMM_CAP_STASH_B, + HCLGE_COMM_CAP_UDP_TUNNEL_CSUM_B, + HCLGE_COMM_CAP_RAS_IMP_B = 12, + HCLGE_COMM_CAP_FEC_B = 13, + HCLGE_COMM_CAP_PAUSE_B = 14, + HCLGE_COMM_CAP_RXD_ADV_LAYOUT_B = 15, + HCLGE_COMM_CAP_PORT_VLAN_BYPASS_B = 17, + HCLGE_COMM_CAP_CQ_B = 18, + HCLGE_COMM_CAP_GRO_B = 20, + HCLGE_COMM_CAP_FD_B = 21, + HCLGE_COMM_CAP_FEC_STATS_B = 25, + HCLGE_COMM_CAP_LANE_NUM_B = 27, +}; + +enum HCLGE_COMM_API_CAP_BITS { + HCLGE_COMM_API_CAP_FLEX_RSS_TBL_B, +}; + +/* capabilities bits map between imp firmware and local driver */ +struct hclge_comm_caps_bit_map { + u16 imp_bit; + u16 local_bit; +}; + +struct hclge_cmdq_tx_timeout_map { + u32 opcode; + u32 tx_timeout; +}; + +struct hclge_comm_firmware_compat_cmd { + __le32 compat; + u8 rsv[20]; +}; + +enum hclge_comm_cmd_state { + HCLGE_COMM_STATE_CMD_DISABLE, +}; + +struct hclge_comm_errcode { + u32 imp_errcode; + int common_errno; +}; + +#define HCLGE_COMM_QUERY_CAP_LENGTH 3 +struct hclge_comm_query_version_cmd { + __le32 firmware; + __le32 hardware; + __le32 api_caps; + __le32 caps[HCLGE_COMM_QUERY_CAP_LENGTH]; /* capabilities of device */ +}; + +#define HCLGE_DESC_DATA_LEN 6 +struct hclge_desc { + __le16 opcode; + __le16 flag; + __le16 retval; + __le16 rsv; + __le32 data[HCLGE_DESC_DATA_LEN]; +}; + +struct hclge_comm_cmq_ring { + dma_addr_t desc_dma_addr; + struct hclge_desc *desc; + struct pci_dev *pdev; + u32 head; + u32 tail; + + u16 buf_size; + u16 desc_num; + int next_to_use; + int next_to_clean; + u8 ring_type; /* cmq ring type */ + spinlock_t lock; /* Command queue lock */ +}; + +enum hclge_comm_cmd_status { + HCLGE_COMM_STATUS_SUCCESS = 0, + HCLGE_COMM_ERR_CSQ_FULL = -1, + HCLGE_COMM_ERR_CSQ_TIMEOUT = -2, + HCLGE_COMM_ERR_CSQ_ERROR = -3, +}; + +struct hclge_comm_cmq { + struct hclge_comm_cmq_ring csq; + struct hclge_comm_cmq_ring crq; + u16 tx_timeout; + enum hclge_comm_cmd_status last_status; +}; + +struct hclge_comm_hw { + void __iomem *io_base; + void __iomem *mem_base; + struct hclge_comm_cmq cmq; + unsigned long comm_state; +}; + +static inline void hclge_comm_write_reg(void __iomem *base, u32 reg, u32 value) +{ + writel(value, base + reg); +} + +static inline u32 hclge_comm_read_reg(u8 __iomem *base, u32 reg) +{ + u8 __iomem *reg_addr = READ_ONCE(base); + + return readl(reg_addr + reg); +} + +#define hclge_comm_write_dev(a, reg, value) \ + hclge_comm_write_reg((a)->io_base, reg, value) +#define hclge_comm_read_dev(a, reg) \ + hclge_comm_read_reg((a)->io_base, reg) + +void hclge_comm_cmd_init_regs(struct hclge_comm_hw *hw); +int hclge_comm_cmd_query_version_and_capability(struct hnae3_ae_dev *ae_dev, + struct hclge_comm_hw *hw, + u32 *fw_version, bool is_pf); +int hclge_comm_alloc_cmd_queue(struct hclge_comm_hw *hw, int ring_type); +int hclge_comm_cmd_send(struct hclge_comm_hw *hw, struct hclge_desc *desc, + int num); +void hclge_comm_cmd_reuse_desc(struct hclge_desc *desc, bool is_read); +int hclge_comm_firmware_compat_config(struct hnae3_ae_dev *ae_dev, + struct hclge_comm_hw *hw, bool en); +void hclge_comm_free_cmd_desc(struct hclge_comm_cmq_ring *ring); +void hclge_comm_cmd_setup_basic_desc(struct hclge_desc *desc, + enum hclge_opcode_type opcode, + bool is_read); +void hclge_comm_cmd_uninit(struct hnae3_ae_dev *ae_dev, + struct hclge_comm_hw *hw); +int hclge_comm_cmd_queue_init(struct pci_dev *pdev, struct hclge_comm_hw *hw); +int hclge_comm_cmd_init(struct hnae3_ae_dev *ae_dev, struct hclge_comm_hw *hw, + u32 *fw_version, bool is_pf, + unsigned long 
reset_pending); + +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c new file mode 100644 index 000000000..ae2736549 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c @@ -0,0 +1,505 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2021-2021 Hisilicon Limited. +#include + +#include "hnae3.h" +#include "hclge_comm_cmd.h" +#include "hclge_comm_rss.h" + +static const u8 hclge_comm_hash_key[] = { + 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, + 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0, + 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4, + 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, + 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA +}; + +static void +hclge_comm_init_rss_tuple(struct hnae3_ae_dev *ae_dev, + struct hclge_comm_rss_tuple_cfg *rss_tuple_cfg) +{ + rss_tuple_cfg->ipv4_tcp_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER; + rss_tuple_cfg->ipv4_udp_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER; + rss_tuple_cfg->ipv4_sctp_en = HCLGE_COMM_RSS_INPUT_TUPLE_SCTP; + rss_tuple_cfg->ipv4_fragment_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER; + rss_tuple_cfg->ipv6_tcp_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER; + rss_tuple_cfg->ipv6_udp_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER; + rss_tuple_cfg->ipv6_sctp_en = + ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ? + HCLGE_COMM_RSS_INPUT_TUPLE_SCTP_NO_PORT : + HCLGE_COMM_RSS_INPUT_TUPLE_SCTP; + rss_tuple_cfg->ipv6_fragment_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER; +} + +int hclge_comm_rss_init_cfg(struct hnae3_handle *nic, + struct hnae3_ae_dev *ae_dev, + struct hclge_comm_rss_cfg *rss_cfg) +{ + u16 rss_ind_tbl_size = ae_dev->dev_specs.rss_ind_tbl_size; + int rss_algo = HCLGE_COMM_RSS_HASH_ALGO_TOEPLITZ; + u16 *rss_ind_tbl; + + if (nic->flags & HNAE3_SUPPORT_VF) + rss_cfg->rss_size = nic->kinfo.rss_size; + + if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) + rss_algo = HCLGE_COMM_RSS_HASH_ALGO_SIMPLE; + + hclge_comm_init_rss_tuple(ae_dev, &rss_cfg->rss_tuple_sets); + + rss_cfg->rss_algo = rss_algo; + + rss_ind_tbl = devm_kcalloc(&ae_dev->pdev->dev, rss_ind_tbl_size, + sizeof(*rss_ind_tbl), GFP_KERNEL); + if (!rss_ind_tbl) + return -ENOMEM; + + rss_cfg->rss_indirection_tbl = rss_ind_tbl; + memcpy(rss_cfg->rss_hash_key, hclge_comm_hash_key, + HCLGE_COMM_RSS_KEY_SIZE); + + hclge_comm_rss_indir_init_cfg(ae_dev, rss_cfg); + + return 0; +} + +void hclge_comm_get_rss_tc_info(u16 rss_size, u8 hw_tc_map, u16 *tc_offset, + u16 *tc_valid, u16 *tc_size) +{ + u16 roundup_size; + u32 i; + + roundup_size = roundup_pow_of_two(rss_size); + roundup_size = ilog2(roundup_size); + + for (i = 0; i < HCLGE_COMM_MAX_TC_NUM; i++) { + tc_valid[i] = 1; + tc_size[i] = roundup_size; + tc_offset[i] = (hw_tc_map & BIT(i)) ? 
rss_size * i : 0; + } +} + +int hclge_comm_set_rss_tc_mode(struct hclge_comm_hw *hw, u16 *tc_offset, + u16 *tc_valid, u16 *tc_size) +{ + struct hclge_comm_rss_tc_mode_cmd *req; + struct hclge_desc desc; + unsigned int i; + int ret; + + req = (struct hclge_comm_rss_tc_mode_cmd *)desc.data; + + hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false); + for (i = 0; i < HCLGE_COMM_MAX_TC_NUM; i++) { + u16 mode = 0; + + hnae3_set_bit(mode, HCLGE_COMM_RSS_TC_VALID_B, + (tc_valid[i] & 0x1)); + hnae3_set_field(mode, HCLGE_COMM_RSS_TC_SIZE_M, + HCLGE_COMM_RSS_TC_SIZE_S, tc_size[i]); + hnae3_set_bit(mode, HCLGE_COMM_RSS_TC_SIZE_MSB_B, + tc_size[i] >> HCLGE_COMM_RSS_TC_SIZE_MSB_OFFSET & + 0x1); + hnae3_set_field(mode, HCLGE_COMM_RSS_TC_OFFSET_M, + HCLGE_COMM_RSS_TC_OFFSET_S, tc_offset[i]); + + req->rss_tc_mode[i] = cpu_to_le16(mode); + } + + ret = hclge_comm_cmd_send(hw, &desc, 1); + if (ret) + dev_err(&hw->cmq.csq.pdev->dev, + "failed to set rss tc mode, ret = %d.\n", ret); + + return ret; +} + +int hclge_comm_set_rss_hash_key(struct hclge_comm_rss_cfg *rss_cfg, + struct hclge_comm_hw *hw, const u8 *key, + const u8 hfunc) +{ + u8 hash_algo; + int ret; + + ret = hclge_comm_parse_rss_hfunc(rss_cfg, hfunc, &hash_algo); + if (ret) + return ret; + + /* Set the RSS Hash Key if specififed by the user */ + if (key) { + ret = hclge_comm_set_rss_algo_key(hw, hash_algo, key); + if (ret) + return ret; + + /* Update the shadow RSS key with user specified qids */ + memcpy(rss_cfg->rss_hash_key, key, HCLGE_COMM_RSS_KEY_SIZE); + } else { + ret = hclge_comm_set_rss_algo_key(hw, hash_algo, + rss_cfg->rss_hash_key); + if (ret) + return ret; + } + rss_cfg->rss_algo = hash_algo; + + return 0; +} + +int hclge_comm_set_rss_tuple(struct hnae3_ae_dev *ae_dev, + struct hclge_comm_hw *hw, + struct hclge_comm_rss_cfg *rss_cfg, + struct ethtool_rxnfc *nfc) +{ + struct hclge_comm_rss_input_tuple_cmd *req; + struct hclge_desc desc; + int ret; + + if (nfc->data & + ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)) + return -EINVAL; + + req = (struct hclge_comm_rss_input_tuple_cmd *)desc.data; + hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, + false); + + ret = hclge_comm_init_rss_tuple_cmd(rss_cfg, nfc, ae_dev, req); + if (ret) { + dev_err(&hw->cmq.csq.pdev->dev, + "failed to init rss tuple cmd, ret = %d.\n", ret); + return ret; + } + + ret = hclge_comm_cmd_send(hw, &desc, 1); + if (ret) { + dev_err(&hw->cmq.csq.pdev->dev, + "failed to set rss tuple, ret = %d.\n", ret); + return ret; + } + + rss_cfg->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en; + rss_cfg->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en; + rss_cfg->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en; + rss_cfg->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en; + rss_cfg->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en; + rss_cfg->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en; + rss_cfg->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en; + rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en; + return 0; +} + +u32 hclge_comm_get_rss_key_size(struct hnae3_handle *handle) +{ + return HCLGE_COMM_RSS_KEY_SIZE; +} + +int hclge_comm_parse_rss_hfunc(struct hclge_comm_rss_cfg *rss_cfg, + const u8 hfunc, u8 *hash_algo) +{ + switch (hfunc) { + case ETH_RSS_HASH_TOP: + *hash_algo = HCLGE_COMM_RSS_HASH_ALGO_TOEPLITZ; + return 0; + case ETH_RSS_HASH_XOR: + *hash_algo = HCLGE_COMM_RSS_HASH_ALGO_SIMPLE; + return 0; + case ETH_RSS_HASH_NO_CHANGE: + *hash_algo = rss_cfg->rss_algo; + return 0; + default: + return -EINVAL; + } +} + 
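hclge_comm_get_rss_tc_info() above sizes each TC's slice of the RSS table as log2 of rss_size rounded up to a power of two, and places enabled TCs at rss_size-sized offsets. The userspace sketch below reproduces that arithmetic; roundup_p2() and ilog2_u32() are simple local stand-ins for the kernel's roundup_pow_of_two() and ilog2() helpers.

/* Sketch of the TC size/offset math in hclge_comm_get_rss_tc_info(). */
#include <stdio.h>

#define MAX_TC_NUM	8	/* mirrors HCLGE_COMM_MAX_TC_NUM */

static unsigned int roundup_p2(unsigned int v)
{
	unsigned int p = 1;

	while (p < v)
		p <<= 1;
	return p;
}

static unsigned int ilog2_u32(unsigned int v)
{
	unsigned int log = 0;

	while (v > 1) {
		v >>= 1;
		log++;
	}
	return log;
}

int main(void)
{
	unsigned int rss_size = 6, hw_tc_map = 0x5;	/* example: TC0 and TC2 enabled */
	unsigned int roundup_size = ilog2_u32(roundup_p2(rss_size));
	unsigned int i;

	for (i = 0; i < MAX_TC_NUM; i++) {
		unsigned int size = roundup_size;	/* every TC gets the same log2 size */
		unsigned int offset = (hw_tc_map & (1u << i)) ? rss_size * i : 0;

		printf("tc%u: valid=1 size=2^%u offset=%u\n", i, size, offset);
	}
	return 0;
}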
+void hclge_comm_rss_indir_init_cfg(struct hnae3_ae_dev *ae_dev, + struct hclge_comm_rss_cfg *rss_cfg) +{ + u16 i; + /* Initialize RSS indirect table */ + for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++) + rss_cfg->rss_indirection_tbl[i] = i % rss_cfg->rss_size; +} + +int hclge_comm_get_rss_tuple(struct hclge_comm_rss_cfg *rss_cfg, int flow_type, + u8 *tuple_sets) +{ + switch (flow_type) { + case TCP_V4_FLOW: + *tuple_sets = rss_cfg->rss_tuple_sets.ipv4_tcp_en; + break; + case UDP_V4_FLOW: + *tuple_sets = rss_cfg->rss_tuple_sets.ipv4_udp_en; + break; + case TCP_V6_FLOW: + *tuple_sets = rss_cfg->rss_tuple_sets.ipv6_tcp_en; + break; + case UDP_V6_FLOW: + *tuple_sets = rss_cfg->rss_tuple_sets.ipv6_udp_en; + break; + case SCTP_V4_FLOW: + *tuple_sets = rss_cfg->rss_tuple_sets.ipv4_sctp_en; + break; + case SCTP_V6_FLOW: + *tuple_sets = rss_cfg->rss_tuple_sets.ipv6_sctp_en; + break; + case IPV4_FLOW: + case IPV6_FLOW: + *tuple_sets = HCLGE_COMM_S_IP_BIT | HCLGE_COMM_D_IP_BIT; + break; + default: + return -EINVAL; + } + + return 0; +} + +static void +hclge_comm_append_rss_msb_info(struct hclge_comm_rss_ind_tbl_cmd *req, + u16 qid, u32 j) +{ + u8 rss_msb_oft; + u8 rss_msb_val; + + rss_msb_oft = + j * HCLGE_COMM_RSS_CFG_TBL_BW_H / BITS_PER_BYTE; + rss_msb_val = (qid >> HCLGE_COMM_RSS_CFG_TBL_BW_L & 0x1) << + (j * HCLGE_COMM_RSS_CFG_TBL_BW_H % BITS_PER_BYTE); + req->rss_qid_h[rss_msb_oft] |= rss_msb_val; +} + +int hclge_comm_set_rss_indir_table(struct hnae3_ae_dev *ae_dev, + struct hclge_comm_hw *hw, const u16 *indir) +{ + struct hclge_comm_rss_ind_tbl_cmd *req; + struct hclge_desc desc; + u16 rss_cfg_tbl_num; + int ret; + u16 qid; + u16 i; + u32 j; + + req = (struct hclge_comm_rss_ind_tbl_cmd *)desc.data; + rss_cfg_tbl_num = ae_dev->dev_specs.rss_ind_tbl_size / + HCLGE_COMM_RSS_CFG_TBL_SIZE; + + for (i = 0; i < rss_cfg_tbl_num; i++) { + hclge_comm_cmd_setup_basic_desc(&desc, + HCLGE_OPC_RSS_INDIR_TABLE, + false); + + req->start_table_index = + cpu_to_le16(i * HCLGE_COMM_RSS_CFG_TBL_SIZE); + req->rss_set_bitmap = + cpu_to_le16(HCLGE_COMM_RSS_SET_BITMAP_MSK); + for (j = 0; j < HCLGE_COMM_RSS_CFG_TBL_SIZE; j++) { + qid = indir[i * HCLGE_COMM_RSS_CFG_TBL_SIZE + j]; + req->rss_qid_l[j] = qid & 0xff; + hclge_comm_append_rss_msb_info(req, qid, j); + } + ret = hclge_comm_cmd_send(hw, &desc, 1); + if (ret) { + dev_err(&hw->cmq.csq.pdev->dev, + "failed to configure rss table, ret = %d.\n", + ret); + return ret; + } + } + return 0; +} + +int hclge_comm_set_rss_input_tuple(struct hnae3_handle *nic, + struct hclge_comm_hw *hw, bool is_pf, + struct hclge_comm_rss_cfg *rss_cfg) +{ + struct hclge_comm_rss_input_tuple_cmd *req; + struct hclge_desc desc; + int ret; + + hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, + false); + + req = (struct hclge_comm_rss_input_tuple_cmd *)desc.data; + + req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en; + req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en; + req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en; + req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en; + req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en; + req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en; + req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en; + req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en; + + ret = hclge_comm_cmd_send(hw, &desc, 1); + if (ret) + dev_err(&hw->cmq.csq.pdev->dev, + "failed to configure rss input, ret = %d.\n", ret); + return ret; +} + +void hclge_comm_get_rss_hash_info(struct hclge_comm_rss_cfg 
*rss_cfg, u8 *key, + u8 *hfunc) +{ + /* Get hash algorithm */ + if (hfunc) { + switch (rss_cfg->rss_algo) { + case HCLGE_COMM_RSS_HASH_ALGO_TOEPLITZ: + *hfunc = ETH_RSS_HASH_TOP; + break; + case HCLGE_COMM_RSS_HASH_ALGO_SIMPLE: + *hfunc = ETH_RSS_HASH_XOR; + break; + default: + *hfunc = ETH_RSS_HASH_UNKNOWN; + break; + } + } + + /* Get the RSS Key required by the user */ + if (key) + memcpy(key, rss_cfg->rss_hash_key, HCLGE_COMM_RSS_KEY_SIZE); +} + +void hclge_comm_get_rss_indir_tbl(struct hclge_comm_rss_cfg *rss_cfg, + u32 *indir, u16 rss_ind_tbl_size) +{ + u16 i; + + if (!indir) + return; + + for (i = 0; i < rss_ind_tbl_size; i++) + indir[i] = rss_cfg->rss_indirection_tbl[i]; +} + +int hclge_comm_set_rss_algo_key(struct hclge_comm_hw *hw, const u8 hfunc, + const u8 *key) +{ + struct hclge_comm_rss_config_cmd *req; + unsigned int key_offset = 0; + struct hclge_desc desc; + int key_counts; + int key_size; + int ret; + + key_counts = HCLGE_COMM_RSS_KEY_SIZE; + req = (struct hclge_comm_rss_config_cmd *)desc.data; + + while (key_counts) { + hclge_comm_cmd_setup_basic_desc(&desc, + HCLGE_OPC_RSS_GENERIC_CONFIG, + false); + + req->hash_config |= (hfunc & HCLGE_COMM_RSS_HASH_ALGO_MASK); + req->hash_config |= + (key_offset << HCLGE_COMM_RSS_HASH_KEY_OFFSET_B); + + key_size = min(HCLGE_COMM_RSS_HASH_KEY_NUM, key_counts); + memcpy(req->hash_key, + key + key_offset * HCLGE_COMM_RSS_HASH_KEY_NUM, + key_size); + + key_counts -= key_size; + key_offset++; + ret = hclge_comm_cmd_send(hw, &desc, 1); + if (ret) { + dev_err(&hw->cmq.csq.pdev->dev, + "failed to configure RSS key, ret = %d.\n", + ret); + return ret; + } + } + + return 0; +} + +static u8 hclge_comm_get_rss_hash_bits(struct ethtool_rxnfc *nfc) +{ + u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_COMM_S_PORT_BIT : 0; + + if (nfc->data & RXH_L4_B_2_3) + hash_sets |= HCLGE_COMM_D_PORT_BIT; + else + hash_sets &= ~HCLGE_COMM_D_PORT_BIT; + + if (nfc->data & RXH_IP_SRC) + hash_sets |= HCLGE_COMM_S_IP_BIT; + else + hash_sets &= ~HCLGE_COMM_S_IP_BIT; + + if (nfc->data & RXH_IP_DST) + hash_sets |= HCLGE_COMM_D_IP_BIT; + else + hash_sets &= ~HCLGE_COMM_D_IP_BIT; + + if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW) + hash_sets |= HCLGE_COMM_V_TAG_BIT; + + return hash_sets; +} + +int hclge_comm_init_rss_tuple_cmd(struct hclge_comm_rss_cfg *rss_cfg, + struct ethtool_rxnfc *nfc, + struct hnae3_ae_dev *ae_dev, + struct hclge_comm_rss_input_tuple_cmd *req) +{ + u8 tuple_sets; + + req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en; + req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en; + req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en; + req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en; + req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en; + req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en; + req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en; + req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en; + + tuple_sets = hclge_comm_get_rss_hash_bits(nfc); + switch (nfc->flow_type) { + case TCP_V4_FLOW: + req->ipv4_tcp_en = tuple_sets; + break; + case TCP_V6_FLOW: + req->ipv6_tcp_en = tuple_sets; + break; + case UDP_V4_FLOW: + req->ipv4_udp_en = tuple_sets; + break; + case UDP_V6_FLOW: + req->ipv6_udp_en = tuple_sets; + break; + case SCTP_V4_FLOW: + req->ipv4_sctp_en = tuple_sets; + break; + case SCTP_V6_FLOW: + if (ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 && + (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3))) + return -EINVAL; + + req->ipv6_sctp_en = tuple_sets; + break; + case 
IPV4_FLOW: + req->ipv4_fragment_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER; + break; + case IPV6_FLOW: + req->ipv6_fragment_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER; + break; + default: + return -EINVAL; + } + + return 0; +} + +u64 hclge_comm_convert_rss_tuple(u8 tuple_sets) +{ + u64 tuple_data = 0; + + if (tuple_sets & HCLGE_COMM_D_PORT_BIT) + tuple_data |= RXH_L4_B_2_3; + if (tuple_sets & HCLGE_COMM_S_PORT_BIT) + tuple_data |= RXH_L4_B_0_1; + if (tuple_sets & HCLGE_COMM_D_IP_BIT) + tuple_data |= RXH_IP_DST; + if (tuple_sets & HCLGE_COMM_S_IP_BIT) + tuple_data |= RXH_IP_SRC; + + return tuple_data; +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h new file mode 100644 index 000000000..92af3d298 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h @@ -0,0 +1,134 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +// Copyright (c) 2021-2021 Hisilicon Limited. + +#ifndef __HCLGE_COMM_RSS_H +#define __HCLGE_COMM_RSS_H +#include + +#include "hnae3.h" +#include "hclge_comm_cmd.h" + +#define HCLGE_COMM_RSS_HASH_ALGO_TOEPLITZ 0 +#define HCLGE_COMM_RSS_HASH_ALGO_SIMPLE 1 +#define HCLGE_COMM_RSS_HASH_ALGO_SYMMETRIC 2 + +#define HCLGE_COMM_RSS_INPUT_TUPLE_OTHER GENMASK(3, 0) +#define HCLGE_COMM_RSS_INPUT_TUPLE_SCTP GENMASK(4, 0) + +#define HCLGE_COMM_D_PORT_BIT BIT(0) +#define HCLGE_COMM_S_PORT_BIT BIT(1) +#define HCLGE_COMM_D_IP_BIT BIT(2) +#define HCLGE_COMM_S_IP_BIT BIT(3) +#define HCLGE_COMM_V_TAG_BIT BIT(4) +#define HCLGE_COMM_RSS_INPUT_TUPLE_SCTP_NO_PORT \ + (HCLGE_COMM_D_IP_BIT | HCLGE_COMM_S_IP_BIT | HCLGE_COMM_V_TAG_BIT) +#define HCLGE_COMM_MAX_TC_NUM 8 + +#define HCLGE_COMM_RSS_TC_OFFSET_S 0 +#define HCLGE_COMM_RSS_TC_OFFSET_M GENMASK(10, 0) +#define HCLGE_COMM_RSS_TC_SIZE_MSB_B 11 +#define HCLGE_COMM_RSS_TC_SIZE_S 12 +#define HCLGE_COMM_RSS_TC_SIZE_M GENMASK(14, 12) +#define HCLGE_COMM_RSS_TC_VALID_B 15 +#define HCLGE_COMM_RSS_TC_SIZE_MSB_OFFSET 3 + +struct hclge_comm_rss_tuple_cfg { + u8 ipv4_tcp_en; + u8 ipv4_udp_en; + u8 ipv4_sctp_en; + u8 ipv4_fragment_en; + u8 ipv6_tcp_en; + u8 ipv6_udp_en; + u8 ipv6_sctp_en; + u8 ipv6_fragment_en; +}; + +#define HCLGE_COMM_RSS_KEY_SIZE 40 +#define HCLGE_COMM_RSS_CFG_TBL_SIZE 16 +#define HCLGE_COMM_RSS_CFG_TBL_BW_H 2U +#define HCLGE_COMM_RSS_CFG_TBL_BW_L 8U +#define HCLGE_COMM_RSS_CFG_TBL_SIZE_H 4 +#define HCLGE_COMM_RSS_SET_BITMAP_MSK GENMASK(15, 0) +#define HCLGE_COMM_RSS_HASH_ALGO_MASK GENMASK(3, 0) +#define HCLGE_COMM_RSS_HASH_KEY_OFFSET_B 4 + +#define HCLGE_COMM_RSS_HASH_KEY_NUM 16 +struct hclge_comm_rss_config_cmd { + u8 hash_config; + u8 rsv[7]; + u8 hash_key[HCLGE_COMM_RSS_HASH_KEY_NUM]; +}; + +struct hclge_comm_rss_cfg { + u8 rss_hash_key[HCLGE_COMM_RSS_KEY_SIZE]; /* user configured hash keys */ + + /* shadow table */ + u16 *rss_indirection_tbl; + u32 rss_algo; + + struct hclge_comm_rss_tuple_cfg rss_tuple_sets; + u32 rss_size; +}; + +struct hclge_comm_rss_input_tuple_cmd { + u8 ipv4_tcp_en; + u8 ipv4_udp_en; + u8 ipv4_sctp_en; + u8 ipv4_fragment_en; + u8 ipv6_tcp_en; + u8 ipv6_udp_en; + u8 ipv6_sctp_en; + u8 ipv6_fragment_en; + u8 rsv[16]; +}; + +struct hclge_comm_rss_ind_tbl_cmd { + __le16 start_table_index; + __le16 rss_set_bitmap; + u8 rss_qid_h[HCLGE_COMM_RSS_CFG_TBL_SIZE_H]; + u8 rss_qid_l[HCLGE_COMM_RSS_CFG_TBL_SIZE]; +}; + +struct hclge_comm_rss_tc_mode_cmd { + __le16 rss_tc_mode[HCLGE_COMM_MAX_TC_NUM]; + u8 rsv[8]; +}; + +u32 hclge_comm_get_rss_key_size(struct hnae3_handle *handle); +void 
hclge_comm_rss_indir_init_cfg(struct hnae3_ae_dev *ae_dev, + struct hclge_comm_rss_cfg *rss_cfg); +int hclge_comm_get_rss_tuple(struct hclge_comm_rss_cfg *rss_cfg, int flow_type, + u8 *tuple_sets); +int hclge_comm_parse_rss_hfunc(struct hclge_comm_rss_cfg *rss_cfg, + const u8 hfunc, u8 *hash_algo); +void hclge_comm_get_rss_hash_info(struct hclge_comm_rss_cfg *rss_cfg, u8 *key, + u8 *hfunc); +void hclge_comm_get_rss_indir_tbl(struct hclge_comm_rss_cfg *rss_cfg, + u32 *indir, u16 rss_ind_tbl_size); +int hclge_comm_set_rss_algo_key(struct hclge_comm_hw *hw, const u8 hfunc, + const u8 *key); +int hclge_comm_init_rss_tuple_cmd(struct hclge_comm_rss_cfg *rss_cfg, + struct ethtool_rxnfc *nfc, + struct hnae3_ae_dev *ae_dev, + struct hclge_comm_rss_input_tuple_cmd *req); +u64 hclge_comm_convert_rss_tuple(u8 tuple_sets); +int hclge_comm_set_rss_input_tuple(struct hnae3_handle *nic, + struct hclge_comm_hw *hw, bool is_pf, + struct hclge_comm_rss_cfg *rss_cfg); +int hclge_comm_set_rss_indir_table(struct hnae3_ae_dev *ae_dev, + struct hclge_comm_hw *hw, const u16 *indir); +int hclge_comm_rss_init_cfg(struct hnae3_handle *nic, + struct hnae3_ae_dev *ae_dev, + struct hclge_comm_rss_cfg *rss_cfg); +void hclge_comm_get_rss_tc_info(u16 rss_size, u8 hw_tc_map, u16 *tc_offset, + u16 *tc_valid, u16 *tc_size); +int hclge_comm_set_rss_tc_mode(struct hclge_comm_hw *hw, u16 *tc_offset, + u16 *tc_valid, u16 *tc_size); +int hclge_comm_set_rss_hash_key(struct hclge_comm_rss_cfg *rss_cfg, + struct hclge_comm_hw *hw, const u8 *key, + const u8 hfunc); +int hclge_comm_set_rss_tuple(struct hnae3_ae_dev *ae_dev, + struct hclge_comm_hw *hw, + struct hclge_comm_rss_cfg *rss_cfg, + struct ethtool_rxnfc *nfc); +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c new file mode 100644 index 000000000..f3c9395d8 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c @@ -0,0 +1,115 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2021-2021 Hisilicon Limited. 
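The indirection-table command declared in hclge_comm_rss.h above stores each queue id as an 8-bit low part in rss_qid_l[] plus one overflow bit packed into rss_qid_h[] at a 2-bit stride, which is exactly what hclge_comm_append_rss_msb_info() computes. A standalone sketch of that packing for one 16-entry command, reusing the constants from the header:

/* Sketch of how one RSS indirection-table command packs 16 queue ids:
 * the low 8 bits go to rss_qid_l[], and bit 8 of each id is packed into
 * rss_qid_h[] at a 2-bit stride, mirroring hclge_comm_append_rss_msb_info().
 */
#include <stdio.h>
#include <string.h>

#define RSS_CFG_TBL_SIZE	16	/* HCLGE_COMM_RSS_CFG_TBL_SIZE */
#define RSS_CFG_TBL_SIZE_H	4	/* HCLGE_COMM_RSS_CFG_TBL_SIZE_H */
#define RSS_CFG_TBL_BW_H	2	/* HCLGE_COMM_RSS_CFG_TBL_BW_H */
#define RSS_CFG_TBL_BW_L	8	/* HCLGE_COMM_RSS_CFG_TBL_BW_L */
#define BITS_PER_BYTE		8

int main(void)
{
	unsigned char qid_l[RSS_CFG_TBL_SIZE];
	unsigned char qid_h[RSS_CFG_TBL_SIZE_H];
	unsigned int j;

	memset(qid_h, 0, sizeof(qid_h));

	for (j = 0; j < RSS_CFG_TBL_SIZE; j++) {
		unsigned int qid = 250 + j;	/* example ids crossing the 255 boundary */
		unsigned int oft = j * RSS_CFG_TBL_BW_H / BITS_PER_BYTE;
		unsigned int val = ((qid >> RSS_CFG_TBL_BW_L) & 0x1) <<
				   (j * RSS_CFG_TBL_BW_H % BITS_PER_BYTE);

		qid_l[j] = qid & 0xff;
		qid_h[oft] |= val;
		printf("entry %2u: qid=%u low=0x%02x msb_byte=%u msb_bit=%u\n",
		       j, qid, qid_l[j], oft, j * RSS_CFG_TBL_BW_H % BITS_PER_BYTE);
	}
	return 0;
}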
+ +#include + +#include "hnae3.h" +#include "hclge_comm_cmd.h" +#include "hclge_comm_tqp_stats.h" + +u64 *hclge_comm_tqps_get_stats(struct hnae3_handle *handle, u64 *data) +{ + struct hnae3_knic_private_info *kinfo = &handle->kinfo; + struct hclge_comm_tqp *tqp; + u64 *buff = data; + u16 i; + + for (i = 0; i < kinfo->num_tqps; i++) { + tqp = container_of(kinfo->tqp[i], struct hclge_comm_tqp, q); + *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd; + } + + for (i = 0; i < kinfo->num_tqps; i++) { + tqp = container_of(kinfo->tqp[i], struct hclge_comm_tqp, q); + *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd; + } + + return buff; +} + +int hclge_comm_tqps_get_sset_count(struct hnae3_handle *handle) +{ + struct hnae3_knic_private_info *kinfo = &handle->kinfo; + + return kinfo->num_tqps * HCLGE_COMM_QUEUE_PAIR_SIZE; +} + +u8 *hclge_comm_tqps_get_strings(struct hnae3_handle *handle, u8 *data) +{ + struct hnae3_knic_private_info *kinfo = &handle->kinfo; + u8 *buff = data; + u16 i; + + for (i = 0; i < kinfo->num_tqps; i++) { + struct hclge_comm_tqp *tqp = + container_of(kinfo->tqp[i], struct hclge_comm_tqp, q); + snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd", tqp->index); + buff += ETH_GSTRING_LEN; + } + + for (i = 0; i < kinfo->num_tqps; i++) { + struct hclge_comm_tqp *tqp = + container_of(kinfo->tqp[i], struct hclge_comm_tqp, q); + snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd", tqp->index); + buff += ETH_GSTRING_LEN; + } + + return buff; +} + +int hclge_comm_tqps_update_stats(struct hnae3_handle *handle, + struct hclge_comm_hw *hw) +{ + struct hnae3_knic_private_info *kinfo = &handle->kinfo; + struct hclge_comm_tqp *tqp; + struct hclge_desc desc; + int ret; + u16 i; + + for (i = 0; i < kinfo->num_tqps; i++) { + tqp = container_of(kinfo->tqp[i], struct hclge_comm_tqp, q); + hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_RX_STATS, + true); + + desc.data[0] = cpu_to_le32(tqp->index); + ret = hclge_comm_cmd_send(hw, &desc, 1); + if (ret) { + dev_err(&hw->cmq.csq.pdev->dev, + "failed to get tqp stat, ret = %d, rx = %u.\n", + ret, i); + return ret; + } + tqp->tqp_stats.rcb_rx_ring_pktnum_rcd += + le32_to_cpu(desc.data[1]); + + hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_TX_STATS, + true); + + desc.data[0] = cpu_to_le32(tqp->index & 0x1ff); + ret = hclge_comm_cmd_send(hw, &desc, 1); + if (ret) { + dev_err(&hw->cmq.csq.pdev->dev, + "failed to get tqp stat, ret = %d, tx = %u.\n", + ret, i); + return ret; + } + tqp->tqp_stats.rcb_tx_ring_pktnum_rcd += + le32_to_cpu(desc.data[1]); + } + + return 0; +} + +void hclge_comm_reset_tqp_stats(struct hnae3_handle *handle) +{ + struct hnae3_knic_private_info *kinfo = &handle->kinfo; + struct hclge_comm_tqp *tqp; + struct hnae3_queue *queue; + u16 i; + + for (i = 0; i < kinfo->num_tqps; i++) { + queue = kinfo->tqp[i]; + tqp = container_of(queue, struct hclge_comm_tqp, q); + memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats)); + } +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.h b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.h new file mode 100644 index 000000000..a46350162 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +// Copyright (c) 2021-2021 Hisilicon Limited. 
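hclge_comm_tqps_get_strings() and hclge_comm_tqps_get_stats() above must emit the same layout: all TX ring counters first, then all RX ring counters, so ethtool can pair each "txqN_pktnum_rcd"/"rxqN_pktnum_rcd" name with its value. The small sketch below illustrates that ordering contract; the 32-byte name slot matches the kernel's ETH_GSTRING_LEN, and the counter values are placeholder numbers.

/* Sketch of the TQP ethtool layout: names and values are emitted in the
 * same order (all tx queues, then all rx queues), as the driver does in
 * hclge_comm_tqps_get_strings()/hclge_comm_tqps_get_stats().
 */
#include <stdio.h>

#define ETH_GSTRING_LEN	32	/* same value as the kernel's ETH_GSTRING_LEN */
#define NUM_TQPS	4	/* example queue-pair count */

int main(void)
{
	char names[2 * NUM_TQPS][ETH_GSTRING_LEN];
	unsigned long long stats[2 * NUM_TQPS];
	unsigned int i;

	/* tx counters first, then rx counters: both walks use the same order */
	for (i = 0; i < NUM_TQPS; i++) {
		snprintf(names[i], ETH_GSTRING_LEN, "txq%u_pktnum_rcd", i);
		stats[i] = 100ULL * i;		/* stand-in for rcb_tx_ring_pktnum_rcd */
	}
	for (i = 0; i < NUM_TQPS; i++) {
		snprintf(names[NUM_TQPS + i], ETH_GSTRING_LEN, "rxq%u_pktnum_rcd", i);
		stats[NUM_TQPS + i] = 200ULL * i;	/* stand-in for rcb_rx_ring_pktnum_rcd */
	}

	for (i = 0; i < 2 * NUM_TQPS; i++)
		printf("%-24s %llu\n", names[i], stats[i]);
	return 0;
}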
+ +#ifndef __HCLGE_COMM_TQP_STATS_H +#define __HCLGE_COMM_TQP_STATS_H +#include +#include +#include "hnae3.h" + +/* each tqp has TX & RX two queues */ +#define HCLGE_COMM_QUEUE_PAIR_SIZE 2 + +/* TQP stats */ +struct hclge_comm_tqp_stats { + /* query_tqp_tx_queue_statistics ,opcode id: 0x0B03 */ + u64 rcb_tx_ring_pktnum_rcd; /* 32bit */ + /* query_tqp_rx_queue_statistics ,opcode id: 0x0B13 */ + u64 rcb_rx_ring_pktnum_rcd; /* 32bit */ +}; + +struct hclge_comm_tqp { + /* copy of device pointer from pci_dev, + * used when perform DMA mapping + */ + struct device *dev; + struct hnae3_queue q; + struct hclge_comm_tqp_stats tqp_stats; + u16 index; /* Global index in a NIC controller */ + + bool alloced; +}; + +u64 *hclge_comm_tqps_get_stats(struct hnae3_handle *handle, u64 *data); +int hclge_comm_tqps_get_sset_count(struct hnae3_handle *handle); +u8 *hclge_comm_tqps_get_strings(struct hnae3_handle *handle, u8 *data); +void hclge_comm_reset_tqp_stats(struct hnae3_handle *handle); +int hclge_comm_tqps_update_stats(struct hnae3_handle *handle, + struct hclge_comm_hw *hw); +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c new file mode 100644 index 000000000..3b6dbf158 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c @@ -0,0 +1,130 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. + +#include "hnae3.h" +#include "hns3_enet.h" + +static int hns3_dcbnl_ieee_getets(struct net_device *ndev, struct ieee_ets *ets) +{ + struct hnae3_handle *h = hns3_get_handle(ndev); + + if (hns3_nic_resetting(ndev)) + return -EBUSY; + + if (h->kinfo.dcb_ops->ieee_getets) + return h->kinfo.dcb_ops->ieee_getets(h, ets); + + return -EOPNOTSUPP; +} + +static int hns3_dcbnl_ieee_setets(struct net_device *ndev, struct ieee_ets *ets) +{ + struct hnae3_handle *h = hns3_get_handle(ndev); + + if (hns3_nic_resetting(ndev)) + return -EBUSY; + + if (h->kinfo.dcb_ops->ieee_setets) + return h->kinfo.dcb_ops->ieee_setets(h, ets); + + return -EOPNOTSUPP; +} + +static int hns3_dcbnl_ieee_getpfc(struct net_device *ndev, struct ieee_pfc *pfc) +{ + struct hnae3_handle *h = hns3_get_handle(ndev); + + if (hns3_nic_resetting(ndev)) + return -EBUSY; + + if (h->kinfo.dcb_ops->ieee_getpfc) + return h->kinfo.dcb_ops->ieee_getpfc(h, pfc); + + return -EOPNOTSUPP; +} + +static int hns3_dcbnl_ieee_setpfc(struct net_device *ndev, struct ieee_pfc *pfc) +{ + struct hnae3_handle *h = hns3_get_handle(ndev); + + if (hns3_nic_resetting(ndev)) + return -EBUSY; + + if (h->kinfo.dcb_ops->ieee_setpfc) + return h->kinfo.dcb_ops->ieee_setpfc(h, pfc); + + return -EOPNOTSUPP; +} + +static int hns3_dcbnl_ieee_setapp(struct net_device *ndev, struct dcb_app *app) +{ + struct hnae3_handle *h = hns3_get_handle(ndev); + + if (hns3_nic_resetting(ndev)) + return -EBUSY; + + if (h->kinfo.dcb_ops->ieee_setapp) + return h->kinfo.dcb_ops->ieee_setapp(h, app); + + return -EOPNOTSUPP; +} + +static int hns3_dcbnl_ieee_delapp(struct net_device *ndev, struct dcb_app *app) +{ + struct hnae3_handle *h = hns3_get_handle(ndev); + + if (hns3_nic_resetting(ndev)) + return -EBUSY; + + if (h->kinfo.dcb_ops->ieee_setapp) + return h->kinfo.dcb_ops->ieee_delapp(h, app); + + return -EOPNOTSUPP; +} + +/* DCBX configuration */ +static u8 hns3_dcbnl_getdcbx(struct net_device *ndev) +{ + struct hnae3_handle *h = hns3_get_handle(ndev); + + if (h->kinfo.dcb_ops->getdcbx) + return h->kinfo.dcb_ops->getdcbx(h); + + return 0; +} + +/* return 0 if successful, otherwise fail */ 
+static u8 hns3_dcbnl_setdcbx(struct net_device *ndev, u8 mode) +{ + struct hnae3_handle *h = hns3_get_handle(ndev); + + if (h->kinfo.dcb_ops->setdcbx) + return h->kinfo.dcb_ops->setdcbx(h, mode); + + return 1; +} + +static const struct dcbnl_rtnl_ops hns3_dcbnl_ops = { + .ieee_getets = hns3_dcbnl_ieee_getets, + .ieee_setets = hns3_dcbnl_ieee_setets, + .ieee_getpfc = hns3_dcbnl_ieee_getpfc, + .ieee_setpfc = hns3_dcbnl_ieee_setpfc, + .ieee_setapp = hns3_dcbnl_ieee_setapp, + .ieee_delapp = hns3_dcbnl_ieee_delapp, + .getdcbx = hns3_dcbnl_getdcbx, + .setdcbx = hns3_dcbnl_setdcbx, +}; + +/* hclge_dcbnl_setup - DCBNL setup + * @handle: the corresponding vport handle + * Set up DCBNL + */ +void hns3_dcbnl_setup(struct hnae3_handle *handle) +{ + struct net_device *dev = handle->kinfo.netdev; + + if ((!handle->kinfo.dcb_ops) || (handle->flags & HNAE3_SUPPORT_VF)) + return; + + dev->dcbnl_ops = &hns3_dcbnl_ops; +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c new file mode 100644 index 000000000..d2603cfc1 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c @@ -0,0 +1,1443 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) 2018-2019 Hisilicon Limited. */ + +#include +#include + +#include "hnae3.h" +#include "hns3_debugfs.h" +#include "hns3_enet.h" + +static struct dentry *hns3_dbgfs_root; + +static struct hns3_dbg_dentry_info hns3_dbg_dentry[] = { + { + .name = "tm" + }, + { + .name = "tx_bd_info" + }, + { + .name = "rx_bd_info" + }, + { + .name = "mac_list" + }, + { + .name = "reg" + }, + { + .name = "queue" + }, + { + .name = "fd" + }, + /* keep common at the bottom and add new directory above */ + { + .name = "common" + }, +}; + +static int hns3_dbg_bd_file_init(struct hnae3_handle *handle, u32 cmd); +static int hns3_dbg_common_file_init(struct hnae3_handle *handle, u32 cmd); + +static struct hns3_dbg_cmd_info hns3_dbg_cmd[] = { + { + .name = "tm_nodes", + .cmd = HNAE3_DBG_CMD_TM_NODES, + .dentry = HNS3_DBG_DENTRY_TM, + .buf_len = HNS3_DBG_READ_LEN, + .init = hns3_dbg_common_file_init, + }, + { + .name = "tm_priority", + .cmd = HNAE3_DBG_CMD_TM_PRI, + .dentry = HNS3_DBG_DENTRY_TM, + .buf_len = HNS3_DBG_READ_LEN, + .init = hns3_dbg_common_file_init, + }, + { + .name = "tm_qset", + .cmd = HNAE3_DBG_CMD_TM_QSET, + .dentry = HNS3_DBG_DENTRY_TM, + .buf_len = HNS3_DBG_READ_LEN, + .init = hns3_dbg_common_file_init, + }, + { + .name = "tm_map", + .cmd = HNAE3_DBG_CMD_TM_MAP, + .dentry = HNS3_DBG_DENTRY_TM, + .buf_len = HNS3_DBG_READ_LEN_1MB, + .init = hns3_dbg_common_file_init, + }, + { + .name = "tm_pg", + .cmd = HNAE3_DBG_CMD_TM_PG, + .dentry = HNS3_DBG_DENTRY_TM, + .buf_len = HNS3_DBG_READ_LEN, + .init = hns3_dbg_common_file_init, + }, + { + .name = "tm_port", + .cmd = HNAE3_DBG_CMD_TM_PORT, + .dentry = HNS3_DBG_DENTRY_TM, + .buf_len = HNS3_DBG_READ_LEN, + .init = hns3_dbg_common_file_init, + }, + { + .name = "tc_sch_info", + .cmd = HNAE3_DBG_CMD_TC_SCH_INFO, + .dentry = HNS3_DBG_DENTRY_TM, + .buf_len = HNS3_DBG_READ_LEN, + .init = hns3_dbg_common_file_init, + }, + { + .name = "qos_pause_cfg", + .cmd = HNAE3_DBG_CMD_QOS_PAUSE_CFG, + .dentry = HNS3_DBG_DENTRY_TM, + .buf_len = HNS3_DBG_READ_LEN, + .init = hns3_dbg_common_file_init, + }, + { + .name = "qos_pri_map", + .cmd = HNAE3_DBG_CMD_QOS_PRI_MAP, + .dentry = HNS3_DBG_DENTRY_TM, + .buf_len = HNS3_DBG_READ_LEN, + .init = hns3_dbg_common_file_init, + }, + { + .name = "qos_dscp_map", + .cmd = HNAE3_DBG_CMD_QOS_DSCP_MAP, + .dentry = HNS3_DBG_DENTRY_TM, + 
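		/* Every entry in this table follows the same recipe: .name is
		 * the debugfs file to create, .dentry picks its parent
		 * directory, .cmd selects the dump to run and .buf_len bounds
		 * the generated text.  hns3_dbg_init() below walks the table
		 * and calls each entry's .init hook, so this entry ends up as
		 * "tm/qos_dscp_map" under the per-device debugfs directory
		 * (typically /sys/kernel/debug/hns3/<pci-address>/; the exact
		 * path depends on where debugfs is mounted).
		 */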
.buf_len = HNS3_DBG_READ_LEN, + .init = hns3_dbg_common_file_init, + }, + { + .name = "qos_buf_cfg", + .cmd = HNAE3_DBG_CMD_QOS_BUF_CFG, + .dentry = HNS3_DBG_DENTRY_TM, + .buf_len = HNS3_DBG_READ_LEN, + .init = hns3_dbg_common_file_init, + }, + { + .name = "dev_info", + .cmd = HNAE3_DBG_CMD_DEV_INFO, + .dentry = HNS3_DBG_DENTRY_COMMON, + .buf_len = HNS3_DBG_READ_LEN, + .init = hns3_dbg_common_file_init, + }, + { + .name = "tx_bd_queue", + .cmd = HNAE3_DBG_CMD_TX_BD, + .dentry = HNS3_DBG_DENTRY_TX_BD, + .buf_len = HNS3_DBG_READ_LEN_5MB, + .init = hns3_dbg_bd_file_init, + }, + { + .name = "rx_bd_queue", + .cmd = HNAE3_DBG_CMD_RX_BD, + .dentry = HNS3_DBG_DENTRY_RX_BD, + .buf_len = HNS3_DBG_READ_LEN_4MB, + .init = hns3_dbg_bd_file_init, + }, + { + .name = "uc", + .cmd = HNAE3_DBG_CMD_MAC_UC, + .dentry = HNS3_DBG_DENTRY_MAC, + .buf_len = HNS3_DBG_READ_LEN_128KB, + .init = hns3_dbg_common_file_init, + }, + { + .name = "mc", + .cmd = HNAE3_DBG_CMD_MAC_MC, + .dentry = HNS3_DBG_DENTRY_MAC, + .buf_len = HNS3_DBG_READ_LEN, + .init = hns3_dbg_common_file_init, + }, + { + .name = "mng_tbl", + .cmd = HNAE3_DBG_CMD_MNG_TBL, + .dentry = HNS3_DBG_DENTRY_COMMON, + .buf_len = HNS3_DBG_READ_LEN, + .init = hns3_dbg_common_file_init, + }, + { + .name = "loopback", + .cmd = HNAE3_DBG_CMD_LOOPBACK, + .dentry = HNS3_DBG_DENTRY_COMMON, + .buf_len = HNS3_DBG_READ_LEN, + .init = hns3_dbg_common_file_init, + }, + { + .name = "interrupt_info", + .cmd = HNAE3_DBG_CMD_INTERRUPT_INFO, + .dentry = HNS3_DBG_DENTRY_COMMON, + .buf_len = HNS3_DBG_READ_LEN, + .init = hns3_dbg_common_file_init, + }, + { + .name = "reset_info", + .cmd = HNAE3_DBG_CMD_RESET_INFO, + .dentry = HNS3_DBG_DENTRY_COMMON, + .buf_len = HNS3_DBG_READ_LEN, + .init = hns3_dbg_common_file_init, + }, + { + .name = "imp_info", + .cmd = HNAE3_DBG_CMD_IMP_INFO, + .dentry = HNS3_DBG_DENTRY_COMMON, + .buf_len = HNS3_DBG_READ_LEN, + .init = hns3_dbg_common_file_init, + }, + { + .name = "ncl_config", + .cmd = HNAE3_DBG_CMD_NCL_CONFIG, + .dentry = HNS3_DBG_DENTRY_COMMON, + .buf_len = HNS3_DBG_READ_LEN_128KB, + .init = hns3_dbg_common_file_init, + }, + { + .name = "mac_tnl_status", + .cmd = HNAE3_DBG_CMD_MAC_TNL_STATUS, + .dentry = HNS3_DBG_DENTRY_COMMON, + .buf_len = HNS3_DBG_READ_LEN, + .init = hns3_dbg_common_file_init, + }, + { + .name = "bios_common", + .cmd = HNAE3_DBG_CMD_REG_BIOS_COMMON, + .dentry = HNS3_DBG_DENTRY_REG, + .buf_len = HNS3_DBG_READ_LEN, + .init = hns3_dbg_common_file_init, + }, + { + .name = "ssu", + .cmd = HNAE3_DBG_CMD_REG_SSU, + .dentry = HNS3_DBG_DENTRY_REG, + .buf_len = HNS3_DBG_READ_LEN, + .init = hns3_dbg_common_file_init, + }, + { + .name = "igu_egu", + .cmd = HNAE3_DBG_CMD_REG_IGU_EGU, + .dentry = HNS3_DBG_DENTRY_REG, + .buf_len = HNS3_DBG_READ_LEN, + .init = hns3_dbg_common_file_init, + }, + { + .name = "rpu", + .cmd = HNAE3_DBG_CMD_REG_RPU, + .dentry = HNS3_DBG_DENTRY_REG, + .buf_len = HNS3_DBG_READ_LEN, + .init = hns3_dbg_common_file_init, + }, + { + .name = "ncsi", + .cmd = HNAE3_DBG_CMD_REG_NCSI, + .dentry = HNS3_DBG_DENTRY_REG, + .buf_len = HNS3_DBG_READ_LEN, + .init = hns3_dbg_common_file_init, + }, + { + .name = "rtc", + .cmd = HNAE3_DBG_CMD_REG_RTC, + .dentry = HNS3_DBG_DENTRY_REG, + .buf_len = HNS3_DBG_READ_LEN, + .init = hns3_dbg_common_file_init, + }, + { + .name = "ppp", + .cmd = HNAE3_DBG_CMD_REG_PPP, + .dentry = HNS3_DBG_DENTRY_REG, + .buf_len = HNS3_DBG_READ_LEN, + .init = hns3_dbg_common_file_init, + }, + { + .name = "rcb", + .cmd = HNAE3_DBG_CMD_REG_RCB, + .dentry = HNS3_DBG_DENTRY_REG, + .buf_len = HNS3_DBG_READ_LEN, + 
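		/* The .buf_len tiers used throughout this table size the
		 * one-shot dump buffer that hns3_dbg_read() allocates on the
		 * first read: 64KB covers the ordinary text dumps, while the
		 * register, BD and flow-director dumps get 128KB up to 5MB.
		 * As a rough, assumed upper bound (not a figure taken from
		 * the driver): a ring of ~32k descriptors at ~150 bytes of
		 * formatted text per BD approaches 5MB, the tx_bd_queue tier.
		 */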
.init = hns3_dbg_common_file_init, + }, + { + .name = "tqp", + .cmd = HNAE3_DBG_CMD_REG_TQP, + .dentry = HNS3_DBG_DENTRY_REG, + .buf_len = HNS3_DBG_READ_LEN_128KB, + .init = hns3_dbg_common_file_init, + }, + { + .name = "mac", + .cmd = HNAE3_DBG_CMD_REG_MAC, + .dentry = HNS3_DBG_DENTRY_REG, + .buf_len = HNS3_DBG_READ_LEN, + .init = hns3_dbg_common_file_init, + }, + { + .name = "dcb", + .cmd = HNAE3_DBG_CMD_REG_DCB, + .dentry = HNS3_DBG_DENTRY_REG, + .buf_len = HNS3_DBG_READ_LEN, + .init = hns3_dbg_common_file_init, + }, + { + .name = "queue_map", + .cmd = HNAE3_DBG_CMD_QUEUE_MAP, + .dentry = HNS3_DBG_DENTRY_QUEUE, + .buf_len = HNS3_DBG_READ_LEN, + .init = hns3_dbg_common_file_init, + }, + { + .name = "rx_queue_info", + .cmd = HNAE3_DBG_CMD_RX_QUEUE_INFO, + .dentry = HNS3_DBG_DENTRY_QUEUE, + .buf_len = HNS3_DBG_READ_LEN_1MB, + .init = hns3_dbg_common_file_init, + }, + { + .name = "tx_queue_info", + .cmd = HNAE3_DBG_CMD_TX_QUEUE_INFO, + .dentry = HNS3_DBG_DENTRY_QUEUE, + .buf_len = HNS3_DBG_READ_LEN_1MB, + .init = hns3_dbg_common_file_init, + }, + { + .name = "fd_tcam", + .cmd = HNAE3_DBG_CMD_FD_TCAM, + .dentry = HNS3_DBG_DENTRY_FD, + .buf_len = HNS3_DBG_READ_LEN_1MB, + .init = hns3_dbg_common_file_init, + }, + { + .name = "service_task_info", + .cmd = HNAE3_DBG_CMD_SERV_INFO, + .dentry = HNS3_DBG_DENTRY_COMMON, + .buf_len = HNS3_DBG_READ_LEN, + .init = hns3_dbg_common_file_init, + }, + { + .name = "vlan_config", + .cmd = HNAE3_DBG_CMD_VLAN_CONFIG, + .dentry = HNS3_DBG_DENTRY_COMMON, + .buf_len = HNS3_DBG_READ_LEN, + .init = hns3_dbg_common_file_init, + }, + { + .name = "ptp_info", + .cmd = HNAE3_DBG_CMD_PTP_INFO, + .dentry = HNS3_DBG_DENTRY_COMMON, + .buf_len = HNS3_DBG_READ_LEN, + .init = hns3_dbg_common_file_init, + }, + { + .name = "fd_counter", + .cmd = HNAE3_DBG_CMD_FD_COUNTER, + .dentry = HNS3_DBG_DENTRY_FD, + .buf_len = HNS3_DBG_READ_LEN, + .init = hns3_dbg_common_file_init, + }, + { + .name = "umv_info", + .cmd = HNAE3_DBG_CMD_UMV_INFO, + .dentry = HNS3_DBG_DENTRY_COMMON, + .buf_len = HNS3_DBG_READ_LEN, + .init = hns3_dbg_common_file_init, + }, + { + .name = "page_pool_info", + .cmd = HNAE3_DBG_CMD_PAGE_POOL_INFO, + .dentry = HNS3_DBG_DENTRY_COMMON, + .buf_len = HNS3_DBG_READ_LEN, + .init = hns3_dbg_common_file_init, + }, + { + .name = "coalesce_info", + .cmd = HNAE3_DBG_CMD_COAL_INFO, + .dentry = HNS3_DBG_DENTRY_COMMON, + .buf_len = HNS3_DBG_READ_LEN_1MB, + .init = hns3_dbg_common_file_init, + }, +}; + +static struct hns3_dbg_cap_info hns3_dbg_cap[] = { + { + .name = "support FD", + .cap_bit = HNAE3_DEV_SUPPORT_FD_B, + }, { + .name = "support GRO", + .cap_bit = HNAE3_DEV_SUPPORT_GRO_B, + }, { + .name = "support FEC", + .cap_bit = HNAE3_DEV_SUPPORT_FEC_B, + }, { + .name = "support UDP GSO", + .cap_bit = HNAE3_DEV_SUPPORT_UDP_GSO_B, + }, { + .name = "support PTP", + .cap_bit = HNAE3_DEV_SUPPORT_PTP_B, + }, { + .name = "support INT QL", + .cap_bit = HNAE3_DEV_SUPPORT_INT_QL_B, + }, { + .name = "support HW TX csum", + .cap_bit = HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, + }, { + .name = "support UDP tunnel csum", + .cap_bit = HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B, + }, { + .name = "support TX push", + .cap_bit = HNAE3_DEV_SUPPORT_TX_PUSH_B, + }, { + .name = "support imp-controlled PHY", + .cap_bit = HNAE3_DEV_SUPPORT_PHY_IMP_B, + }, { + .name = "support imp-controlled RAS", + .cap_bit = HNAE3_DEV_SUPPORT_RAS_IMP_B, + }, { + .name = "support rxd advanced layout", + .cap_bit = HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B, + }, { + .name = "support port vlan bypass", + .cap_bit = 
HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, + }, { + .name = "support modify vlan filter state", + .cap_bit = HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, + }, { + .name = "support FEC statistics", + .cap_bit = HNAE3_DEV_SUPPORT_FEC_STATS_B, + }, { + .name = "support lane num", + .cap_bit = HNAE3_DEV_SUPPORT_LANE_NUM_B, + } +}; + +static const struct hns3_dbg_item coal_info_items[] = { + { "VEC_ID", 2 }, + { "ALGO_STATE", 2 }, + { "PROFILE_ID", 2 }, + { "CQE_MODE", 2 }, + { "TUNE_STATE", 2 }, + { "STEPS_LEFT", 2 }, + { "STEPS_RIGHT", 2 }, + { "TIRED", 2 }, + { "SW_GL", 2 }, + { "SW_QL", 2 }, + { "HW_GL", 2 }, + { "HW_QL", 2 }, +}; + +static const char * const dim_cqe_mode_str[] = { "EQE", "CQE" }; +static const char * const dim_state_str[] = { "START", "IN_PROG", "APPLY" }; +static const char * const +dim_tune_stat_str[] = { "ON_TOP", "TIRED", "RIGHT", "LEFT" }; + +static void hns3_dbg_fill_content(char *content, u16 len, + const struct hns3_dbg_item *items, + const char **result, u16 size) +{ +#define HNS3_DBG_LINE_END_LEN 2 + char *pos = content; + u16 item_len; + u16 i; + + if (!len) { + return; + } else if (len <= HNS3_DBG_LINE_END_LEN) { + *pos++ = '\0'; + return; + } + + memset(content, ' ', len); + len -= HNS3_DBG_LINE_END_LEN; + + for (i = 0; i < size; i++) { + item_len = strlen(items[i].name) + items[i].interval; + if (len < item_len) + break; + + if (result) { + if (item_len < strlen(result[i])) + break; + memcpy(pos, result[i], strlen(result[i])); + } else { + memcpy(pos, items[i].name, strlen(items[i].name)); + } + pos += item_len; + len -= item_len; + } + *pos++ = '\n'; + *pos++ = '\0'; +} + +static void hns3_get_coal_info(struct hns3_enet_tqp_vector *tqp_vector, + char **result, int i, bool is_tx) +{ + unsigned int gl_offset, ql_offset; + struct hns3_enet_coalesce *coal; + unsigned int reg_val; + unsigned int j = 0; + struct dim *dim; + bool ql_enable; + + if (is_tx) { + coal = &tqp_vector->tx_group.coal; + dim = &tqp_vector->tx_group.dim; + gl_offset = HNS3_VECTOR_GL1_OFFSET; + ql_offset = HNS3_VECTOR_TX_QL_OFFSET; + ql_enable = tqp_vector->tx_group.coal.ql_enable; + } else { + coal = &tqp_vector->rx_group.coal; + dim = &tqp_vector->rx_group.dim; + gl_offset = HNS3_VECTOR_GL0_OFFSET; + ql_offset = HNS3_VECTOR_RX_QL_OFFSET; + ql_enable = tqp_vector->rx_group.coal.ql_enable; + } + + sprintf(result[j++], "%d", i); + sprintf(result[j++], "%s", dim->state < ARRAY_SIZE(dim_state_str) ? + dim_state_str[dim->state] : "unknown"); + sprintf(result[j++], "%u", dim->profile_ix); + sprintf(result[j++], "%s", dim->mode < ARRAY_SIZE(dim_cqe_mode_str) ? + dim_cqe_mode_str[dim->mode] : "unknown"); + sprintf(result[j++], "%s", + dim->tune_state < ARRAY_SIZE(dim_tune_stat_str) ? 
+ dim_tune_stat_str[dim->tune_state] : "unknown"); + sprintf(result[j++], "%u", dim->steps_left); + sprintf(result[j++], "%u", dim->steps_right); + sprintf(result[j++], "%u", dim->tired); + sprintf(result[j++], "%u", coal->int_gl); + sprintf(result[j++], "%u", coal->int_ql); + reg_val = readl(tqp_vector->mask_addr + gl_offset) & + HNS3_VECTOR_GL_MASK; + sprintf(result[j++], "%u", reg_val); + if (ql_enable) { + reg_val = readl(tqp_vector->mask_addr + ql_offset) & + HNS3_VECTOR_QL_MASK; + sprintf(result[j++], "%u", reg_val); + } else { + sprintf(result[j++], "NA"); + } +} + +static void hns3_dump_coal_info(struct hnae3_handle *h, char *buf, int len, + int *pos, bool is_tx) +{ + char data_str[ARRAY_SIZE(coal_info_items)][HNS3_DBG_DATA_STR_LEN]; + char *result[ARRAY_SIZE(coal_info_items)]; + struct hns3_enet_tqp_vector *tqp_vector; + struct hns3_nic_priv *priv = h->priv; + char content[HNS3_DBG_INFO_LEN]; + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(coal_info_items); i++) + result[i] = &data_str[i][0]; + + *pos += scnprintf(buf + *pos, len - *pos, + "%s interrupt coalesce info:\n", + is_tx ? "tx" : "rx"); + hns3_dbg_fill_content(content, sizeof(content), coal_info_items, + NULL, ARRAY_SIZE(coal_info_items)); + *pos += scnprintf(buf + *pos, len - *pos, "%s", content); + + for (i = 0; i < priv->vector_num; i++) { + tqp_vector = &priv->tqp_vector[i]; + hns3_get_coal_info(tqp_vector, result, i, is_tx); + hns3_dbg_fill_content(content, sizeof(content), coal_info_items, + (const char **)result, + ARRAY_SIZE(coal_info_items)); + *pos += scnprintf(buf + *pos, len - *pos, "%s", content); + } +} + +static int hns3_dbg_coal_info(struct hnae3_handle *h, char *buf, int len) +{ + int pos = 0; + + hns3_dump_coal_info(h, buf, len, &pos, true); + pos += scnprintf(buf + pos, len - pos, "\n"); + hns3_dump_coal_info(h, buf, len, &pos, false); + + return 0; +} + +static const struct hns3_dbg_item tx_spare_info_items[] = { + { "QUEUE_ID", 2 }, + { "COPYBREAK", 2 }, + { "LEN", 7 }, + { "NTU", 4 }, + { "NTC", 4 }, + { "LTC", 4 }, + { "DMA", 17 }, +}; + +static void hns3_dbg_tx_spare_info(struct hns3_enet_ring *ring, char *buf, + int len, u32 ring_num, int *pos) +{ + char data_str[ARRAY_SIZE(tx_spare_info_items)][HNS3_DBG_DATA_STR_LEN]; + struct hns3_tx_spare *tx_spare = ring->tx_spare; + char *result[ARRAY_SIZE(tx_spare_info_items)]; + char content[HNS3_DBG_INFO_LEN]; + u32 i, j; + + if (!tx_spare) { + *pos += scnprintf(buf + *pos, len - *pos, + "tx spare buffer is not enabled\n"); + return; + } + + for (i = 0; i < ARRAY_SIZE(tx_spare_info_items); i++) + result[i] = &data_str[i][0]; + + *pos += scnprintf(buf + *pos, len - *pos, "tx spare buffer info\n"); + hns3_dbg_fill_content(content, sizeof(content), tx_spare_info_items, + NULL, ARRAY_SIZE(tx_spare_info_items)); + *pos += scnprintf(buf + *pos, len - *pos, "%s", content); + + for (i = 0; i < ring_num; i++) { + j = 0; + sprintf(result[j++], "%u", i); + sprintf(result[j++], "%u", ring->tx_copybreak); + sprintf(result[j++], "%u", tx_spare->len); + sprintf(result[j++], "%u", tx_spare->next_to_use); + sprintf(result[j++], "%u", tx_spare->next_to_clean); + sprintf(result[j++], "%u", tx_spare->last_to_clean); + sprintf(result[j++], "%pad", &tx_spare->dma); + hns3_dbg_fill_content(content, sizeof(content), + tx_spare_info_items, + (const char **)result, + ARRAY_SIZE(tx_spare_info_items)); + *pos += scnprintf(buf + *pos, len - *pos, "%s", content); + } +} + +static const struct hns3_dbg_item rx_queue_info_items[] = { + { "QUEUE_ID", 2 }, + { "BD_NUM", 2 }, + { "BD_LEN", 
2 }, + { "TAIL", 2 }, + { "HEAD", 2 }, + { "FBDNUM", 2 }, + { "PKTNUM", 5 }, + { "COPYBREAK", 2 }, + { "RING_EN", 2 }, + { "RX_RING_EN", 2 }, + { "BASE_ADDR", 10 }, +}; + +static void hns3_dump_rx_queue_info(struct hns3_enet_ring *ring, + struct hnae3_ae_dev *ae_dev, char **result, + u32 index) +{ + u32 base_add_l, base_add_h; + u32 j = 0; + + sprintf(result[j++], "%u", index); + + sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base + + HNS3_RING_RX_RING_BD_NUM_REG)); + + sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base + + HNS3_RING_RX_RING_BD_LEN_REG)); + + sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base + + HNS3_RING_RX_RING_TAIL_REG)); + + sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base + + HNS3_RING_RX_RING_HEAD_REG)); + + sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base + + HNS3_RING_RX_RING_FBDNUM_REG)); + + sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base + + HNS3_RING_RX_RING_PKTNUM_RECORD_REG)); + sprintf(result[j++], "%u", ring->rx_copybreak); + + sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base + + HNS3_RING_EN_REG) ? "on" : "off"); + + if (hnae3_ae_dev_tqp_txrx_indep_supported(ae_dev)) + sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base + + HNS3_RING_RX_EN_REG) ? "on" : "off"); + else + sprintf(result[j++], "%s", "NA"); + + base_add_h = readl_relaxed(ring->tqp->io_base + + HNS3_RING_RX_RING_BASEADDR_H_REG); + base_add_l = readl_relaxed(ring->tqp->io_base + + HNS3_RING_RX_RING_BASEADDR_L_REG); + sprintf(result[j++], "0x%08x%08x", base_add_h, base_add_l); +} + +static int hns3_dbg_rx_queue_info(struct hnae3_handle *h, + char *buf, int len) +{ + char data_str[ARRAY_SIZE(rx_queue_info_items)][HNS3_DBG_DATA_STR_LEN]; + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev); + char *result[ARRAY_SIZE(rx_queue_info_items)]; + struct hns3_nic_priv *priv = h->priv; + char content[HNS3_DBG_INFO_LEN]; + struct hns3_enet_ring *ring; + int pos = 0; + u32 i; + + if (!priv->ring) { + dev_err(&h->pdev->dev, "priv->ring is NULL\n"); + return -EFAULT; + } + + for (i = 0; i < ARRAY_SIZE(rx_queue_info_items); i++) + result[i] = &data_str[i][0]; + + hns3_dbg_fill_content(content, sizeof(content), rx_queue_info_items, + NULL, ARRAY_SIZE(rx_queue_info_items)); + pos += scnprintf(buf + pos, len - pos, "%s", content); + for (i = 0; i < h->kinfo.num_tqps; i++) { + /* Each cycle needs to determine whether the instance is reset, + * to prevent reference to invalid memory. And need to ensure + * that the following code is executed within 100ms. 
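		 * The rx rings sit in the second half of priv->ring: the
		 * array holds num_tqps tx rings followed by num_tqps rx
		 * rings, which is why the lookup below uses
		 * (i + h->kinfo.num_tqps).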
+ */ + if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) || + test_bit(HNS3_NIC_STATE_RESETTING, &priv->state)) + return -EPERM; + + ring = &priv->ring[(u32)(i + h->kinfo.num_tqps)]; + hns3_dump_rx_queue_info(ring, ae_dev, result, i); + hns3_dbg_fill_content(content, sizeof(content), + rx_queue_info_items, + (const char **)result, + ARRAY_SIZE(rx_queue_info_items)); + pos += scnprintf(buf + pos, len - pos, "%s", content); + } + + return 0; +} + +static const struct hns3_dbg_item tx_queue_info_items[] = { + { "QUEUE_ID", 2 }, + { "BD_NUM", 2 }, + { "TC", 2 }, + { "TAIL", 2 }, + { "HEAD", 2 }, + { "FBDNUM", 2 }, + { "OFFSET", 2 }, + { "PKTNUM", 5 }, + { "RING_EN", 2 }, + { "TX_RING_EN", 2 }, + { "BASE_ADDR", 10 }, +}; + +static void hns3_dump_tx_queue_info(struct hns3_enet_ring *ring, + struct hnae3_ae_dev *ae_dev, char **result, + u32 index) +{ + u32 base_add_l, base_add_h; + u32 j = 0; + + sprintf(result[j++], "%u", index); + sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base + + HNS3_RING_TX_RING_BD_NUM_REG)); + + sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base + + HNS3_RING_TX_RING_TC_REG)); + + sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base + + HNS3_RING_TX_RING_TAIL_REG)); + + sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base + + HNS3_RING_TX_RING_HEAD_REG)); + + sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base + + HNS3_RING_TX_RING_FBDNUM_REG)); + + sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base + + HNS3_RING_TX_RING_OFFSET_REG)); + + sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base + + HNS3_RING_TX_RING_PKTNUM_RECORD_REG)); + + sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base + + HNS3_RING_EN_REG) ? "on" : "off"); + + if (hnae3_ae_dev_tqp_txrx_indep_supported(ae_dev)) + sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base + + HNS3_RING_TX_EN_REG) ? "on" : "off"); + else + sprintf(result[j++], "%s", "NA"); + + base_add_h = readl_relaxed(ring->tqp->io_base + + HNS3_RING_TX_RING_BASEADDR_H_REG); + base_add_l = readl_relaxed(ring->tqp->io_base + + HNS3_RING_TX_RING_BASEADDR_L_REG); + sprintf(result[j++], "0x%08x%08x", base_add_h, base_add_l); +} + +static int hns3_dbg_tx_queue_info(struct hnae3_handle *h, + char *buf, int len) +{ + char data_str[ARRAY_SIZE(tx_queue_info_items)][HNS3_DBG_DATA_STR_LEN]; + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev); + char *result[ARRAY_SIZE(tx_queue_info_items)]; + struct hns3_nic_priv *priv = h->priv; + char content[HNS3_DBG_INFO_LEN]; + struct hns3_enet_ring *ring; + int pos = 0; + u32 i; + + if (!priv->ring) { + dev_err(&h->pdev->dev, "priv->ring is NULL\n"); + return -EFAULT; + } + + for (i = 0; i < ARRAY_SIZE(tx_queue_info_items); i++) + result[i] = &data_str[i][0]; + + hns3_dbg_fill_content(content, sizeof(content), tx_queue_info_items, + NULL, ARRAY_SIZE(tx_queue_info_items)); + pos += scnprintf(buf + pos, len - pos, "%s", content); + + for (i = 0; i < h->kinfo.num_tqps; i++) { + /* Each cycle needs to determine whether the instance is reset, + * to prevent reference to invalid memory. And need to ensure + * that the following code is executed within 100ms. 
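		 * (Unlike the rx variant above, tx rings occupy the first
		 * num_tqps slots of priv->ring, so this loop indexes
		 * priv->ring[i] directly.)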
+ */ + if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) || + test_bit(HNS3_NIC_STATE_RESETTING, &priv->state)) + return -EPERM; + + ring = &priv->ring[i]; + hns3_dump_tx_queue_info(ring, ae_dev, result, i); + hns3_dbg_fill_content(content, sizeof(content), + tx_queue_info_items, + (const char **)result, + ARRAY_SIZE(tx_queue_info_items)); + pos += scnprintf(buf + pos, len - pos, "%s", content); + } + + hns3_dbg_tx_spare_info(ring, buf, len, h->kinfo.num_tqps, &pos); + + return 0; +} + +static const struct hns3_dbg_item queue_map_items[] = { + { "local_queue_id", 2 }, + { "global_queue_id", 2 }, + { "vector_id", 2 }, +}; + +static int hns3_dbg_queue_map(struct hnae3_handle *h, char *buf, int len) +{ + char data_str[ARRAY_SIZE(queue_map_items)][HNS3_DBG_DATA_STR_LEN]; + char *result[ARRAY_SIZE(queue_map_items)]; + struct hns3_nic_priv *priv = h->priv; + char content[HNS3_DBG_INFO_LEN]; + int pos = 0; + int j; + u32 i; + + if (!h->ae_algo->ops->get_global_queue_id) + return -EOPNOTSUPP; + + for (i = 0; i < ARRAY_SIZE(queue_map_items); i++) + result[i] = &data_str[i][0]; + + hns3_dbg_fill_content(content, sizeof(content), queue_map_items, + NULL, ARRAY_SIZE(queue_map_items)); + pos += scnprintf(buf + pos, len - pos, "%s", content); + for (i = 0; i < h->kinfo.num_tqps; i++) { + if (!priv->ring || !priv->ring[i].tqp_vector) + continue; + j = 0; + sprintf(result[j++], "%u", i); + sprintf(result[j++], "%u", + h->ae_algo->ops->get_global_queue_id(h, i)); + sprintf(result[j++], "%d", + priv->ring[i].tqp_vector->vector_irq); + hns3_dbg_fill_content(content, sizeof(content), queue_map_items, + (const char **)result, + ARRAY_SIZE(queue_map_items)); + pos += scnprintf(buf + pos, len - pos, "%s", content); + } + + return 0; +} + +static const struct hns3_dbg_item rx_bd_info_items[] = { + { "BD_IDX", 3 }, + { "L234_INFO", 2 }, + { "PKT_LEN", 3 }, + { "SIZE", 4 }, + { "RSS_HASH", 4 }, + { "FD_ID", 2 }, + { "VLAN_TAG", 2 }, + { "O_DM_VLAN_ID_FB", 2 }, + { "OT_VLAN_TAG", 2 }, + { "BD_BASE_INFO", 2 }, + { "PTYPE", 2 }, + { "HW_CSUM", 2 }, +}; + +static void hns3_dump_rx_bd_info(struct hns3_nic_priv *priv, + struct hns3_desc *desc, char **result, int idx) +{ + unsigned int j = 0; + + sprintf(result[j++], "%d", idx); + sprintf(result[j++], "%#x", le32_to_cpu(desc->rx.l234_info)); + sprintf(result[j++], "%u", le16_to_cpu(desc->rx.pkt_len)); + sprintf(result[j++], "%u", le16_to_cpu(desc->rx.size)); + sprintf(result[j++], "%#x", le32_to_cpu(desc->rx.rss_hash)); + sprintf(result[j++], "%u", le16_to_cpu(desc->rx.fd_id)); + sprintf(result[j++], "%u", le16_to_cpu(desc->rx.vlan_tag)); + sprintf(result[j++], "%u", le16_to_cpu(desc->rx.o_dm_vlan_id_fb)); + sprintf(result[j++], "%u", le16_to_cpu(desc->rx.ot_vlan_tag)); + sprintf(result[j++], "%#x", le32_to_cpu(desc->rx.bd_base_info)); + if (test_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state)) { + u32 ol_info = le32_to_cpu(desc->rx.ol_info); + + sprintf(result[j++], "%5lu", hnae3_get_field(ol_info, + HNS3_RXD_PTYPE_M, + HNS3_RXD_PTYPE_S)); + sprintf(result[j++], "%7u", le16_to_cpu(desc->csum)); + } else { + sprintf(result[j++], "NA"); + sprintf(result[j++], "NA"); + } +} + +static int hns3_dbg_rx_bd_info(struct hns3_dbg_data *d, char *buf, int len) +{ + char data_str[ARRAY_SIZE(rx_bd_info_items)][HNS3_DBG_DATA_STR_LEN]; + struct hns3_nic_priv *priv = d->handle->priv; + char *result[ARRAY_SIZE(rx_bd_info_items)]; + char content[HNS3_DBG_INFO_LEN]; + struct hns3_enet_ring *ring; + struct hns3_desc *desc; + unsigned int i; + int pos = 0; + + if (d->qid >= 
d->handle->kinfo.num_tqps) { + dev_err(&d->handle->pdev->dev, + "queue%u is not in use\n", d->qid); + return -EINVAL; + } + + for (i = 0; i < ARRAY_SIZE(rx_bd_info_items); i++) + result[i] = &data_str[i][0]; + + pos += scnprintf(buf + pos, len - pos, + "Queue %u rx bd info:\n", d->qid); + hns3_dbg_fill_content(content, sizeof(content), rx_bd_info_items, + NULL, ARRAY_SIZE(rx_bd_info_items)); + pos += scnprintf(buf + pos, len - pos, "%s", content); + + ring = &priv->ring[d->qid + d->handle->kinfo.num_tqps]; + for (i = 0; i < ring->desc_num; i++) { + desc = &ring->desc[i]; + + hns3_dump_rx_bd_info(priv, desc, result, i); + hns3_dbg_fill_content(content, sizeof(content), + rx_bd_info_items, (const char **)result, + ARRAY_SIZE(rx_bd_info_items)); + pos += scnprintf(buf + pos, len - pos, "%s", content); + } + + return 0; +} + +static const struct hns3_dbg_item tx_bd_info_items[] = { + { "BD_IDX", 2 }, + { "ADDRESS", 13 }, + { "VLAN_TAG", 2 }, + { "SIZE", 2 }, + { "T_CS_VLAN_TSO", 2 }, + { "OT_VLAN_TAG", 3 }, + { "TV", 5 }, + { "OLT_VLAN_LEN", 2 }, + { "PAYLEN_OL4CS", 2 }, + { "BD_FE_SC_VLD", 2 }, + { "MSS_HW_CSUM", 0 }, +}; + +static void hns3_dump_tx_bd_info(struct hns3_nic_priv *priv, + struct hns3_desc *desc, char **result, int idx) +{ + unsigned int j = 0; + + sprintf(result[j++], "%d", idx); + sprintf(result[j++], "%#llx", le64_to_cpu(desc->addr)); + sprintf(result[j++], "%u", le16_to_cpu(desc->tx.vlan_tag)); + sprintf(result[j++], "%u", le16_to_cpu(desc->tx.send_size)); + sprintf(result[j++], "%#x", + le32_to_cpu(desc->tx.type_cs_vlan_tso_len)); + sprintf(result[j++], "%u", le16_to_cpu(desc->tx.outer_vlan_tag)); + sprintf(result[j++], "%u", le16_to_cpu(desc->tx.tv)); + sprintf(result[j++], "%u", + le32_to_cpu(desc->tx.ol_type_vlan_len_msec)); + sprintf(result[j++], "%#x", le32_to_cpu(desc->tx.paylen_ol4cs)); + sprintf(result[j++], "%#x", le16_to_cpu(desc->tx.bdtp_fe_sc_vld_ra_ri)); + sprintf(result[j++], "%u", le16_to_cpu(desc->tx.mss_hw_csum)); +} + +static int hns3_dbg_tx_bd_info(struct hns3_dbg_data *d, char *buf, int len) +{ + char data_str[ARRAY_SIZE(tx_bd_info_items)][HNS3_DBG_DATA_STR_LEN]; + struct hns3_nic_priv *priv = d->handle->priv; + char *result[ARRAY_SIZE(tx_bd_info_items)]; + char content[HNS3_DBG_INFO_LEN]; + struct hns3_enet_ring *ring; + struct hns3_desc *desc; + unsigned int i; + int pos = 0; + + if (d->qid >= d->handle->kinfo.num_tqps) { + dev_err(&d->handle->pdev->dev, + "queue%u is not in use\n", d->qid); + return -EINVAL; + } + + for (i = 0; i < ARRAY_SIZE(tx_bd_info_items); i++) + result[i] = &data_str[i][0]; + + pos += scnprintf(buf + pos, len - pos, + "Queue %u tx bd info:\n", d->qid); + hns3_dbg_fill_content(content, sizeof(content), tx_bd_info_items, + NULL, ARRAY_SIZE(tx_bd_info_items)); + pos += scnprintf(buf + pos, len - pos, "%s", content); + + ring = &priv->ring[d->qid]; + for (i = 0; i < ring->desc_num; i++) { + desc = &ring->desc[i]; + + hns3_dump_tx_bd_info(priv, desc, result, i); + hns3_dbg_fill_content(content, sizeof(content), + tx_bd_info_items, (const char **)result, + ARRAY_SIZE(tx_bd_info_items)); + pos += scnprintf(buf + pos, len - pos, "%s", content); + } + + return 0; +} + +static void +hns3_dbg_dev_caps(struct hnae3_handle *h, char *buf, int len, int *pos) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev); + const char * const str[] = {"no", "yes"}; + unsigned long *caps = ae_dev->caps; + u32 i, state; + + *pos += scnprintf(buf + *pos, len - *pos, "dev capability:\n"); + + for (i = 0; i < ARRAY_SIZE(hns3_dbg_cap); i++) { + state = 
test_bit(hns3_dbg_cap[i].cap_bit, caps); + *pos += scnprintf(buf + *pos, len - *pos, "%s: %s\n", + hns3_dbg_cap[i].name, str[state]); + } + + *pos += scnprintf(buf + *pos, len - *pos, "\n"); +} + +static void +hns3_dbg_dev_specs(struct hnae3_handle *h, char *buf, int len, int *pos) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev); + struct hnae3_dev_specs *dev_specs = &ae_dev->dev_specs; + struct hnae3_knic_private_info *kinfo = &h->kinfo; + + *pos += scnprintf(buf + *pos, len - *pos, "dev_spec:\n"); + *pos += scnprintf(buf + *pos, len - *pos, "MAC entry num: %u\n", + dev_specs->mac_entry_num); + *pos += scnprintf(buf + *pos, len - *pos, "MNG entry num: %u\n", + dev_specs->mng_entry_num); + *pos += scnprintf(buf + *pos, len - *pos, "MAX non tso bd num: %u\n", + dev_specs->max_non_tso_bd_num); + *pos += scnprintf(buf + *pos, len - *pos, "RSS ind tbl size: %u\n", + dev_specs->rss_ind_tbl_size); + *pos += scnprintf(buf + *pos, len - *pos, "RSS key size: %u\n", + dev_specs->rss_key_size); + *pos += scnprintf(buf + *pos, len - *pos, "RSS size: %u\n", + kinfo->rss_size); + *pos += scnprintf(buf + *pos, len - *pos, "Allocated RSS size: %u\n", + kinfo->req_rss_size); + *pos += scnprintf(buf + *pos, len - *pos, + "Task queue pairs numbers: %u\n", + kinfo->num_tqps); + *pos += scnprintf(buf + *pos, len - *pos, "RX buffer length: %u\n", + kinfo->rx_buf_len); + *pos += scnprintf(buf + *pos, len - *pos, "Desc num per TX queue: %u\n", + kinfo->num_tx_desc); + *pos += scnprintf(buf + *pos, len - *pos, "Desc num per RX queue: %u\n", + kinfo->num_rx_desc); + *pos += scnprintf(buf + *pos, len - *pos, + "Total number of enabled TCs: %u\n", + kinfo->tc_info.num_tc); + *pos += scnprintf(buf + *pos, len - *pos, "MAX INT QL: %u\n", + dev_specs->int_ql_max); + *pos += scnprintf(buf + *pos, len - *pos, "MAX INT GL: %u\n", + dev_specs->max_int_gl); + *pos += scnprintf(buf + *pos, len - *pos, "MAX TM RATE: %u\n", + dev_specs->max_tm_rate); + *pos += scnprintf(buf + *pos, len - *pos, "MAX QSET number: %u\n", + dev_specs->max_qset_num); + *pos += scnprintf(buf + *pos, len - *pos, "umv size: %u\n", + dev_specs->umv_size); + *pos += scnprintf(buf + *pos, len - *pos, "mc mac size: %u\n", + dev_specs->mc_mac_size); + *pos += scnprintf(buf + *pos, len - *pos, "MAC statistics number: %u\n", + dev_specs->mac_stats_num); +} + +static int hns3_dbg_dev_info(struct hnae3_handle *h, char *buf, int len) +{ + int pos = 0; + + hns3_dbg_dev_caps(h, buf, len, &pos); + + hns3_dbg_dev_specs(h, buf, len, &pos); + + return 0; +} + +static const struct hns3_dbg_item page_pool_info_items[] = { + { "QUEUE_ID", 2 }, + { "ALLOCATE_CNT", 2 }, + { "FREE_CNT", 6 }, + { "POOL_SIZE(PAGE_NUM)", 2 }, + { "ORDER", 2 }, + { "NUMA_ID", 2 }, + { "MAX_LEN", 2 }, +}; + +static void hns3_dump_page_pool_info(struct hns3_enet_ring *ring, + char **result, u32 index) +{ + u32 j = 0; + + sprintf(result[j++], "%u", index); + sprintf(result[j++], "%u", + READ_ONCE(ring->page_pool->pages_state_hold_cnt)); + sprintf(result[j++], "%d", + atomic_read(&ring->page_pool->pages_state_release_cnt)); + sprintf(result[j++], "%u", ring->page_pool->p.pool_size); + sprintf(result[j++], "%u", ring->page_pool->p.order); + sprintf(result[j++], "%d", ring->page_pool->p.nid); + sprintf(result[j++], "%uK", ring->page_pool->p.max_len / 1024); +} + +static int +hns3_dbg_page_pool_info(struct hnae3_handle *h, char *buf, int len) +{ + char data_str[ARRAY_SIZE(page_pool_info_items)][HNS3_DBG_DATA_STR_LEN]; + char *result[ARRAY_SIZE(page_pool_info_items)]; + struct hns3_nic_priv 
*priv = h->priv; + char content[HNS3_DBG_INFO_LEN]; + struct hns3_enet_ring *ring; + int pos = 0; + u32 i; + + if (!priv->ring) { + dev_err(&h->pdev->dev, "priv->ring is NULL\n"); + return -EFAULT; + } + + if (!priv->ring[h->kinfo.num_tqps].page_pool) { + dev_err(&h->pdev->dev, "page pool is not initialized\n"); + return -EFAULT; + } + + for (i = 0; i < ARRAY_SIZE(page_pool_info_items); i++) + result[i] = &data_str[i][0]; + + hns3_dbg_fill_content(content, sizeof(content), page_pool_info_items, + NULL, ARRAY_SIZE(page_pool_info_items)); + pos += scnprintf(buf + pos, len - pos, "%s", content); + for (i = 0; i < h->kinfo.num_tqps; i++) { + if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) || + test_bit(HNS3_NIC_STATE_RESETTING, &priv->state)) + return -EPERM; + ring = &priv->ring[(u32)(i + h->kinfo.num_tqps)]; + hns3_dump_page_pool_info(ring, result, i); + hns3_dbg_fill_content(content, sizeof(content), + page_pool_info_items, + (const char **)result, + ARRAY_SIZE(page_pool_info_items)); + pos += scnprintf(buf + pos, len - pos, "%s", content); + } + + return 0; +} + +static int hns3_dbg_get_cmd_index(struct hns3_dbg_data *dbg_data, u32 *index) +{ + u32 i; + + for (i = 0; i < ARRAY_SIZE(hns3_dbg_cmd); i++) { + if (hns3_dbg_cmd[i].cmd == dbg_data->cmd) { + *index = i; + return 0; + } + } + + dev_err(&dbg_data->handle->pdev->dev, "unknown command(%d)\n", + dbg_data->cmd); + return -EINVAL; +} + +static const struct hns3_dbg_func hns3_dbg_cmd_func[] = { + { + .cmd = HNAE3_DBG_CMD_QUEUE_MAP, + .dbg_dump = hns3_dbg_queue_map, + }, + { + .cmd = HNAE3_DBG_CMD_DEV_INFO, + .dbg_dump = hns3_dbg_dev_info, + }, + { + .cmd = HNAE3_DBG_CMD_TX_BD, + .dbg_dump_bd = hns3_dbg_tx_bd_info, + }, + { + .cmd = HNAE3_DBG_CMD_RX_BD, + .dbg_dump_bd = hns3_dbg_rx_bd_info, + }, + { + .cmd = HNAE3_DBG_CMD_RX_QUEUE_INFO, + .dbg_dump = hns3_dbg_rx_queue_info, + }, + { + .cmd = HNAE3_DBG_CMD_TX_QUEUE_INFO, + .dbg_dump = hns3_dbg_tx_queue_info, + }, + { + .cmd = HNAE3_DBG_CMD_PAGE_POOL_INFO, + .dbg_dump = hns3_dbg_page_pool_info, + }, + { + .cmd = HNAE3_DBG_CMD_COAL_INFO, + .dbg_dump = hns3_dbg_coal_info, + }, +}; + +static int hns3_dbg_read_cmd(struct hns3_dbg_data *dbg_data, + enum hnae3_dbg_cmd cmd, char *buf, int len) +{ + const struct hnae3_ae_ops *ops = dbg_data->handle->ae_algo->ops; + const struct hns3_dbg_func *cmd_func; + u32 i; + + for (i = 0; i < ARRAY_SIZE(hns3_dbg_cmd_func); i++) { + if (cmd == hns3_dbg_cmd_func[i].cmd) { + cmd_func = &hns3_dbg_cmd_func[i]; + if (cmd_func->dbg_dump) + return cmd_func->dbg_dump(dbg_data->handle, buf, + len); + else + return cmd_func->dbg_dump_bd(dbg_data, buf, + len); + } + } + + if (!ops->dbg_read_cmd) + return -EOPNOTSUPP; + + return ops->dbg_read_cmd(dbg_data->handle, cmd, buf, len); +} + +static ssize_t hns3_dbg_read(struct file *filp, char __user *buffer, + size_t count, loff_t *ppos) +{ + struct hns3_dbg_data *dbg_data = filp->private_data; + struct hnae3_handle *handle = dbg_data->handle; + struct hns3_nic_priv *priv = handle->priv; + ssize_t size = 0; + char **save_buf; + char *read_buf; + u32 index; + int ret; + + ret = hns3_dbg_get_cmd_index(dbg_data, &index); + if (ret) + return ret; + + mutex_lock(&handle->dbgfs_lock); + save_buf = &handle->dbgfs_buf[index]; + + if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) || + test_bit(HNS3_NIC_STATE_RESETTING, &priv->state)) { + ret = -EBUSY; + goto out; + } + + if (*save_buf) { + read_buf = *save_buf; + } else { + read_buf = kvzalloc(hns3_dbg_cmd[index].buf_len, GFP_KERNEL); + if (!read_buf) { + ret = -ENOMEM; + goto out; + 
} + + /* save the buffer addr until the last read operation */ + *save_buf = read_buf; + + /* get data ready for the first time to read */ + ret = hns3_dbg_read_cmd(dbg_data, hns3_dbg_cmd[index].cmd, + read_buf, hns3_dbg_cmd[index].buf_len); + if (ret) + goto out; + } + + size = simple_read_from_buffer(buffer, count, ppos, read_buf, + strlen(read_buf)); + if (size > 0) { + mutex_unlock(&handle->dbgfs_lock); + return size; + } + +out: + /* free the buffer for the last read operation */ + if (*save_buf) { + kvfree(*save_buf); + *save_buf = NULL; + } + + mutex_unlock(&handle->dbgfs_lock); + return ret; +} + +static const struct file_operations hns3_dbg_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = hns3_dbg_read, +}; + +static int hns3_dbg_bd_file_init(struct hnae3_handle *handle, u32 cmd) +{ + struct dentry *entry_dir; + struct hns3_dbg_data *data; + u16 max_queue_num; + unsigned int i; + + entry_dir = hns3_dbg_dentry[hns3_dbg_cmd[cmd].dentry].dentry; + max_queue_num = hns3_get_max_available_channels(handle); + data = devm_kzalloc(&handle->pdev->dev, max_queue_num * sizeof(*data), + GFP_KERNEL); + if (!data) + return -ENOMEM; + + for (i = 0; i < max_queue_num; i++) { + char name[HNS3_DBG_FILE_NAME_LEN]; + + data[i].handle = handle; + data[i].cmd = hns3_dbg_cmd[cmd].cmd; + data[i].qid = i; + sprintf(name, "%s%u", hns3_dbg_cmd[cmd].name, i); + debugfs_create_file(name, 0400, entry_dir, &data[i], + &hns3_dbg_fops); + } + + return 0; +} + +static int +hns3_dbg_common_file_init(struct hnae3_handle *handle, u32 cmd) +{ + struct hns3_dbg_data *data; + struct dentry *entry_dir; + + data = devm_kzalloc(&handle->pdev->dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->handle = handle; + data->cmd = hns3_dbg_cmd[cmd].cmd; + entry_dir = hns3_dbg_dentry[hns3_dbg_cmd[cmd].dentry].dentry; + debugfs_create_file(hns3_dbg_cmd[cmd].name, 0400, entry_dir, + data, &hns3_dbg_fops); + + return 0; +} + +int hns3_dbg_init(struct hnae3_handle *handle) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev); + const char *name = pci_name(handle->pdev); + int ret; + u32 i; + + handle->dbgfs_buf = devm_kcalloc(&handle->pdev->dev, + ARRAY_SIZE(hns3_dbg_cmd), + sizeof(*handle->dbgfs_buf), + GFP_KERNEL); + if (!handle->dbgfs_buf) + return -ENOMEM; + + hns3_dbg_dentry[HNS3_DBG_DENTRY_COMMON].dentry = + debugfs_create_dir(name, hns3_dbgfs_root); + handle->hnae3_dbgfs = hns3_dbg_dentry[HNS3_DBG_DENTRY_COMMON].dentry; + + for (i = 0; i < HNS3_DBG_DENTRY_COMMON; i++) + hns3_dbg_dentry[i].dentry = + debugfs_create_dir(hns3_dbg_dentry[i].name, + handle->hnae3_dbgfs); + + mutex_init(&handle->dbgfs_lock); + + for (i = 0; i < ARRAY_SIZE(hns3_dbg_cmd); i++) { + if ((hns3_dbg_cmd[i].cmd == HNAE3_DBG_CMD_TM_NODES && + ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) || + (hns3_dbg_cmd[i].cmd == HNAE3_DBG_CMD_PTP_INFO && + !test_bit(HNAE3_DEV_SUPPORT_PTP_B, ae_dev->caps))) + continue; + + if (!hns3_dbg_cmd[i].init) { + dev_err(&handle->pdev->dev, + "cmd %s lack of init func\n", + hns3_dbg_cmd[i].name); + ret = -EINVAL; + goto out; + } + + ret = hns3_dbg_cmd[i].init(handle, i); + if (ret) { + dev_err(&handle->pdev->dev, "failed to init cmd %s\n", + hns3_dbg_cmd[i].name); + goto out; + } + } + + return 0; + +out: + debugfs_remove_recursive(handle->hnae3_dbgfs); + handle->hnae3_dbgfs = NULL; + mutex_destroy(&handle->dbgfs_lock); + return ret; +} + +void hns3_dbg_uninit(struct hnae3_handle *handle) +{ + u32 i; + + debugfs_remove_recursive(handle->hnae3_dbgfs); + handle->hnae3_dbgfs = NULL; + 
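	/* Dump buffers are allocated lazily on first read (see
	 * hns3_dbg_read() above), so release whatever is still cached
	 * for each command here.
	 */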
+ for (i = 0; i < ARRAY_SIZE(hns3_dbg_cmd); i++) + if (handle->dbgfs_buf[i]) { + kvfree(handle->dbgfs_buf[i]); + handle->dbgfs_buf[i] = NULL; + } + + mutex_destroy(&handle->dbgfs_lock); +} + +void hns3_dbg_register_debugfs(const char *debugfs_dir_name) +{ + hns3_dbgfs_root = debugfs_create_dir(debugfs_dir_name, NULL); +} + +void hns3_dbg_unregister_debugfs(void) +{ + debugfs_remove_recursive(hns3_dbgfs_root); + hns3_dbgfs_root = NULL; +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h new file mode 100644 index 000000000..4a5ef8a90 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2021 Hisilicon Limited. */ + +#ifndef __HNS3_DEBUGFS_H +#define __HNS3_DEBUGFS_H + +#include "hnae3.h" + +#define HNS3_DBG_READ_LEN 65536 +#define HNS3_DBG_READ_LEN_128KB 0x20000 +#define HNS3_DBG_READ_LEN_1MB 0x100000 +#define HNS3_DBG_READ_LEN_4MB 0x400000 +#define HNS3_DBG_READ_LEN_5MB 0x500000 +#define HNS3_DBG_WRITE_LEN 1024 + +#define HNS3_DBG_DATA_STR_LEN 32 +#define HNS3_DBG_INFO_LEN 256 +#define HNS3_DBG_ITEM_NAME_LEN 32 +#define HNS3_DBG_FILE_NAME_LEN 16 + +struct hns3_dbg_item { + char name[HNS3_DBG_ITEM_NAME_LEN]; + u16 interval; /* blank numbers after the item */ +}; + +struct hns3_dbg_data { + struct hnae3_handle *handle; + enum hnae3_dbg_cmd cmd; + u16 qid; +}; + +enum hns3_dbg_dentry_type { + HNS3_DBG_DENTRY_TM, + HNS3_DBG_DENTRY_TX_BD, + HNS3_DBG_DENTRY_RX_BD, + HNS3_DBG_DENTRY_MAC, + HNS3_DBG_DENTRY_REG, + HNS3_DBG_DENTRY_QUEUE, + HNS3_DBG_DENTRY_FD, + HNS3_DBG_DENTRY_COMMON, +}; + +struct hns3_dbg_dentry_info { + const char *name; + struct dentry *dentry; +}; + +struct hns3_dbg_cmd_info { + const char *name; + enum hnae3_dbg_cmd cmd; + enum hns3_dbg_dentry_type dentry; + u32 buf_len; + int (*init)(struct hnae3_handle *handle, unsigned int cmd); +}; + +struct hns3_dbg_func { + enum hnae3_dbg_cmd cmd; + int (*dbg_dump)(struct hnae3_handle *handle, char *buf, int len); + int (*dbg_dump_bd)(struct hns3_dbg_data *data, char *buf, int len); +}; + +struct hns3_dbg_cap_info { + const char *name; + enum HNAE3_DEV_CAP_BITS cap_bit; +}; + +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c new file mode 100644 index 000000000..78d6752fe --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -0,0 +1,6012 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. + +#include +#include +#include +#ifdef CONFIG_RFS_ACCEL +#include +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "hnae3.h" +#include "hns3_enet.h" +/* All hns3 tracepoints are defined by the include below, which + * must be included exactly once across the whole kernel with + * CREATE_TRACE_POINTS defined + */ +#define CREATE_TRACE_POINTS +#include "hns3_trace.h" + +#define hns3_set_field(origin, shift, val) ((origin) |= (val) << (shift)) +#define hns3_tx_bd_count(S) DIV_ROUND_UP(S, HNS3_MAX_BD_SIZE) + +#define hns3_rl_err(fmt, ...) 
\ + do { \ + if (net_ratelimit()) \ + netdev_err(fmt, ##__VA_ARGS__); \ + } while (0) + +static void hns3_clear_all_ring(struct hnae3_handle *h, bool force); + +static const char hns3_driver_name[] = "hns3"; +static const char hns3_driver_string[] = + "Hisilicon Ethernet Network Driver for Hip08 Family"; +static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation."; +static struct hnae3_client client; + +static int debug = -1; +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, " Network interface message level setting"); + +static unsigned int tx_sgl = 1; +module_param(tx_sgl, uint, 0600); +MODULE_PARM_DESC(tx_sgl, "Minimum number of frags when using dma_map_sg() to optimize the IOMMU mapping"); + +static bool page_pool_enabled = true; +module_param(page_pool_enabled, bool, 0400); + +#define HNS3_SGL_SIZE(nfrag) (sizeof(struct scatterlist) * (nfrag) + \ + sizeof(struct sg_table)) +#define HNS3_MAX_SGL_SIZE ALIGN(HNS3_SGL_SIZE(HNS3_MAX_TSO_BD_NUM), \ + dma_get_cache_alignment()) + +#define DEFAULT_MSG_LEVEL (NETIF_MSG_PROBE | NETIF_MSG_LINK | \ + NETIF_MSG_IFDOWN | NETIF_MSG_IFUP) + +#define HNS3_INNER_VLAN_TAG 1 +#define HNS3_OUTER_VLAN_TAG 2 + +#define HNS3_MIN_TX_LEN 33U +#define HNS3_MIN_TUN_PKT_LEN 65U + +/* hns3_pci_tbl - PCI Device ID Table + * + * Last entry must be all 0s + * + * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, + * Class, Class Mask, private data (not used) } + */ +static const struct pci_device_id hns3_pci_tbl[] = { + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), + HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), + HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), + HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), + HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), + HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), + HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_VF), 0}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_RDMA_DCB_PFC_VF), + HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, + /* required last entry */ + {0,} +}; +MODULE_DEVICE_TABLE(pci, hns3_pci_tbl); + +#define HNS3_RX_PTYPE_ENTRY(ptype, l, s, t, h) \ + { ptype, \ + l, \ + CHECKSUM_##s, \ + HNS3_L3_TYPE_##t, \ + 1, \ + h} + +#define HNS3_RX_PTYPE_UNUSED_ENTRY(ptype) \ + { ptype, 0, CHECKSUM_NONE, HNS3_L3_TYPE_PARSE_FAIL, 0, \ + PKT_HASH_TYPE_NONE } + +static const struct hns3_rx_ptype hns3_rx_ptype_tbl[] = { + HNS3_RX_PTYPE_UNUSED_ENTRY(0), + HNS3_RX_PTYPE_ENTRY(1, 0, COMPLETE, ARP, PKT_HASH_TYPE_NONE), + HNS3_RX_PTYPE_ENTRY(2, 0, COMPLETE, RARP, PKT_HASH_TYPE_NONE), + HNS3_RX_PTYPE_ENTRY(3, 0, COMPLETE, LLDP, PKT_HASH_TYPE_NONE), + HNS3_RX_PTYPE_ENTRY(4, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE), + HNS3_RX_PTYPE_ENTRY(5, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE), + HNS3_RX_PTYPE_ENTRY(6, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE), + HNS3_RX_PTYPE_ENTRY(7, 0, COMPLETE, CNM, PKT_HASH_TYPE_NONE), + HNS3_RX_PTYPE_ENTRY(8, 0, NONE, PARSE_FAIL, PKT_HASH_TYPE_NONE), + HNS3_RX_PTYPE_UNUSED_ENTRY(9), + HNS3_RX_PTYPE_UNUSED_ENTRY(10), + HNS3_RX_PTYPE_UNUSED_ENTRY(11), + HNS3_RX_PTYPE_UNUSED_ENTRY(12), + HNS3_RX_PTYPE_UNUSED_ENTRY(13), + HNS3_RX_PTYPE_UNUSED_ENTRY(14), + HNS3_RX_PTYPE_UNUSED_ENTRY(15), + HNS3_RX_PTYPE_ENTRY(16, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE), + HNS3_RX_PTYPE_ENTRY(17, 0, COMPLETE, 
IPV4, PKT_HASH_TYPE_NONE), + HNS3_RX_PTYPE_ENTRY(18, 0, COMPLETE, IPV4, PKT_HASH_TYPE_NONE), + HNS3_RX_PTYPE_ENTRY(19, 0, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4), + HNS3_RX_PTYPE_ENTRY(20, 0, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4), + HNS3_RX_PTYPE_ENTRY(21, 0, NONE, IPV4, PKT_HASH_TYPE_NONE), + HNS3_RX_PTYPE_ENTRY(22, 0, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4), + HNS3_RX_PTYPE_ENTRY(23, 0, NONE, IPV4, PKT_HASH_TYPE_L3), + HNS3_RX_PTYPE_ENTRY(24, 0, NONE, IPV4, PKT_HASH_TYPE_L3), + HNS3_RX_PTYPE_ENTRY(25, 0, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4), + HNS3_RX_PTYPE_UNUSED_ENTRY(26), + HNS3_RX_PTYPE_UNUSED_ENTRY(27), + HNS3_RX_PTYPE_UNUSED_ENTRY(28), + HNS3_RX_PTYPE_ENTRY(29, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE), + HNS3_RX_PTYPE_ENTRY(30, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE), + HNS3_RX_PTYPE_ENTRY(31, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3), + HNS3_RX_PTYPE_ENTRY(32, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3), + HNS3_RX_PTYPE_ENTRY(33, 1, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4), + HNS3_RX_PTYPE_ENTRY(34, 1, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4), + HNS3_RX_PTYPE_ENTRY(35, 1, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4), + HNS3_RX_PTYPE_ENTRY(36, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3), + HNS3_RX_PTYPE_ENTRY(37, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3), + HNS3_RX_PTYPE_UNUSED_ENTRY(38), + HNS3_RX_PTYPE_ENTRY(39, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3), + HNS3_RX_PTYPE_ENTRY(40, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3), + HNS3_RX_PTYPE_ENTRY(41, 1, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4), + HNS3_RX_PTYPE_ENTRY(42, 1, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4), + HNS3_RX_PTYPE_ENTRY(43, 1, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4), + HNS3_RX_PTYPE_ENTRY(44, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3), + HNS3_RX_PTYPE_ENTRY(45, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3), + HNS3_RX_PTYPE_UNUSED_ENTRY(46), + HNS3_RX_PTYPE_UNUSED_ENTRY(47), + HNS3_RX_PTYPE_UNUSED_ENTRY(48), + HNS3_RX_PTYPE_UNUSED_ENTRY(49), + HNS3_RX_PTYPE_UNUSED_ENTRY(50), + HNS3_RX_PTYPE_UNUSED_ENTRY(51), + HNS3_RX_PTYPE_UNUSED_ENTRY(52), + HNS3_RX_PTYPE_UNUSED_ENTRY(53), + HNS3_RX_PTYPE_UNUSED_ENTRY(54), + HNS3_RX_PTYPE_UNUSED_ENTRY(55), + HNS3_RX_PTYPE_UNUSED_ENTRY(56), + HNS3_RX_PTYPE_UNUSED_ENTRY(57), + HNS3_RX_PTYPE_UNUSED_ENTRY(58), + HNS3_RX_PTYPE_UNUSED_ENTRY(59), + HNS3_RX_PTYPE_UNUSED_ENTRY(60), + HNS3_RX_PTYPE_UNUSED_ENTRY(61), + HNS3_RX_PTYPE_UNUSED_ENTRY(62), + HNS3_RX_PTYPE_UNUSED_ENTRY(63), + HNS3_RX_PTYPE_UNUSED_ENTRY(64), + HNS3_RX_PTYPE_UNUSED_ENTRY(65), + HNS3_RX_PTYPE_UNUSED_ENTRY(66), + HNS3_RX_PTYPE_UNUSED_ENTRY(67), + HNS3_RX_PTYPE_UNUSED_ENTRY(68), + HNS3_RX_PTYPE_UNUSED_ENTRY(69), + HNS3_RX_PTYPE_UNUSED_ENTRY(70), + HNS3_RX_PTYPE_UNUSED_ENTRY(71), + HNS3_RX_PTYPE_UNUSED_ENTRY(72), + HNS3_RX_PTYPE_UNUSED_ENTRY(73), + HNS3_RX_PTYPE_UNUSED_ENTRY(74), + HNS3_RX_PTYPE_UNUSED_ENTRY(75), + HNS3_RX_PTYPE_UNUSED_ENTRY(76), + HNS3_RX_PTYPE_UNUSED_ENTRY(77), + HNS3_RX_PTYPE_UNUSED_ENTRY(78), + HNS3_RX_PTYPE_UNUSED_ENTRY(79), + HNS3_RX_PTYPE_UNUSED_ENTRY(80), + HNS3_RX_PTYPE_UNUSED_ENTRY(81), + HNS3_RX_PTYPE_UNUSED_ENTRY(82), + HNS3_RX_PTYPE_UNUSED_ENTRY(83), + HNS3_RX_PTYPE_UNUSED_ENTRY(84), + HNS3_RX_PTYPE_UNUSED_ENTRY(85), + HNS3_RX_PTYPE_UNUSED_ENTRY(86), + HNS3_RX_PTYPE_UNUSED_ENTRY(87), + HNS3_RX_PTYPE_UNUSED_ENTRY(88), + HNS3_RX_PTYPE_UNUSED_ENTRY(89), + HNS3_RX_PTYPE_UNUSED_ENTRY(90), + HNS3_RX_PTYPE_UNUSED_ENTRY(91), + HNS3_RX_PTYPE_UNUSED_ENTRY(92), + HNS3_RX_PTYPE_UNUSED_ENTRY(93), + HNS3_RX_PTYPE_UNUSED_ENTRY(94), + HNS3_RX_PTYPE_UNUSED_ENTRY(95), + HNS3_RX_PTYPE_UNUSED_ENTRY(96), + HNS3_RX_PTYPE_UNUSED_ENTRY(97), + HNS3_RX_PTYPE_UNUSED_ENTRY(98), + 
HNS3_RX_PTYPE_UNUSED_ENTRY(99), + HNS3_RX_PTYPE_UNUSED_ENTRY(100), + HNS3_RX_PTYPE_UNUSED_ENTRY(101), + HNS3_RX_PTYPE_UNUSED_ENTRY(102), + HNS3_RX_PTYPE_UNUSED_ENTRY(103), + HNS3_RX_PTYPE_UNUSED_ENTRY(104), + HNS3_RX_PTYPE_UNUSED_ENTRY(105), + HNS3_RX_PTYPE_UNUSED_ENTRY(106), + HNS3_RX_PTYPE_UNUSED_ENTRY(107), + HNS3_RX_PTYPE_UNUSED_ENTRY(108), + HNS3_RX_PTYPE_UNUSED_ENTRY(109), + HNS3_RX_PTYPE_UNUSED_ENTRY(110), + HNS3_RX_PTYPE_ENTRY(111, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3), + HNS3_RX_PTYPE_ENTRY(112, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3), + HNS3_RX_PTYPE_ENTRY(113, 0, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4), + HNS3_RX_PTYPE_ENTRY(114, 0, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4), + HNS3_RX_PTYPE_ENTRY(115, 0, NONE, IPV6, PKT_HASH_TYPE_L3), + HNS3_RX_PTYPE_ENTRY(116, 0, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4), + HNS3_RX_PTYPE_ENTRY(117, 0, NONE, IPV6, PKT_HASH_TYPE_L3), + HNS3_RX_PTYPE_ENTRY(118, 0, NONE, IPV6, PKT_HASH_TYPE_L3), + HNS3_RX_PTYPE_ENTRY(119, 0, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4), + HNS3_RX_PTYPE_UNUSED_ENTRY(120), + HNS3_RX_PTYPE_UNUSED_ENTRY(121), + HNS3_RX_PTYPE_UNUSED_ENTRY(122), + HNS3_RX_PTYPE_ENTRY(123, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE), + HNS3_RX_PTYPE_ENTRY(124, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE), + HNS3_RX_PTYPE_ENTRY(125, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3), + HNS3_RX_PTYPE_ENTRY(126, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3), + HNS3_RX_PTYPE_ENTRY(127, 1, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4), + HNS3_RX_PTYPE_ENTRY(128, 1, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4), + HNS3_RX_PTYPE_ENTRY(129, 1, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4), + HNS3_RX_PTYPE_ENTRY(130, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3), + HNS3_RX_PTYPE_ENTRY(131, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3), + HNS3_RX_PTYPE_UNUSED_ENTRY(132), + HNS3_RX_PTYPE_ENTRY(133, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3), + HNS3_RX_PTYPE_ENTRY(134, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3), + HNS3_RX_PTYPE_ENTRY(135, 1, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4), + HNS3_RX_PTYPE_ENTRY(136, 1, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4), + HNS3_RX_PTYPE_ENTRY(137, 1, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4), + HNS3_RX_PTYPE_ENTRY(138, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3), + HNS3_RX_PTYPE_ENTRY(139, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3), + HNS3_RX_PTYPE_UNUSED_ENTRY(140), + HNS3_RX_PTYPE_UNUSED_ENTRY(141), + HNS3_RX_PTYPE_UNUSED_ENTRY(142), + HNS3_RX_PTYPE_UNUSED_ENTRY(143), + HNS3_RX_PTYPE_UNUSED_ENTRY(144), + HNS3_RX_PTYPE_UNUSED_ENTRY(145), + HNS3_RX_PTYPE_UNUSED_ENTRY(146), + HNS3_RX_PTYPE_UNUSED_ENTRY(147), + HNS3_RX_PTYPE_UNUSED_ENTRY(148), + HNS3_RX_PTYPE_UNUSED_ENTRY(149), + HNS3_RX_PTYPE_UNUSED_ENTRY(150), + HNS3_RX_PTYPE_UNUSED_ENTRY(151), + HNS3_RX_PTYPE_UNUSED_ENTRY(152), + HNS3_RX_PTYPE_UNUSED_ENTRY(153), + HNS3_RX_PTYPE_UNUSED_ENTRY(154), + HNS3_RX_PTYPE_UNUSED_ENTRY(155), + HNS3_RX_PTYPE_UNUSED_ENTRY(156), + HNS3_RX_PTYPE_UNUSED_ENTRY(157), + HNS3_RX_PTYPE_UNUSED_ENTRY(158), + HNS3_RX_PTYPE_UNUSED_ENTRY(159), + HNS3_RX_PTYPE_UNUSED_ENTRY(160), + HNS3_RX_PTYPE_UNUSED_ENTRY(161), + HNS3_RX_PTYPE_UNUSED_ENTRY(162), + HNS3_RX_PTYPE_UNUSED_ENTRY(163), + HNS3_RX_PTYPE_UNUSED_ENTRY(164), + HNS3_RX_PTYPE_UNUSED_ENTRY(165), + HNS3_RX_PTYPE_UNUSED_ENTRY(166), + HNS3_RX_PTYPE_UNUSED_ENTRY(167), + HNS3_RX_PTYPE_UNUSED_ENTRY(168), + HNS3_RX_PTYPE_UNUSED_ENTRY(169), + HNS3_RX_PTYPE_UNUSED_ENTRY(170), + HNS3_RX_PTYPE_UNUSED_ENTRY(171), + HNS3_RX_PTYPE_UNUSED_ENTRY(172), + HNS3_RX_PTYPE_UNUSED_ENTRY(173), + HNS3_RX_PTYPE_UNUSED_ENTRY(174), + HNS3_RX_PTYPE_UNUSED_ENTRY(175), + HNS3_RX_PTYPE_UNUSED_ENTRY(176), + HNS3_RX_PTYPE_UNUSED_ENTRY(177), + 
HNS3_RX_PTYPE_UNUSED_ENTRY(178), + HNS3_RX_PTYPE_UNUSED_ENTRY(179), + HNS3_RX_PTYPE_UNUSED_ENTRY(180), + HNS3_RX_PTYPE_UNUSED_ENTRY(181), + HNS3_RX_PTYPE_UNUSED_ENTRY(182), + HNS3_RX_PTYPE_UNUSED_ENTRY(183), + HNS3_RX_PTYPE_UNUSED_ENTRY(184), + HNS3_RX_PTYPE_UNUSED_ENTRY(185), + HNS3_RX_PTYPE_UNUSED_ENTRY(186), + HNS3_RX_PTYPE_UNUSED_ENTRY(187), + HNS3_RX_PTYPE_UNUSED_ENTRY(188), + HNS3_RX_PTYPE_UNUSED_ENTRY(189), + HNS3_RX_PTYPE_UNUSED_ENTRY(190), + HNS3_RX_PTYPE_UNUSED_ENTRY(191), + HNS3_RX_PTYPE_UNUSED_ENTRY(192), + HNS3_RX_PTYPE_UNUSED_ENTRY(193), + HNS3_RX_PTYPE_UNUSED_ENTRY(194), + HNS3_RX_PTYPE_UNUSED_ENTRY(195), + HNS3_RX_PTYPE_UNUSED_ENTRY(196), + HNS3_RX_PTYPE_UNUSED_ENTRY(197), + HNS3_RX_PTYPE_UNUSED_ENTRY(198), + HNS3_RX_PTYPE_UNUSED_ENTRY(199), + HNS3_RX_PTYPE_UNUSED_ENTRY(200), + HNS3_RX_PTYPE_UNUSED_ENTRY(201), + HNS3_RX_PTYPE_UNUSED_ENTRY(202), + HNS3_RX_PTYPE_UNUSED_ENTRY(203), + HNS3_RX_PTYPE_UNUSED_ENTRY(204), + HNS3_RX_PTYPE_UNUSED_ENTRY(205), + HNS3_RX_PTYPE_UNUSED_ENTRY(206), + HNS3_RX_PTYPE_UNUSED_ENTRY(207), + HNS3_RX_PTYPE_UNUSED_ENTRY(208), + HNS3_RX_PTYPE_UNUSED_ENTRY(209), + HNS3_RX_PTYPE_UNUSED_ENTRY(210), + HNS3_RX_PTYPE_UNUSED_ENTRY(211), + HNS3_RX_PTYPE_UNUSED_ENTRY(212), + HNS3_RX_PTYPE_UNUSED_ENTRY(213), + HNS3_RX_PTYPE_UNUSED_ENTRY(214), + HNS3_RX_PTYPE_UNUSED_ENTRY(215), + HNS3_RX_PTYPE_UNUSED_ENTRY(216), + HNS3_RX_PTYPE_UNUSED_ENTRY(217), + HNS3_RX_PTYPE_UNUSED_ENTRY(218), + HNS3_RX_PTYPE_UNUSED_ENTRY(219), + HNS3_RX_PTYPE_UNUSED_ENTRY(220), + HNS3_RX_PTYPE_UNUSED_ENTRY(221), + HNS3_RX_PTYPE_UNUSED_ENTRY(222), + HNS3_RX_PTYPE_UNUSED_ENTRY(223), + HNS3_RX_PTYPE_UNUSED_ENTRY(224), + HNS3_RX_PTYPE_UNUSED_ENTRY(225), + HNS3_RX_PTYPE_UNUSED_ENTRY(226), + HNS3_RX_PTYPE_UNUSED_ENTRY(227), + HNS3_RX_PTYPE_UNUSED_ENTRY(228), + HNS3_RX_PTYPE_UNUSED_ENTRY(229), + HNS3_RX_PTYPE_UNUSED_ENTRY(230), + HNS3_RX_PTYPE_UNUSED_ENTRY(231), + HNS3_RX_PTYPE_UNUSED_ENTRY(232), + HNS3_RX_PTYPE_UNUSED_ENTRY(233), + HNS3_RX_PTYPE_UNUSED_ENTRY(234), + HNS3_RX_PTYPE_UNUSED_ENTRY(235), + HNS3_RX_PTYPE_UNUSED_ENTRY(236), + HNS3_RX_PTYPE_UNUSED_ENTRY(237), + HNS3_RX_PTYPE_UNUSED_ENTRY(238), + HNS3_RX_PTYPE_UNUSED_ENTRY(239), + HNS3_RX_PTYPE_UNUSED_ENTRY(240), + HNS3_RX_PTYPE_UNUSED_ENTRY(241), + HNS3_RX_PTYPE_UNUSED_ENTRY(242), + HNS3_RX_PTYPE_UNUSED_ENTRY(243), + HNS3_RX_PTYPE_UNUSED_ENTRY(244), + HNS3_RX_PTYPE_UNUSED_ENTRY(245), + HNS3_RX_PTYPE_UNUSED_ENTRY(246), + HNS3_RX_PTYPE_UNUSED_ENTRY(247), + HNS3_RX_PTYPE_UNUSED_ENTRY(248), + HNS3_RX_PTYPE_UNUSED_ENTRY(249), + HNS3_RX_PTYPE_UNUSED_ENTRY(250), + HNS3_RX_PTYPE_UNUSED_ENTRY(251), + HNS3_RX_PTYPE_UNUSED_ENTRY(252), + HNS3_RX_PTYPE_UNUSED_ENTRY(253), + HNS3_RX_PTYPE_UNUSED_ENTRY(254), + HNS3_RX_PTYPE_UNUSED_ENTRY(255), +}; + +#define HNS3_INVALID_PTYPE \ + ARRAY_SIZE(hns3_rx_ptype_tbl) + +static irqreturn_t hns3_irq_handle(int irq, void *vector) +{ + struct hns3_enet_tqp_vector *tqp_vector = vector; + + napi_schedule_irqoff(&tqp_vector->napi); + tqp_vector->event_cnt++; + + return IRQ_HANDLED; +} + +static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv) +{ + struct hns3_enet_tqp_vector *tqp_vectors; + unsigned int i; + + for (i = 0; i < priv->vector_num; i++) { + tqp_vectors = &priv->tqp_vector[i]; + + if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED) + continue; + + /* clear the affinity mask */ + irq_set_affinity_hint(tqp_vectors->vector_irq, NULL); + + /* release the irq resource */ + free_irq(tqp_vectors->vector_irq, tqp_vectors); + tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED; + } +} + +static int 
hns3_nic_init_irq(struct hns3_nic_priv *priv) +{ + struct hns3_enet_tqp_vector *tqp_vectors; + int txrx_int_idx = 0; + int rx_int_idx = 0; + int tx_int_idx = 0; + unsigned int i; + int ret; + + for (i = 0; i < priv->vector_num; i++) { + tqp_vectors = &priv->tqp_vector[i]; + + if (tqp_vectors->irq_init_flag == HNS3_VECTOR_INITED) + continue; + + if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) { + snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN, + "%s-%s-%s-%d", hns3_driver_name, + pci_name(priv->ae_handle->pdev), + "TxRx", txrx_int_idx++); + txrx_int_idx++; + } else if (tqp_vectors->rx_group.ring) { + snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN, + "%s-%s-%s-%d", hns3_driver_name, + pci_name(priv->ae_handle->pdev), + "Rx", rx_int_idx++); + } else if (tqp_vectors->tx_group.ring) { + snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN, + "%s-%s-%s-%d", hns3_driver_name, + pci_name(priv->ae_handle->pdev), + "Tx", tx_int_idx++); + } else { + /* Skip this unused q_vector */ + continue; + } + + tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0'; + + irq_set_status_flags(tqp_vectors->vector_irq, IRQ_NOAUTOEN); + ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0, + tqp_vectors->name, tqp_vectors); + if (ret) { + netdev_err(priv->netdev, "request irq(%d) fail\n", + tqp_vectors->vector_irq); + hns3_nic_uninit_irq(priv); + return ret; + } + + irq_set_affinity_hint(tqp_vectors->vector_irq, + &tqp_vectors->affinity_mask); + + tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED; + } + + return 0; +} + +static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector, + u32 mask_en) +{ + writel(mask_en, tqp_vector->mask_addr); +} + +static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector) +{ + napi_enable(&tqp_vector->napi); + enable_irq(tqp_vector->vector_irq); + + /* enable vector */ + hns3_mask_vector_irq(tqp_vector, 1); +} + +static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector) +{ + /* disable vector */ + hns3_mask_vector_irq(tqp_vector, 0); + + disable_irq(tqp_vector->vector_irq); + napi_disable(&tqp_vector->napi); + cancel_work_sync(&tqp_vector->rx_group.dim.work); + cancel_work_sync(&tqp_vector->tx_group.dim.work); +} + +void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector, + u32 rl_value) +{ + u32 rl_reg = hns3_rl_usec_to_reg(rl_value); + + /* this defines the configuration for RL (Interrupt Rate Limiter). + * Rl defines rate of interrupts i.e. number of interrupts-per-second + * GL and RL(Rate Limiter) are 2 ways to acheive interrupt coalescing + */ + if (rl_reg > 0 && !tqp_vector->tx_group.coal.adapt_enable && + !tqp_vector->rx_group.coal.adapt_enable) + /* According to the hardware, the range of rl_reg is + * 0-59 and the unit is 4. 
+ */ + rl_reg |= HNS3_INT_RL_ENABLE_MASK; + + writel(rl_reg, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET); +} + +void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector, + u32 gl_value) +{ + u32 new_val; + + if (tqp_vector->rx_group.coal.unit_1us) + new_val = gl_value | HNS3_INT_GL_1US; + else + new_val = hns3_gl_usec_to_reg(gl_value); + + writel(new_val, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET); +} + +void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector, + u32 gl_value) +{ + u32 new_val; + + if (tqp_vector->tx_group.coal.unit_1us) + new_val = gl_value | HNS3_INT_GL_1US; + else + new_val = hns3_gl_usec_to_reg(gl_value); + + writel(new_val, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET); +} + +void hns3_set_vector_coalesce_tx_ql(struct hns3_enet_tqp_vector *tqp_vector, + u32 ql_value) +{ + writel(ql_value, tqp_vector->mask_addr + HNS3_VECTOR_TX_QL_OFFSET); +} + +void hns3_set_vector_coalesce_rx_ql(struct hns3_enet_tqp_vector *tqp_vector, + u32 ql_value) +{ + writel(ql_value, tqp_vector->mask_addr + HNS3_VECTOR_RX_QL_OFFSET); +} + +static void hns3_vector_coalesce_init(struct hns3_enet_tqp_vector *tqp_vector, + struct hns3_nic_priv *priv) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev); + struct hns3_enet_coalesce *tx_coal = &tqp_vector->tx_group.coal; + struct hns3_enet_coalesce *rx_coal = &tqp_vector->rx_group.coal; + struct hns3_enet_coalesce *ptx_coal = &priv->tx_coal; + struct hns3_enet_coalesce *prx_coal = &priv->rx_coal; + + tx_coal->adapt_enable = ptx_coal->adapt_enable; + rx_coal->adapt_enable = prx_coal->adapt_enable; + + tx_coal->int_gl = ptx_coal->int_gl; + rx_coal->int_gl = prx_coal->int_gl; + + rx_coal->flow_level = prx_coal->flow_level; + tx_coal->flow_level = ptx_coal->flow_level; + + /* device version above V3(include V3), GL can configure 1us + * unit, so uses 1us unit. 
+ */ + if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) { + tx_coal->unit_1us = 1; + rx_coal->unit_1us = 1; + } + + if (ae_dev->dev_specs.int_ql_max) { + tx_coal->ql_enable = 1; + rx_coal->ql_enable = 1; + tx_coal->int_ql_max = ae_dev->dev_specs.int_ql_max; + rx_coal->int_ql_max = ae_dev->dev_specs.int_ql_max; + tx_coal->int_ql = ptx_coal->int_ql; + rx_coal->int_ql = prx_coal->int_ql; + } +} + +static void +hns3_vector_coalesce_init_hw(struct hns3_enet_tqp_vector *tqp_vector, + struct hns3_nic_priv *priv) +{ + struct hns3_enet_coalesce *tx_coal = &tqp_vector->tx_group.coal; + struct hns3_enet_coalesce *rx_coal = &tqp_vector->rx_group.coal; + struct hnae3_handle *h = priv->ae_handle; + + hns3_set_vector_coalesce_tx_gl(tqp_vector, tx_coal->int_gl); + hns3_set_vector_coalesce_rx_gl(tqp_vector, rx_coal->int_gl); + hns3_set_vector_coalesce_rl(tqp_vector, h->kinfo.int_rl_setting); + + if (tx_coal->ql_enable) + hns3_set_vector_coalesce_tx_ql(tqp_vector, tx_coal->int_ql); + + if (rx_coal->ql_enable) + hns3_set_vector_coalesce_rx_ql(tqp_vector, rx_coal->int_ql); +} + +static int hns3_nic_set_real_num_queue(struct net_device *netdev) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + struct hnae3_knic_private_info *kinfo = &h->kinfo; + struct hnae3_tc_info *tc_info = &kinfo->tc_info; + unsigned int queue_size = kinfo->num_tqps; + int i, ret; + + if (tc_info->num_tc <= 1 && !tc_info->mqprio_active) { + netdev_reset_tc(netdev); + } else { + ret = netdev_set_num_tc(netdev, tc_info->num_tc); + if (ret) { + netdev_err(netdev, + "netdev_set_num_tc fail, ret=%d!\n", ret); + return ret; + } + + for (i = 0; i < tc_info->num_tc; i++) + netdev_set_tc_queue(netdev, i, tc_info->tqp_count[i], + tc_info->tqp_offset[i]); + } + + ret = netif_set_real_num_tx_queues(netdev, queue_size); + if (ret) { + netdev_err(netdev, + "netif_set_real_num_tx_queues fail, ret=%d!\n", ret); + return ret; + } + + ret = netif_set_real_num_rx_queues(netdev, queue_size); + if (ret) { + netdev_err(netdev, + "netif_set_real_num_rx_queues fail, ret=%d!\n", ret); + return ret; + } + + return 0; +} + +u16 hns3_get_max_available_channels(struct hnae3_handle *h) +{ + u16 alloc_tqps, max_rss_size, rss_size; + + h->ae_algo->ops->get_tqps_and_rss_info(h, &alloc_tqps, &max_rss_size); + rss_size = alloc_tqps / h->kinfo.tc_info.num_tc; + + return min_t(u16, rss_size, max_rss_size); +} + +static void hns3_tqp_enable(struct hnae3_queue *tqp) +{ + u32 rcb_reg; + + rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG); + rcb_reg |= BIT(HNS3_RING_EN_B); + hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg); +} + +static void hns3_tqp_disable(struct hnae3_queue *tqp) +{ + u32 rcb_reg; + + rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG); + rcb_reg &= ~BIT(HNS3_RING_EN_B); + hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg); +} + +static void hns3_free_rx_cpu_rmap(struct net_device *netdev) +{ +#ifdef CONFIG_RFS_ACCEL + free_irq_cpu_rmap(netdev->rx_cpu_rmap); + netdev->rx_cpu_rmap = NULL; +#endif +} + +static int hns3_set_rx_cpu_rmap(struct net_device *netdev) +{ +#ifdef CONFIG_RFS_ACCEL + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hns3_enet_tqp_vector *tqp_vector; + int i, ret; + + if (!netdev->rx_cpu_rmap) { + netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->vector_num); + if (!netdev->rx_cpu_rmap) + return -ENOMEM; + } + + for (i = 0; i < priv->vector_num; i++) { + tqp_vector = &priv->tqp_vector[i]; + ret = irq_cpu_rmap_add(netdev->rx_cpu_rmap, + tqp_vector->vector_irq); + if (ret) { + hns3_free_rx_cpu_rmap(netdev); + return ret; + } + } +#endif + 
return 0; +} + +static int hns3_nic_net_up(struct net_device *netdev) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + int i, j; + int ret; + + ret = hns3_nic_reset_all_ring(h); + if (ret) + return ret; + + clear_bit(HNS3_NIC_STATE_DOWN, &priv->state); + + /* enable the vectors */ + for (i = 0; i < priv->vector_num; i++) + hns3_vector_enable(&priv->tqp_vector[i]); + + /* enable rcb */ + for (j = 0; j < h->kinfo.num_tqps; j++) + hns3_tqp_enable(h->kinfo.tqp[j]); + + /* start the ae_dev */ + ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0; + if (ret) { + set_bit(HNS3_NIC_STATE_DOWN, &priv->state); + while (j--) + hns3_tqp_disable(h->kinfo.tqp[j]); + + for (j = i - 1; j >= 0; j--) + hns3_vector_disable(&priv->tqp_vector[j]); + } + + return ret; +} + +static void hns3_config_xps(struct hns3_nic_priv *priv) +{ + int i; + + for (i = 0; i < priv->vector_num; i++) { + struct hns3_enet_tqp_vector *tqp_vector = &priv->tqp_vector[i]; + struct hns3_enet_ring *ring = tqp_vector->tx_group.ring; + + while (ring) { + int ret; + + ret = netif_set_xps_queue(priv->netdev, + &tqp_vector->affinity_mask, + ring->tqp->tqp_index); + if (ret) + netdev_warn(priv->netdev, + "set xps queue failed: %d", ret); + + ring = ring->next; + } + } +} + +static int hns3_nic_net_open(struct net_device *netdev) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = hns3_get_handle(netdev); + struct hnae3_knic_private_info *kinfo; + int i, ret; + + if (hns3_nic_resetting(netdev)) + return -EBUSY; + + if (!test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) { + netdev_warn(netdev, "net open repeatedly!\n"); + return 0; + } + + netif_carrier_off(netdev); + + ret = hns3_nic_set_real_num_queue(netdev); + if (ret) + return ret; + + ret = hns3_nic_net_up(netdev); + if (ret) { + netdev_err(netdev, "net up fail, ret=%d!\n", ret); + return ret; + } + + kinfo = &h->kinfo; + for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) + netdev_set_prio_tc_map(netdev, i, kinfo->tc_info.prio_tc[i]); + + if (h->ae_algo->ops->set_timer_task) + h->ae_algo->ops->set_timer_task(priv->ae_handle, true); + + hns3_config_xps(priv); + + netif_dbg(h, drv, netdev, "net open\n"); + + return 0; +} + +static void hns3_reset_tx_queue(struct hnae3_handle *h) +{ + struct net_device *ndev = h->kinfo.netdev; + struct hns3_nic_priv *priv = netdev_priv(ndev); + struct netdev_queue *dev_queue; + u32 i; + + for (i = 0; i < h->kinfo.num_tqps; i++) { + dev_queue = netdev_get_tx_queue(ndev, + priv->ring[i].queue_index); + netdev_tx_reset_queue(dev_queue); + } +} + +static void hns3_nic_net_down(struct net_device *netdev) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = hns3_get_handle(netdev); + const struct hnae3_ae_ops *ops; + int i; + + /* disable vectors */ + for (i = 0; i < priv->vector_num; i++) + hns3_vector_disable(&priv->tqp_vector[i]); + + /* disable rcb */ + for (i = 0; i < h->kinfo.num_tqps; i++) + hns3_tqp_disable(h->kinfo.tqp[i]); + + /* stop ae_dev */ + ops = priv->ae_handle->ae_algo->ops; + if (ops->stop) + ops->stop(priv->ae_handle); + + /* delay ring buffer clearing to hns3_reset_notify_uninit_enet + * during reset process, because driver may not be able + * to disable the ring through firmware when downing the netdev. 
+ */ + if (!hns3_nic_resetting(netdev)) + hns3_clear_all_ring(priv->ae_handle, false); + + hns3_reset_tx_queue(priv->ae_handle); +} + +static int hns3_nic_net_stop(struct net_device *netdev) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = hns3_get_handle(netdev); + + if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state)) + return 0; + + netif_dbg(h, drv, netdev, "net stop\n"); + + if (h->ae_algo->ops->set_timer_task) + h->ae_algo->ops->set_timer_task(priv->ae_handle, false); + + netif_carrier_off(netdev); + netif_tx_disable(netdev); + + hns3_nic_net_down(netdev); + + return 0; +} + +static int hns3_nic_uc_sync(struct net_device *netdev, + const unsigned char *addr) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + if (h->ae_algo->ops->add_uc_addr) + return h->ae_algo->ops->add_uc_addr(h, addr); + + return 0; +} + +static int hns3_nic_uc_unsync(struct net_device *netdev, + const unsigned char *addr) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + /* need ignore the request of removing device address, because + * we store the device address and other addresses of uc list + * in the function's mac filter list. + */ + if (ether_addr_equal(addr, netdev->dev_addr)) + return 0; + + if (h->ae_algo->ops->rm_uc_addr) + return h->ae_algo->ops->rm_uc_addr(h, addr); + + return 0; +} + +static int hns3_nic_mc_sync(struct net_device *netdev, + const unsigned char *addr) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + if (h->ae_algo->ops->add_mc_addr) + return h->ae_algo->ops->add_mc_addr(h, addr); + + return 0; +} + +static int hns3_nic_mc_unsync(struct net_device *netdev, + const unsigned char *addr) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + if (h->ae_algo->ops->rm_mc_addr) + return h->ae_algo->ops->rm_mc_addr(h, addr); + + return 0; +} + +static u8 hns3_get_netdev_flags(struct net_device *netdev) +{ + u8 flags = 0; + + if (netdev->flags & IFF_PROMISC) + flags = HNAE3_USER_UPE | HNAE3_USER_MPE | HNAE3_BPE; + else if (netdev->flags & IFF_ALLMULTI) + flags = HNAE3_USER_MPE; + + return flags; +} + +static void hns3_nic_set_rx_mode(struct net_device *netdev) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + u8 new_flags; + + new_flags = hns3_get_netdev_flags(netdev); + + __dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync); + __dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync); + + /* User mode Promisc mode enable and vlan filtering is disabled to + * let all packets in. + */ + h->netdev_flags = new_flags; + hns3_request_update_promisc_mode(h); +} + +void hns3_request_update_promisc_mode(struct hnae3_handle *handle) +{ + const struct hnae3_ae_ops *ops = handle->ae_algo->ops; + + if (ops->request_update_promisc_mode) + ops->request_update_promisc_mode(handle); +} + +static u32 hns3_tx_spare_space(struct hns3_enet_ring *ring) +{ + struct hns3_tx_spare *tx_spare = ring->tx_spare; + u32 ntc, ntu; + + /* This smp_load_acquire() pairs with smp_store_release() in + * hns3_tx_spare_update() called in tx desc cleaning process. + */ + ntc = smp_load_acquire(&tx_spare->last_to_clean); + ntu = tx_spare->next_to_use; + + if (ntc > ntu) + return ntc - ntu - 1; + + /* The free tx buffer is divided into two part, so pick the + * larger one. 
+ */ + return max(ntc, tx_spare->len - ntu) - 1; +} + +static void hns3_tx_spare_update(struct hns3_enet_ring *ring) +{ + struct hns3_tx_spare *tx_spare = ring->tx_spare; + + if (!tx_spare || + tx_spare->last_to_clean == tx_spare->next_to_clean) + return; + + /* This smp_store_release() pairs with smp_load_acquire() in + * hns3_tx_spare_space() called in xmit process. + */ + smp_store_release(&tx_spare->last_to_clean, + tx_spare->next_to_clean); +} + +static bool hns3_can_use_tx_bounce(struct hns3_enet_ring *ring, + struct sk_buff *skb, + u32 space) +{ + u32 len = skb->len <= ring->tx_copybreak ? skb->len : + skb_headlen(skb); + + if (len > ring->tx_copybreak) + return false; + + if (ALIGN(len, dma_get_cache_alignment()) > space) { + hns3_ring_stats_update(ring, tx_spare_full); + return false; + } + + return true; +} + +static bool hns3_can_use_tx_sgl(struct hns3_enet_ring *ring, + struct sk_buff *skb, + u32 space) +{ + if (skb->len <= ring->tx_copybreak || !tx_sgl || + (!skb_has_frag_list(skb) && + skb_shinfo(skb)->nr_frags < tx_sgl)) + return false; + + if (space < HNS3_MAX_SGL_SIZE) { + hns3_ring_stats_update(ring, tx_spare_full); + return false; + } + + return true; +} + +static void hns3_init_tx_spare_buffer(struct hns3_enet_ring *ring) +{ + u32 alloc_size = ring->tqp->handle->kinfo.tx_spare_buf_size; + struct hns3_tx_spare *tx_spare; + struct page *page; + dma_addr_t dma; + int order; + + if (!alloc_size) + return; + + order = get_order(alloc_size); + if (order >= MAX_ORDER) { + if (net_ratelimit()) + dev_warn(ring_to_dev(ring), "failed to allocate tx spare buffer, exceed to max order\n"); + return; + } + + tx_spare = devm_kzalloc(ring_to_dev(ring), sizeof(*tx_spare), + GFP_KERNEL); + if (!tx_spare) { + /* The driver still work without the tx spare buffer */ + dev_warn(ring_to_dev(ring), "failed to allocate hns3_tx_spare\n"); + goto devm_kzalloc_error; + } + + page = alloc_pages_node(dev_to_node(ring_to_dev(ring)), + GFP_KERNEL, order); + if (!page) { + dev_warn(ring_to_dev(ring), "failed to allocate tx spare pages\n"); + goto alloc_pages_error; + } + + dma = dma_map_page(ring_to_dev(ring), page, 0, + PAGE_SIZE << order, DMA_TO_DEVICE); + if (dma_mapping_error(ring_to_dev(ring), dma)) { + dev_warn(ring_to_dev(ring), "failed to map pages for tx spare\n"); + goto dma_mapping_error; + } + + tx_spare->dma = dma; + tx_spare->buf = page_address(page); + tx_spare->len = PAGE_SIZE << order; + ring->tx_spare = tx_spare; + return; + +dma_mapping_error: + put_page(page); +alloc_pages_error: + devm_kfree(ring_to_dev(ring), tx_spare); +devm_kzalloc_error: + ring->tqp->handle->kinfo.tx_spare_buf_size = 0; +} + +/* Use hns3_tx_spare_space() to make sure there is enough buffer + * before calling below function to allocate tx buffer. + */ +static void *hns3_tx_spare_alloc(struct hns3_enet_ring *ring, + unsigned int size, dma_addr_t *dma, + u32 *cb_len) +{ + struct hns3_tx_spare *tx_spare = ring->tx_spare; + u32 ntu = tx_spare->next_to_use; + + size = ALIGN(size, dma_get_cache_alignment()); + *cb_len = size; + + /* Tx spare buffer wraps back here because the end of + * freed tx buffer is not enough. 
+ */ + if (ntu + size > tx_spare->len) { + *cb_len += (tx_spare->len - ntu); + ntu = 0; + } + + tx_spare->next_to_use = ntu + size; + if (tx_spare->next_to_use == tx_spare->len) + tx_spare->next_to_use = 0; + + *dma = tx_spare->dma + ntu; + + return tx_spare->buf + ntu; +} + +static void hns3_tx_spare_rollback(struct hns3_enet_ring *ring, u32 len) +{ + struct hns3_tx_spare *tx_spare = ring->tx_spare; + + if (len > tx_spare->next_to_use) { + len -= tx_spare->next_to_use; + tx_spare->next_to_use = tx_spare->len - len; + } else { + tx_spare->next_to_use -= len; + } +} + +static void hns3_tx_spare_reclaim_cb(struct hns3_enet_ring *ring, + struct hns3_desc_cb *cb) +{ + struct hns3_tx_spare *tx_spare = ring->tx_spare; + u32 ntc = tx_spare->next_to_clean; + u32 len = cb->length; + + tx_spare->next_to_clean += len; + + if (tx_spare->next_to_clean >= tx_spare->len) { + tx_spare->next_to_clean -= tx_spare->len; + + if (tx_spare->next_to_clean) { + ntc = 0; + len = tx_spare->next_to_clean; + } + } + + /* This tx spare buffer is only really reclaimed after calling + * hns3_tx_spare_update(), so it is still safe to use the info in + * the tx buffer to do the dma sync or sg unmapping after + * tx_spare->next_to_clean is moved forword. + */ + if (cb->type & (DESC_TYPE_BOUNCE_HEAD | DESC_TYPE_BOUNCE_ALL)) { + dma_addr_t dma = tx_spare->dma + ntc; + + dma_sync_single_for_cpu(ring_to_dev(ring), dma, len, + DMA_TO_DEVICE); + } else { + struct sg_table *sgt = tx_spare->buf + ntc; + + dma_unmap_sg(ring_to_dev(ring), sgt->sgl, sgt->orig_nents, + DMA_TO_DEVICE); + } +} + +static int hns3_set_tso(struct sk_buff *skb, u32 *paylen_fdop_ol4cs, + u16 *mss, u32 *type_cs_vlan_tso, u32 *send_bytes) +{ + u32 l4_offset, hdr_len; + union l3_hdr_info l3; + union l4_hdr_info l4; + u32 l4_paylen; + int ret; + + if (!skb_is_gso(skb)) + return 0; + + ret = skb_cow_head(skb, 0); + if (unlikely(ret < 0)) + return ret; + + l3.hdr = skb_network_header(skb); + l4.hdr = skb_transport_header(skb); + + /* Software should clear the IPv4's checksum field when tso is + * needed. + */ + if (l3.v4->version == 4) + l3.v4->check = 0; + + /* tunnel packet */ + if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE | + SKB_GSO_GRE_CSUM | + SKB_GSO_UDP_TUNNEL | + SKB_GSO_UDP_TUNNEL_CSUM)) { + /* reset l3&l4 pointers from outer to inner headers */ + l3.hdr = skb_inner_network_header(skb); + l4.hdr = skb_inner_transport_header(skb); + + /* Software should clear the IPv4's checksum field when + * tso is needed. 
+ */ + if (l3.v4->version == 4) + l3.v4->check = 0; + } + + /* normal or tunnel packet */ + l4_offset = l4.hdr - skb->data; + + /* remove payload length from inner pseudo checksum when tso */ + l4_paylen = skb->len - l4_offset; + + if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { + hdr_len = sizeof(*l4.udp) + l4_offset; + csum_replace_by_diff(&l4.udp->check, + (__force __wsum)htonl(l4_paylen)); + } else { + hdr_len = (l4.tcp->doff << 2) + l4_offset; + csum_replace_by_diff(&l4.tcp->check, + (__force __wsum)htonl(l4_paylen)); + } + + *send_bytes = (skb_shinfo(skb)->gso_segs - 1) * hdr_len + skb->len; + + /* find the txbd field values */ + *paylen_fdop_ol4cs = skb->len - hdr_len; + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_TSO_B, 1); + + /* offload outer UDP header checksum */ + if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM) + hns3_set_field(*paylen_fdop_ol4cs, HNS3_TXD_OL4CS_B, 1); + + /* get MSS for TSO */ + *mss = skb_shinfo(skb)->gso_size; + + trace_hns3_tso(skb); + + return 0; +} + +static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto, + u8 *il4_proto) +{ + union l3_hdr_info l3; + unsigned char *l4_hdr; + unsigned char *exthdr; + u8 l4_proto_tmp; + __be16 frag_off; + + /* find outer header point */ + l3.hdr = skb_network_header(skb); + l4_hdr = skb_transport_header(skb); + + if (skb->protocol == htons(ETH_P_IPV6)) { + exthdr = l3.hdr + sizeof(*l3.v6); + l4_proto_tmp = l3.v6->nexthdr; + if (l4_hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, + &l4_proto_tmp, &frag_off); + } else if (skb->protocol == htons(ETH_P_IP)) { + l4_proto_tmp = l3.v4->protocol; + } else { + return -EINVAL; + } + + *ol4_proto = l4_proto_tmp; + + /* tunnel packet */ + if (!skb->encapsulation) { + *il4_proto = 0; + return 0; + } + + /* find inner header point */ + l3.hdr = skb_inner_network_header(skb); + l4_hdr = skb_inner_transport_header(skb); + + if (l3.v6->version == 6) { + exthdr = l3.hdr + sizeof(*l3.v6); + l4_proto_tmp = l3.v6->nexthdr; + if (l4_hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, + &l4_proto_tmp, &frag_off); + } else if (l3.v4->version == 4) { + l4_proto_tmp = l3.v4->protocol; + } + + *il4_proto = l4_proto_tmp; + + return 0; +} + +/* when skb->encapsulation is 0, skb->ip_summed is CHECKSUM_PARTIAL + * and it is udp packet, which has a dest port as the IANA assigned. + * the hardware is expected to do the checksum offload, but the + * hardware will not do the checksum offload when udp dest port is + * 4789, 4790 or 6081. + */ +static bool hns3_tunnel_csum_bug(struct sk_buff *skb) +{ + struct hns3_nic_priv *priv = netdev_priv(skb->dev); + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev); + union l4_hdr_info l4; + + /* device version above V3(include V3), the hardware can + * do this checksum offload. 
+ */ + if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) + return false; + + l4.hdr = skb_transport_header(skb); + + if (!(!skb->encapsulation && + (l4.udp->dest == htons(IANA_VXLAN_UDP_PORT) || + l4.udp->dest == htons(GENEVE_UDP_PORT) || + l4.udp->dest == htons(IANA_VXLAN_GPE_UDP_PORT)))) + return false; + + return true; +} + +static void hns3_set_outer_l2l3l4(struct sk_buff *skb, u8 ol4_proto, + u32 *ol_type_vlan_len_msec) +{ + u32 l2_len, l3_len, l4_len; + unsigned char *il2_hdr; + union l3_hdr_info l3; + union l4_hdr_info l4; + + l3.hdr = skb_network_header(skb); + l4.hdr = skb_transport_header(skb); + + /* compute OL2 header size, defined in 2 Bytes */ + l2_len = l3.hdr - skb->data; + hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L2LEN_S, l2_len >> 1); + + /* compute OL3 header size, defined in 4 Bytes */ + l3_len = l4.hdr - l3.hdr; + hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_S, l3_len >> 2); + + il2_hdr = skb_inner_mac_header(skb); + /* compute OL4 header size, defined in 4 Bytes */ + l4_len = il2_hdr - l4.hdr; + hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_S, l4_len >> 2); + + /* define outer network header type */ + if (skb->protocol == htons(ETH_P_IP)) { + if (skb_is_gso(skb)) + hns3_set_field(*ol_type_vlan_len_msec, + HNS3_TXD_OL3T_S, + HNS3_OL3T_IPV4_CSUM); + else + hns3_set_field(*ol_type_vlan_len_msec, + HNS3_TXD_OL3T_S, + HNS3_OL3T_IPV4_NO_CSUM); + } else if (skb->protocol == htons(ETH_P_IPV6)) { + hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_S, + HNS3_OL3T_IPV6); + } + + if (ol4_proto == IPPROTO_UDP) + hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_TUNTYPE_S, + HNS3_TUN_MAC_IN_UDP); + else if (ol4_proto == IPPROTO_GRE) + hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_TUNTYPE_S, + HNS3_TUN_NVGRE); +} + +static void hns3_set_l3_type(struct sk_buff *skb, union l3_hdr_info l3, + u32 *type_cs_vlan_tso) +{ + if (l3.v4->version == 4) { + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S, + HNS3_L3T_IPV4); + + /* the stack computes the IP header already, the only time we + * need the hardware to recompute it is in the case of TSO. + */ + if (skb_is_gso(skb)) + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1); + } else if (l3.v6->version == 6) { + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S, + HNS3_L3T_IPV6); + } +} + +static int hns3_set_l4_csum_length(struct sk_buff *skb, union l4_hdr_info l4, + u32 l4_proto, u32 *type_cs_vlan_tso) +{ + /* compute inner(/normal) L4 header size, defined in 4 Bytes */ + switch (l4_proto) { + case IPPROTO_TCP: + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S, + HNS3_L4T_TCP); + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S, + l4.tcp->doff); + break; + case IPPROTO_UDP: + if (hns3_tunnel_csum_bug(skb)) { + int ret = skb_put_padto(skb, HNS3_MIN_TUN_PKT_LEN); + + return ret ? ret : skb_checksum_help(skb); + } + + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S, + HNS3_L4T_UDP); + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S, + (sizeof(struct udphdr) >> 2)); + break; + case IPPROTO_SCTP: + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S, + HNS3_L4T_SCTP); + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S, + (sizeof(struct sctphdr) >> 2)); + break; + default: + /* drop the skb tunnel packet if hardware don't support, + * because hardware can't calculate csum when TSO. 
+ */ + if (skb_is_gso(skb)) + return -EDOM; + + /* the stack computes the IP header already, + * driver calculate l4 checksum when not TSO. + */ + return skb_checksum_help(skb); + } + + return 0; +} + +static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto, + u8 il4_proto, u32 *type_cs_vlan_tso, + u32 *ol_type_vlan_len_msec) +{ + unsigned char *l2_hdr = skb->data; + u32 l4_proto = ol4_proto; + union l4_hdr_info l4; + union l3_hdr_info l3; + u32 l2_len, l3_len; + + l4.hdr = skb_transport_header(skb); + l3.hdr = skb_network_header(skb); + + /* handle encapsulation skb */ + if (skb->encapsulation) { + /* If this is a not UDP/GRE encapsulation skb */ + if (!(ol4_proto == IPPROTO_UDP || ol4_proto == IPPROTO_GRE)) { + /* drop the skb tunnel packet if hardware don't support, + * because hardware can't calculate csum when TSO. + */ + if (skb_is_gso(skb)) + return -EDOM; + + /* the stack computes the IP header already, + * driver calculate l4 checksum when not TSO. + */ + return skb_checksum_help(skb); + } + + hns3_set_outer_l2l3l4(skb, ol4_proto, ol_type_vlan_len_msec); + + /* switch to inner header */ + l2_hdr = skb_inner_mac_header(skb); + l3.hdr = skb_inner_network_header(skb); + l4.hdr = skb_inner_transport_header(skb); + l4_proto = il4_proto; + } + + hns3_set_l3_type(skb, l3, type_cs_vlan_tso); + + /* compute inner(/normal) L2 header size, defined in 2 Bytes */ + l2_len = l3.hdr - l2_hdr; + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_S, l2_len >> 1); + + /* compute inner(/normal) L3 header size, defined in 4 Bytes */ + l3_len = l4.hdr - l3.hdr; + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_S, l3_len >> 2); + + return hns3_set_l4_csum_length(skb, l4, l4_proto, type_cs_vlan_tso); +} + +static int hns3_handle_vtags(struct hns3_enet_ring *tx_ring, + struct sk_buff *skb) +{ + struct hnae3_handle *handle = tx_ring->tqp->handle; + struct hnae3_ae_dev *ae_dev; + struct vlan_ethhdr *vhdr; + int rc; + + if (!(skb->protocol == htons(ETH_P_8021Q) || + skb_vlan_tag_present(skb))) + return 0; + + /* For HW limitation on HNAE3_DEVICE_VERSION_V2, if port based insert + * VLAN enabled, only one VLAN header is allowed in skb, otherwise it + * will cause RAS error. + */ + ae_dev = pci_get_drvdata(handle->pdev); + if (unlikely(skb_vlan_tagged_multi(skb) && + ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 && + handle->port_base_vlan_state == + HNAE3_PORT_BASE_VLAN_ENABLE)) + return -EINVAL; + + if (skb->protocol == htons(ETH_P_8021Q) && + !(handle->kinfo.netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) { + /* When HW VLAN acceleration is turned off, and the stack + * sets the protocol to 802.1q, the driver just need to + * set the protocol to the encapsulated ethertype. + */ + skb->protocol = vlan_get_protocol(skb); + return 0; + } + + if (skb_vlan_tag_present(skb)) { + /* Based on hw strategy, use out_vtag in two layer tag case, + * and use inner_vtag in one tag case. 
+ */ + if (skb->protocol == htons(ETH_P_8021Q) && + handle->port_base_vlan_state == + HNAE3_PORT_BASE_VLAN_DISABLE) + rc = HNS3_OUTER_VLAN_TAG; + else + rc = HNS3_INNER_VLAN_TAG; + + skb->protocol = vlan_get_protocol(skb); + return rc; + } + + rc = skb_cow_head(skb, 0); + if (unlikely(rc < 0)) + return rc; + + vhdr = skb_vlan_eth_hdr(skb); + vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority << VLAN_PRIO_SHIFT) + & VLAN_PRIO_MASK); + + skb->protocol = vlan_get_protocol(skb); + return 0; +} + +/* check if the hardware is capable of checksum offloading */ +static bool hns3_check_hw_tx_csum(struct sk_buff *skb) +{ + struct hns3_nic_priv *priv = netdev_priv(skb->dev); + + /* Kindly note, due to backward compatibility of the TX descriptor, + * HW checksum of the non-IP packets and GSO packets is handled at + * different place in the following code + */ + if (skb_csum_is_sctp(skb) || skb_is_gso(skb) || + !test_bit(HNS3_NIC_STATE_HW_TX_CSUM_ENABLE, &priv->state)) + return false; + + return true; +} + +struct hns3_desc_param { + u32 paylen_ol4cs; + u32 ol_type_vlan_len_msec; + u32 type_cs_vlan_tso; + u16 mss_hw_csum; + u16 inner_vtag; + u16 out_vtag; +}; + +static void hns3_init_desc_data(struct sk_buff *skb, struct hns3_desc_param *pa) +{ + pa->paylen_ol4cs = skb->len; + pa->ol_type_vlan_len_msec = 0; + pa->type_cs_vlan_tso = 0; + pa->mss_hw_csum = 0; + pa->inner_vtag = 0; + pa->out_vtag = 0; +} + +static int hns3_handle_vlan_info(struct hns3_enet_ring *ring, + struct sk_buff *skb, + struct hns3_desc_param *param) +{ + int ret; + + ret = hns3_handle_vtags(ring, skb); + if (unlikely(ret < 0)) { + hns3_ring_stats_update(ring, tx_vlan_err); + return ret; + } else if (ret == HNS3_INNER_VLAN_TAG) { + param->inner_vtag = skb_vlan_tag_get(skb); + param->inner_vtag |= (skb->priority << VLAN_PRIO_SHIFT) & + VLAN_PRIO_MASK; + hns3_set_field(param->type_cs_vlan_tso, HNS3_TXD_VLAN_B, 1); + } else if (ret == HNS3_OUTER_VLAN_TAG) { + param->out_vtag = skb_vlan_tag_get(skb); + param->out_vtag |= (skb->priority << VLAN_PRIO_SHIFT) & + VLAN_PRIO_MASK; + hns3_set_field(param->ol_type_vlan_len_msec, HNS3_TXD_OVLAN_B, + 1); + } + return 0; +} + +static int hns3_handle_csum_partial(struct hns3_enet_ring *ring, + struct sk_buff *skb, + struct hns3_desc_cb *desc_cb, + struct hns3_desc_param *param) +{ + u8 ol4_proto, il4_proto; + int ret; + + if (hns3_check_hw_tx_csum(skb)) { + /* set checksum start and offset, defined in 2 Bytes */ + hns3_set_field(param->type_cs_vlan_tso, HNS3_TXD_CSUM_START_S, + skb_checksum_start_offset(skb) >> 1); + hns3_set_field(param->ol_type_vlan_len_msec, + HNS3_TXD_CSUM_OFFSET_S, + skb->csum_offset >> 1); + param->mss_hw_csum |= BIT(HNS3_TXD_HW_CS_B); + return 0; + } + + skb_reset_mac_len(skb); + + ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto); + if (unlikely(ret < 0)) { + hns3_ring_stats_update(ring, tx_l4_proto_err); + return ret; + } + + ret = hns3_set_l2l3l4(skb, ol4_proto, il4_proto, + ¶m->type_cs_vlan_tso, + ¶m->ol_type_vlan_len_msec); + if (unlikely(ret < 0)) { + hns3_ring_stats_update(ring, tx_l2l3l4_err); + return ret; + } + + ret = hns3_set_tso(skb, ¶m->paylen_ol4cs, ¶m->mss_hw_csum, + ¶m->type_cs_vlan_tso, &desc_cb->send_bytes); + if (unlikely(ret < 0)) { + hns3_ring_stats_update(ring, tx_tso_err); + return ret; + } + return 0; +} + +static int hns3_fill_skb_desc(struct hns3_enet_ring *ring, + struct sk_buff *skb, struct hns3_desc *desc, + struct hns3_desc_cb *desc_cb) +{ + struct hns3_desc_param param; + int ret; + + hns3_init_desc_data(skb, ¶m); + ret = 
hns3_handle_vlan_info(ring, skb, ¶m); + if (unlikely(ret < 0)) + return ret; + + desc_cb->send_bytes = skb->len; + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + ret = hns3_handle_csum_partial(ring, skb, desc_cb, ¶m); + if (ret) + return ret; + } + + /* Set txbd */ + desc->tx.ol_type_vlan_len_msec = + cpu_to_le32(param.ol_type_vlan_len_msec); + desc->tx.type_cs_vlan_tso_len = cpu_to_le32(param.type_cs_vlan_tso); + desc->tx.paylen_ol4cs = cpu_to_le32(param.paylen_ol4cs); + desc->tx.mss_hw_csum = cpu_to_le16(param.mss_hw_csum); + desc->tx.vlan_tag = cpu_to_le16(param.inner_vtag); + desc->tx.outer_vlan_tag = cpu_to_le16(param.out_vtag); + + return 0; +} + +static int hns3_fill_desc(struct hns3_enet_ring *ring, dma_addr_t dma, + unsigned int size) +{ +#define HNS3_LIKELY_BD_NUM 1 + + struct hns3_desc *desc = &ring->desc[ring->next_to_use]; + unsigned int frag_buf_num; + int k, sizeoflast; + + if (likely(size <= HNS3_MAX_BD_SIZE)) { + desc->addr = cpu_to_le64(dma); + desc->tx.send_size = cpu_to_le16(size); + desc->tx.bdtp_fe_sc_vld_ra_ri = + cpu_to_le16(BIT(HNS3_TXD_VLD_B)); + + trace_hns3_tx_desc(ring, ring->next_to_use); + ring_ptr_move_fw(ring, next_to_use); + return HNS3_LIKELY_BD_NUM; + } + + frag_buf_num = hns3_tx_bd_count(size); + sizeoflast = size % HNS3_MAX_BD_SIZE; + sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE; + + /* When frag size is bigger than hardware limit, split this frag */ + for (k = 0; k < frag_buf_num; k++) { + /* now, fill the descriptor */ + desc->addr = cpu_to_le64(dma + HNS3_MAX_BD_SIZE * k); + desc->tx.send_size = cpu_to_le16((k == frag_buf_num - 1) ? + (u16)sizeoflast : (u16)HNS3_MAX_BD_SIZE); + desc->tx.bdtp_fe_sc_vld_ra_ri = + cpu_to_le16(BIT(HNS3_TXD_VLD_B)); + + trace_hns3_tx_desc(ring, ring->next_to_use); + /* move ring pointer to next */ + ring_ptr_move_fw(ring, next_to_use); + + desc = &ring->desc[ring->next_to_use]; + } + + return frag_buf_num; +} + +static int hns3_map_and_fill_desc(struct hns3_enet_ring *ring, void *priv, + unsigned int type) +{ + struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; + struct device *dev = ring_to_dev(ring); + unsigned int size; + dma_addr_t dma; + + if (type & (DESC_TYPE_FRAGLIST_SKB | DESC_TYPE_SKB)) { + struct sk_buff *skb = (struct sk_buff *)priv; + + size = skb_headlen(skb); + if (!size) + return 0; + + dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE); + } else if (type & DESC_TYPE_BOUNCE_HEAD) { + /* Head data has been filled in hns3_handle_tx_bounce(), + * just return 0 here. 
+ */ + return 0; + } else { + skb_frag_t *frag = (skb_frag_t *)priv; + + size = skb_frag_size(frag); + if (!size) + return 0; + + dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE); + } + + if (unlikely(dma_mapping_error(dev, dma))) { + hns3_ring_stats_update(ring, sw_err_cnt); + return -ENOMEM; + } + + desc_cb->priv = priv; + desc_cb->length = size; + desc_cb->dma = dma; + desc_cb->type = type; + + return hns3_fill_desc(ring, dma, size); +} + +static unsigned int hns3_skb_bd_num(struct sk_buff *skb, unsigned int *bd_size, + unsigned int bd_num) +{ + unsigned int size; + int i; + + size = skb_headlen(skb); + while (size > HNS3_MAX_BD_SIZE) { + bd_size[bd_num++] = HNS3_MAX_BD_SIZE; + size -= HNS3_MAX_BD_SIZE; + + if (bd_num > HNS3_MAX_TSO_BD_NUM) + return bd_num; + } + + if (size) { + bd_size[bd_num++] = size; + if (bd_num > HNS3_MAX_TSO_BD_NUM) + return bd_num; + } + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + size = skb_frag_size(frag); + if (!size) + continue; + + while (size > HNS3_MAX_BD_SIZE) { + bd_size[bd_num++] = HNS3_MAX_BD_SIZE; + size -= HNS3_MAX_BD_SIZE; + + if (bd_num > HNS3_MAX_TSO_BD_NUM) + return bd_num; + } + + bd_size[bd_num++] = size; + if (bd_num > HNS3_MAX_TSO_BD_NUM) + return bd_num; + } + + return bd_num; +} + +static unsigned int hns3_tx_bd_num(struct sk_buff *skb, unsigned int *bd_size, + u8 max_non_tso_bd_num, unsigned int bd_num, + unsigned int recursion_level) +{ +#define HNS3_MAX_RECURSION_LEVEL 24 + + struct sk_buff *frag_skb; + + /* If the total len is within the max bd limit */ + if (likely(skb->len <= HNS3_MAX_BD_SIZE && !recursion_level && + !skb_has_frag_list(skb) && + skb_shinfo(skb)->nr_frags < max_non_tso_bd_num)) + return skb_shinfo(skb)->nr_frags + 1U; + + if (unlikely(recursion_level >= HNS3_MAX_RECURSION_LEVEL)) + return UINT_MAX; + + bd_num = hns3_skb_bd_num(skb, bd_size, bd_num); + if (!skb_has_frag_list(skb) || bd_num > HNS3_MAX_TSO_BD_NUM) + return bd_num; + + skb_walk_frags(skb, frag_skb) { + bd_num = hns3_tx_bd_num(frag_skb, bd_size, max_non_tso_bd_num, + bd_num, recursion_level + 1); + if (bd_num > HNS3_MAX_TSO_BD_NUM) + return bd_num; + } + + return bd_num; +} + +static unsigned int hns3_gso_hdr_len(struct sk_buff *skb) +{ + if (!skb->encapsulation) + return skb_tcp_all_headers(skb); + + return skb_inner_tcp_all_headers(skb); +} + +/* HW need every continuous max_non_tso_bd_num buffer data to be larger + * than MSS, we simplify it by ensuring skb_headlen + the first continuous + * max_non_tso_bd_num - 1 frags to be larger than gso header len + mss, + * and the remaining continuous max_non_tso_bd_num - 1 frags to be larger + * than MSS except the last max_non_tso_bd_num - 1 frags. + */ +static bool hns3_skb_need_linearized(struct sk_buff *skb, unsigned int *bd_size, + unsigned int bd_num, u8 max_non_tso_bd_num) +{ + unsigned int tot_len = 0; + int i; + + for (i = 0; i < max_non_tso_bd_num - 1U; i++) + tot_len += bd_size[i]; + + /* ensure the first max_non_tso_bd_num frags is greater than + * mss + header + */ + if (tot_len + bd_size[max_non_tso_bd_num - 1U] < + skb_shinfo(skb)->gso_size + hns3_gso_hdr_len(skb)) + return true; + + /* ensure every continuous max_non_tso_bd_num - 1 buffer is greater + * than mss except the last one. 
+ */ + for (i = 0; i < bd_num - max_non_tso_bd_num; i++) { + tot_len -= bd_size[i]; + tot_len += bd_size[i + max_non_tso_bd_num - 1U]; + + if (tot_len < skb_shinfo(skb)->gso_size) + return true; + } + + return false; +} + +void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size) +{ + int i; + + for (i = 0; i < MAX_SKB_FRAGS; i++) + size[i] = skb_frag_size(&shinfo->frags[i]); +} + +static int hns3_skb_linearize(struct hns3_enet_ring *ring, + struct sk_buff *skb, + unsigned int bd_num) +{ + /* 'bd_num == UINT_MAX' means the skb' fraglist has a + * recursion level of over HNS3_MAX_RECURSION_LEVEL. + */ + if (bd_num == UINT_MAX) { + hns3_ring_stats_update(ring, over_max_recursion); + return -ENOMEM; + } + + /* The skb->len has exceeded the hw limitation, linearization + * will not help. + */ + if (skb->len > HNS3_MAX_TSO_SIZE || + (!skb_is_gso(skb) && skb->len > HNS3_MAX_NON_TSO_SIZE)) { + hns3_ring_stats_update(ring, hw_limitation); + return -ENOMEM; + } + + if (__skb_linearize(skb)) { + hns3_ring_stats_update(ring, sw_err_cnt); + return -ENOMEM; + } + + return 0; +} + +static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring, + struct net_device *netdev, + struct sk_buff *skb) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + u8 max_non_tso_bd_num = priv->max_non_tso_bd_num; + unsigned int bd_size[HNS3_MAX_TSO_BD_NUM + 1U]; + unsigned int bd_num; + + bd_num = hns3_tx_bd_num(skb, bd_size, max_non_tso_bd_num, 0, 0); + if (unlikely(bd_num > max_non_tso_bd_num)) { + if (bd_num <= HNS3_MAX_TSO_BD_NUM && skb_is_gso(skb) && + !hns3_skb_need_linearized(skb, bd_size, bd_num, + max_non_tso_bd_num)) { + trace_hns3_over_max_bd(skb); + goto out; + } + + if (hns3_skb_linearize(ring, skb, bd_num)) + return -ENOMEM; + + bd_num = hns3_tx_bd_count(skb->len); + + hns3_ring_stats_update(ring, tx_copy); + } + +out: + if (likely(ring_space(ring) >= bd_num)) + return bd_num; + + netif_stop_subqueue(netdev, ring->queue_index); + smp_mb(); /* Memory barrier before checking ring_space */ + + /* Start queue in case hns3_clean_tx_ring has just made room + * available and has not seen the queue stopped state performed + * by netif_stop_subqueue above. 
+ */ + if (ring_space(ring) >= bd_num && netif_carrier_ok(netdev) && + !test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) { + netif_start_subqueue(netdev, ring->queue_index); + return bd_num; + } + + hns3_ring_stats_update(ring, tx_busy); + + return -EBUSY; +} + +static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig) +{ + struct device *dev = ring_to_dev(ring); + unsigned int i; + + for (i = 0; i < ring->desc_num; i++) { + struct hns3_desc *desc = &ring->desc[ring->next_to_use]; + struct hns3_desc_cb *desc_cb; + + memset(desc, 0, sizeof(*desc)); + + /* check if this is where we started */ + if (ring->next_to_use == next_to_use_orig) + break; + + /* rollback one */ + ring_ptr_move_bw(ring, next_to_use); + + desc_cb = &ring->desc_cb[ring->next_to_use]; + + if (!desc_cb->dma) + continue; + + /* unmap the descriptor dma address */ + if (desc_cb->type & (DESC_TYPE_SKB | DESC_TYPE_FRAGLIST_SKB)) + dma_unmap_single(dev, desc_cb->dma, desc_cb->length, + DMA_TO_DEVICE); + else if (desc_cb->type & + (DESC_TYPE_BOUNCE_HEAD | DESC_TYPE_BOUNCE_ALL)) + hns3_tx_spare_rollback(ring, desc_cb->length); + else if (desc_cb->length) + dma_unmap_page(dev, desc_cb->dma, desc_cb->length, + DMA_TO_DEVICE); + + desc_cb->length = 0; + desc_cb->dma = 0; + desc_cb->type = DESC_TYPE_UNKNOWN; + } +} + +static int hns3_fill_skb_to_desc(struct hns3_enet_ring *ring, + struct sk_buff *skb, unsigned int type) +{ + struct sk_buff *frag_skb; + int i, ret, bd_num = 0; + + ret = hns3_map_and_fill_desc(ring, skb, type); + if (unlikely(ret < 0)) + return ret; + + bd_num += ret; + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + ret = hns3_map_and_fill_desc(ring, frag, DESC_TYPE_PAGE); + if (unlikely(ret < 0)) + return ret; + + bd_num += ret; + } + + skb_walk_frags(skb, frag_skb) { + ret = hns3_fill_skb_to_desc(ring, frag_skb, + DESC_TYPE_FRAGLIST_SKB); + if (unlikely(ret < 0)) + return ret; + + bd_num += ret; + } + + return bd_num; +} + +static void hns3_tx_push_bd(struct hns3_enet_ring *ring, int num) +{ +#define HNS3_BYTES_PER_64BIT 8 + + struct hns3_desc desc[HNS3_MAX_PUSH_BD_NUM] = {}; + int offset = 0; + + /* make sure everything is visible to device before + * excuting tx push or updating doorbell + */ + dma_wmb(); + + do { + int idx = (ring->next_to_use - num + ring->desc_num) % + ring->desc_num; + + u64_stats_update_begin(&ring->syncp); + ring->stats.tx_push++; + u64_stats_update_end(&ring->syncp); + memcpy(&desc[offset], &ring->desc[idx], + sizeof(struct hns3_desc)); + offset++; + } while (--num); + + __iowrite64_copy(ring->tqp->mem_base, desc, + (sizeof(struct hns3_desc) * HNS3_MAX_PUSH_BD_NUM) / + HNS3_BYTES_PER_64BIT); + + io_stop_wc(); +} + +static void hns3_tx_mem_doorbell(struct hns3_enet_ring *ring) +{ +#define HNS3_MEM_DOORBELL_OFFSET 64 + + __le64 bd_num = cpu_to_le64((u64)ring->pending_buf); + + /* make sure everything is visible to device before + * excuting tx push or updating doorbell + */ + dma_wmb(); + + __iowrite64_copy(ring->tqp->mem_base + HNS3_MEM_DOORBELL_OFFSET, + &bd_num, 1); + u64_stats_update_begin(&ring->syncp); + ring->stats.tx_mem_doorbell += ring->pending_buf; + u64_stats_update_end(&ring->syncp); + + io_stop_wc(); +} + +static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num, + bool doorbell) +{ + struct net_device *netdev = ring_to_netdev(ring); + struct hns3_nic_priv *priv = netdev_priv(netdev); + + /* when tx push is enabled, the packet whose number of BD below + * HNS3_MAX_PUSH_BD_NUM can be pushed 
directly. + */ + if (test_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state) && num && + !ring->pending_buf && num <= HNS3_MAX_PUSH_BD_NUM && doorbell) { + /* This smp_store_release() pairs with smp_load_aquire() in + * hns3_nic_reclaim_desc(). Ensure that the BD valid bit + * is updated. + */ + smp_store_release(&ring->last_to_use, ring->next_to_use); + hns3_tx_push_bd(ring, num); + return; + } + + ring->pending_buf += num; + + if (!doorbell) { + hns3_ring_stats_update(ring, tx_more); + return; + } + + /* This smp_store_release() pairs with smp_load_aquire() in + * hns3_nic_reclaim_desc(). Ensure that the BD valid bit is updated. + */ + smp_store_release(&ring->last_to_use, ring->next_to_use); + + if (ring->tqp->mem_base) + hns3_tx_mem_doorbell(ring); + else + writel(ring->pending_buf, + ring->tqp->io_base + HNS3_RING_TX_RING_TAIL_REG); + + ring->pending_buf = 0; +} + +static void hns3_tsyn(struct net_device *netdev, struct sk_buff *skb, + struct hns3_desc *desc) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + if (!(h->ae_algo->ops->set_tx_hwts_info && + h->ae_algo->ops->set_tx_hwts_info(h, skb))) + return; + + desc->tx.bdtp_fe_sc_vld_ra_ri |= cpu_to_le16(BIT(HNS3_TXD_TSYN_B)); +} + +static int hns3_handle_tx_bounce(struct hns3_enet_ring *ring, + struct sk_buff *skb) +{ + struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; + unsigned int type = DESC_TYPE_BOUNCE_HEAD; + unsigned int size = skb_headlen(skb); + dma_addr_t dma; + int bd_num = 0; + u32 cb_len; + void *buf; + int ret; + + if (skb->len <= ring->tx_copybreak) { + size = skb->len; + type = DESC_TYPE_BOUNCE_ALL; + } + + /* hns3_can_use_tx_bounce() is called to ensure the below + * function can always return the tx buffer. + */ + buf = hns3_tx_spare_alloc(ring, size, &dma, &cb_len); + + ret = skb_copy_bits(skb, 0, buf, size); + if (unlikely(ret < 0)) { + hns3_tx_spare_rollback(ring, cb_len); + hns3_ring_stats_update(ring, copy_bits_err); + return ret; + } + + desc_cb->priv = skb; + desc_cb->length = cb_len; + desc_cb->dma = dma; + desc_cb->type = type; + + bd_num += hns3_fill_desc(ring, dma, size); + + if (type == DESC_TYPE_BOUNCE_HEAD) { + ret = hns3_fill_skb_to_desc(ring, skb, + DESC_TYPE_BOUNCE_HEAD); + if (unlikely(ret < 0)) + return ret; + + bd_num += ret; + } + + dma_sync_single_for_device(ring_to_dev(ring), dma, size, + DMA_TO_DEVICE); + + hns3_ring_stats_update(ring, tx_bounce); + + return bd_num; +} + +static int hns3_handle_tx_sgl(struct hns3_enet_ring *ring, + struct sk_buff *skb) +{ + struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; + u32 nfrag = skb_shinfo(skb)->nr_frags + 1; + struct sg_table *sgt; + int i, bd_num = 0; + dma_addr_t dma; + u32 cb_len; + int nents; + + if (skb_has_frag_list(skb)) + nfrag = HNS3_MAX_TSO_BD_NUM; + + /* hns3_can_use_tx_sgl() is called to ensure the below + * function can always return the tx buffer. 
+ */ + sgt = hns3_tx_spare_alloc(ring, HNS3_SGL_SIZE(nfrag), + &dma, &cb_len); + + /* scatterlist follows by the sg table */ + sgt->sgl = (struct scatterlist *)(sgt + 1); + sg_init_table(sgt->sgl, nfrag); + nents = skb_to_sgvec(skb, sgt->sgl, 0, skb->len); + if (unlikely(nents < 0)) { + hns3_tx_spare_rollback(ring, cb_len); + hns3_ring_stats_update(ring, skb2sgl_err); + return -ENOMEM; + } + + sgt->orig_nents = nents; + sgt->nents = dma_map_sg(ring_to_dev(ring), sgt->sgl, sgt->orig_nents, + DMA_TO_DEVICE); + if (unlikely(!sgt->nents)) { + hns3_tx_spare_rollback(ring, cb_len); + hns3_ring_stats_update(ring, map_sg_err); + return -ENOMEM; + } + + desc_cb->priv = skb; + desc_cb->length = cb_len; + desc_cb->dma = dma; + desc_cb->type = DESC_TYPE_SGL_SKB; + + for (i = 0; i < sgt->nents; i++) + bd_num += hns3_fill_desc(ring, sg_dma_address(sgt->sgl + i), + sg_dma_len(sgt->sgl + i)); + hns3_ring_stats_update(ring, tx_sgl); + + return bd_num; +} + +static int hns3_handle_desc_filling(struct hns3_enet_ring *ring, + struct sk_buff *skb) +{ + u32 space; + + if (!ring->tx_spare) + goto out; + + space = hns3_tx_spare_space(ring); + + if (hns3_can_use_tx_sgl(ring, skb, space)) + return hns3_handle_tx_sgl(ring, skb); + + if (hns3_can_use_tx_bounce(ring, skb, space)) + return hns3_handle_tx_bounce(ring, skb); + +out: + return hns3_fill_skb_to_desc(ring, skb, DESC_TYPE_SKB); +} + +static int hns3_handle_skb_desc(struct hns3_enet_ring *ring, + struct sk_buff *skb, + struct hns3_desc_cb *desc_cb, + int next_to_use_head) +{ + int ret; + + ret = hns3_fill_skb_desc(ring, skb, &ring->desc[ring->next_to_use], + desc_cb); + if (unlikely(ret < 0)) + goto fill_err; + + /* 'ret < 0' means filling error, 'ret == 0' means skb->len is + * zero, which is unlikely, and 'ret > 0' means how many tx desc + * need to be notified to the hw. + */ + ret = hns3_handle_desc_filling(ring, skb); + if (likely(ret > 0)) + return ret; + +fill_err: + hns3_clear_desc(ring, next_to_use_head); + return ret; +} + +netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hns3_enet_ring *ring = &priv->ring[skb->queue_mapping]; + struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; + struct netdev_queue *dev_queue; + int pre_ntu, ret; + bool doorbell; + + /* Hardware can only handle short frames above 32 bytes */ + if (skb_put_padto(skb, HNS3_MIN_TX_LEN)) { + hns3_tx_doorbell(ring, 0, !netdev_xmit_more()); + + hns3_ring_stats_update(ring, sw_err_cnt); + + return NETDEV_TX_OK; + } + + /* Prefetch the data used later */ + prefetch(skb->data); + + ret = hns3_nic_maybe_stop_tx(ring, netdev, skb); + if (unlikely(ret <= 0)) { + if (ret == -EBUSY) { + hns3_tx_doorbell(ring, 0, true); + return NETDEV_TX_BUSY; + } + + hns3_rl_err(netdev, "xmit error: %d!\n", ret); + goto out_err_tx_ok; + } + + ret = hns3_handle_skb_desc(ring, skb, desc_cb, ring->next_to_use); + if (unlikely(ret <= 0)) + goto out_err_tx_ok; + + pre_ntu = ring->next_to_use ? 
(ring->next_to_use - 1) : + (ring->desc_num - 1); + + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) + hns3_tsyn(netdev, skb, &ring->desc[pre_ntu]); + + ring->desc[pre_ntu].tx.bdtp_fe_sc_vld_ra_ri |= + cpu_to_le16(BIT(HNS3_TXD_FE_B)); + trace_hns3_tx_desc(ring, pre_ntu); + + skb_tx_timestamp(skb); + + /* Complete translate all packets */ + dev_queue = netdev_get_tx_queue(netdev, ring->queue_index); + doorbell = __netdev_tx_sent_queue(dev_queue, desc_cb->send_bytes, + netdev_xmit_more()); + hns3_tx_doorbell(ring, ret, doorbell); + + return NETDEV_TX_OK; + +out_err_tx_ok: + dev_kfree_skb_any(skb); + hns3_tx_doorbell(ring, 0, !netdev_xmit_more()); + return NETDEV_TX_OK; +} + +static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p) +{ + char format_mac_addr_perm[HNAE3_FORMAT_MAC_ADDR_LEN]; + char format_mac_addr_sa[HNAE3_FORMAT_MAC_ADDR_LEN]; + struct hnae3_handle *h = hns3_get_handle(netdev); + struct sockaddr *mac_addr = p; + int ret; + + if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data)) + return -EADDRNOTAVAIL; + + if (ether_addr_equal(netdev->dev_addr, mac_addr->sa_data)) { + hnae3_format_mac_addr(format_mac_addr_sa, mac_addr->sa_data); + netdev_info(netdev, "already using mac address %s\n", + format_mac_addr_sa); + return 0; + } + + /* For VF device, if there is a perm_addr, then the user will not + * be allowed to change the address. + */ + if (!hns3_is_phys_func(h->pdev) && + !is_zero_ether_addr(netdev->perm_addr)) { + hnae3_format_mac_addr(format_mac_addr_perm, netdev->perm_addr); + hnae3_format_mac_addr(format_mac_addr_sa, mac_addr->sa_data); + netdev_err(netdev, "has permanent MAC %s, user MAC %s not allow\n", + format_mac_addr_perm, format_mac_addr_sa); + return -EPERM; + } + + ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data, false); + if (ret) { + netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret); + return ret; + } + + eth_hw_addr_set(netdev, mac_addr->sa_data); + + return 0; +} + +static int hns3_nic_do_ioctl(struct net_device *netdev, + struct ifreq *ifr, int cmd) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + if (!netif_running(netdev)) + return -EINVAL; + + if (!h->ae_algo->ops->do_ioctl) + return -EOPNOTSUPP; + + return h->ae_algo->ops->do_ioctl(h, ifr, cmd); +} + +static int hns3_nic_set_features(struct net_device *netdev, + netdev_features_t features) +{ + netdev_features_t changed = netdev->features ^ features; + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + bool enable; + int ret; + + if (changed & (NETIF_F_GRO_HW) && h->ae_algo->ops->set_gro_en) { + enable = !!(features & NETIF_F_GRO_HW); + ret = h->ae_algo->ops->set_gro_en(h, enable); + if (ret) + return ret; + } + + if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && + h->ae_algo->ops->enable_hw_strip_rxvtag) { + enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX); + ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, enable); + if (ret) + return ret; + } + + if ((changed & NETIF_F_NTUPLE) && h->ae_algo->ops->enable_fd) { + enable = !!(features & NETIF_F_NTUPLE); + h->ae_algo->ops->enable_fd(h, enable); + } + + if ((netdev->features & NETIF_F_HW_TC) > (features & NETIF_F_HW_TC) && + h->ae_algo->ops->cls_flower_active(h)) { + netdev_err(netdev, + "there are offloaded TC filters active, cannot disable HW TC offload"); + return -EINVAL; + } + + if ((changed & NETIF_F_HW_VLAN_CTAG_FILTER) && + h->ae_algo->ops->enable_vlan_filter) { + enable = !!(features & NETIF_F_HW_VLAN_CTAG_FILTER); + ret = 
h->ae_algo->ops->enable_vlan_filter(h, enable); + if (ret) + return ret; + } + + netdev->features = features; + return 0; +} + +static netdev_features_t hns3_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features) +{ +#define HNS3_MAX_HDR_LEN 480U +#define HNS3_MAX_L4_HDR_LEN 60U + + size_t len; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return features; + + if (skb->encapsulation) + len = skb_inner_transport_header(skb) - skb->data; + else + len = skb_transport_header(skb) - skb->data; + + /* Assume L4 is 60 byte as TCP is the only protocol with a + * a flexible value, and it's max len is 60 bytes. + */ + len += HNS3_MAX_L4_HDR_LEN; + + /* Hardware only supports checksum on the skb with a max header + * len of 480 bytes. + */ + if (len > HNS3_MAX_HDR_LEN) + features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); + + return features; +} + +static void hns3_fetch_stats(struct rtnl_link_stats64 *stats, + struct hns3_enet_ring *ring, bool is_tx) +{ + unsigned int start; + + do { + start = u64_stats_fetch_begin_irq(&ring->syncp); + if (is_tx) { + stats->tx_bytes += ring->stats.tx_bytes; + stats->tx_packets += ring->stats.tx_pkts; + stats->tx_dropped += ring->stats.sw_err_cnt; + stats->tx_dropped += ring->stats.tx_vlan_err; + stats->tx_dropped += ring->stats.tx_l4_proto_err; + stats->tx_dropped += ring->stats.tx_l2l3l4_err; + stats->tx_dropped += ring->stats.tx_tso_err; + stats->tx_dropped += ring->stats.over_max_recursion; + stats->tx_dropped += ring->stats.hw_limitation; + stats->tx_dropped += ring->stats.copy_bits_err; + stats->tx_dropped += ring->stats.skb2sgl_err; + stats->tx_dropped += ring->stats.map_sg_err; + stats->tx_errors += ring->stats.sw_err_cnt; + stats->tx_errors += ring->stats.tx_vlan_err; + stats->tx_errors += ring->stats.tx_l4_proto_err; + stats->tx_errors += ring->stats.tx_l2l3l4_err; + stats->tx_errors += ring->stats.tx_tso_err; + stats->tx_errors += ring->stats.over_max_recursion; + stats->tx_errors += ring->stats.hw_limitation; + stats->tx_errors += ring->stats.copy_bits_err; + stats->tx_errors += ring->stats.skb2sgl_err; + stats->tx_errors += ring->stats.map_sg_err; + } else { + stats->rx_bytes += ring->stats.rx_bytes; + stats->rx_packets += ring->stats.rx_pkts; + stats->rx_dropped += ring->stats.l2_err; + stats->rx_errors += ring->stats.l2_err; + stats->rx_errors += ring->stats.l3l4_csum_err; + stats->rx_crc_errors += ring->stats.l2_err; + stats->multicast += ring->stats.rx_multicast; + stats->rx_length_errors += ring->stats.err_pkt_len; + } + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); +} + +static void hns3_nic_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + int queue_num = priv->ae_handle->kinfo.num_tqps; + struct hnae3_handle *handle = priv->ae_handle; + struct rtnl_link_stats64 ring_total_stats; + struct hns3_enet_ring *ring; + unsigned int idx; + + if (test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) + return; + + handle->ae_algo->ops->update_stats(handle, &netdev->stats); + + memset(&ring_total_stats, 0, sizeof(ring_total_stats)); + for (idx = 0; idx < queue_num; idx++) { + /* fetch the tx stats */ + ring = &priv->ring[idx]; + hns3_fetch_stats(&ring_total_stats, ring, true); + + /* fetch the rx stats */ + ring = &priv->ring[idx + queue_num]; + hns3_fetch_stats(&ring_total_stats, ring, false); + } + + stats->tx_bytes = ring_total_stats.tx_bytes; + stats->tx_packets = ring_total_stats.tx_packets; + stats->rx_bytes = 
ring_total_stats.rx_bytes; + stats->rx_packets = ring_total_stats.rx_packets; + + stats->rx_errors = ring_total_stats.rx_errors; + stats->multicast = ring_total_stats.multicast; + stats->rx_length_errors = ring_total_stats.rx_length_errors; + stats->rx_crc_errors = ring_total_stats.rx_crc_errors; + stats->rx_missed_errors = netdev->stats.rx_missed_errors; + + stats->tx_errors = ring_total_stats.tx_errors; + stats->rx_dropped = ring_total_stats.rx_dropped; + stats->tx_dropped = ring_total_stats.tx_dropped; + stats->collisions = netdev->stats.collisions; + stats->rx_over_errors = netdev->stats.rx_over_errors; + stats->rx_frame_errors = netdev->stats.rx_frame_errors; + stats->rx_fifo_errors = netdev->stats.rx_fifo_errors; + stats->tx_aborted_errors = netdev->stats.tx_aborted_errors; + stats->tx_carrier_errors = netdev->stats.tx_carrier_errors; + stats->tx_fifo_errors = netdev->stats.tx_fifo_errors; + stats->tx_heartbeat_errors = netdev->stats.tx_heartbeat_errors; + stats->tx_window_errors = netdev->stats.tx_window_errors; + stats->rx_compressed = netdev->stats.rx_compressed; + stats->tx_compressed = netdev->stats.tx_compressed; +} + +static int hns3_setup_tc(struct net_device *netdev, void *type_data) +{ + struct tc_mqprio_qopt_offload *mqprio_qopt = type_data; + struct hnae3_knic_private_info *kinfo; + u8 tc = mqprio_qopt->qopt.num_tc; + u16 mode = mqprio_qopt->mode; + u8 hw = mqprio_qopt->qopt.hw; + struct hnae3_handle *h; + + if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS && + mode == TC_MQPRIO_MODE_CHANNEL) || (!hw && tc == 0))) + return -EOPNOTSUPP; + + if (tc > HNAE3_MAX_TC) + return -EINVAL; + + if (!netdev) + return -EINVAL; + + h = hns3_get_handle(netdev); + kinfo = &h->kinfo; + + netif_dbg(h, drv, netdev, "setup tc: num_tc=%u\n", tc); + + return (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ? 
+ kinfo->dcb_ops->setup_tc(h, mqprio_qopt) : -EOPNOTSUPP; +} + +static int hns3_setup_tc_cls_flower(struct hns3_nic_priv *priv, + struct flow_cls_offload *flow) +{ + int tc = tc_classid_to_hwtc(priv->netdev, flow->classid); + struct hnae3_handle *h = hns3_get_handle(priv->netdev); + + switch (flow->command) { + case FLOW_CLS_REPLACE: + if (h->ae_algo->ops->add_cls_flower) + return h->ae_algo->ops->add_cls_flower(h, flow, tc); + break; + case FLOW_CLS_DESTROY: + if (h->ae_algo->ops->del_cls_flower) + return h->ae_algo->ops->del_cls_flower(h, flow); + break; + default: + break; + } + + return -EOPNOTSUPP; +} + +static int hns3_setup_tc_block_cb(enum tc_setup_type type, void *type_data, + void *cb_priv) +{ + struct hns3_nic_priv *priv = cb_priv; + + if (!tc_cls_can_offload_and_chain0(priv->netdev, type_data)) + return -EOPNOTSUPP; + + switch (type) { + case TC_SETUP_CLSFLOWER: + return hns3_setup_tc_cls_flower(priv, type_data); + default: + return -EOPNOTSUPP; + } +} + +static LIST_HEAD(hns3_block_cb_list); + +static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type, + void *type_data) +{ + struct hns3_nic_priv *priv = netdev_priv(dev); + int ret; + + switch (type) { + case TC_SETUP_QDISC_MQPRIO: + ret = hns3_setup_tc(dev, type_data); + break; + case TC_SETUP_BLOCK: + ret = flow_block_cb_setup_simple(type_data, + &hns3_block_cb_list, + hns3_setup_tc_block_cb, + priv, priv, true); + break; + default: + return -EOPNOTSUPP; + } + + return ret; +} + +static int hns3_vlan_rx_add_vid(struct net_device *netdev, + __be16 proto, u16 vid) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + int ret = -EIO; + + if (h->ae_algo->ops->set_vlan_filter) + ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false); + + return ret; +} + +static int hns3_vlan_rx_kill_vid(struct net_device *netdev, + __be16 proto, u16 vid) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + int ret = -EIO; + + if (h->ae_algo->ops->set_vlan_filter) + ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true); + + return ret; +} + +static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, + u8 qos, __be16 vlan_proto) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + int ret = -EIO; + + netif_dbg(h, drv, netdev, + "set vf vlan: vf=%d, vlan=%u, qos=%u, vlan_proto=0x%x\n", + vf, vlan, qos, ntohs(vlan_proto)); + + if (h->ae_algo->ops->set_vf_vlan_filter) + ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan, + qos, vlan_proto); + + return ret; +} + +static int hns3_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable) +{ + struct hnae3_handle *handle = hns3_get_handle(netdev); + + if (hns3_nic_resetting(netdev)) + return -EBUSY; + + if (!handle->ae_algo->ops->set_vf_spoofchk) + return -EOPNOTSUPP; + + return handle->ae_algo->ops->set_vf_spoofchk(handle, vf, enable); +} + +static int hns3_set_vf_trust(struct net_device *netdev, int vf, bool enable) +{ + struct hnae3_handle *handle = hns3_get_handle(netdev); + + if (!handle->ae_algo->ops->set_vf_trust) + return -EOPNOTSUPP; + + return handle->ae_algo->ops->set_vf_trust(handle, vf, enable); +} + +static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + int ret; + + if (hns3_nic_resetting(netdev)) + return -EBUSY; + + if (!h->ae_algo->ops->set_mtu) + return -EOPNOTSUPP; + + netif_dbg(h, drv, netdev, + "change mtu from %u to %d\n", netdev->mtu, new_mtu); + + ret = h->ae_algo->ops->set_mtu(h, new_mtu); + if (ret) + netdev_err(netdev, "failed to change 
MTU in hardware %d\n", + ret); + else + netdev->mtu = new_mtu; + + return ret; +} + +static int hns3_get_timeout_queue(struct net_device *ndev) +{ + int i; + + /* Find the stopped queue the same way the stack does */ + for (i = 0; i < ndev->num_tx_queues; i++) { + struct netdev_queue *q; + unsigned long trans_start; + + q = netdev_get_tx_queue(ndev, i); + trans_start = READ_ONCE(q->trans_start); + if (netif_xmit_stopped(q) && + time_after(jiffies, + (trans_start + ndev->watchdog_timeo))) { +#ifdef CONFIG_BQL + struct dql *dql = &q->dql; + + netdev_info(ndev, "DQL info last_cnt: %u, queued: %u, adj_limit: %u, completed: %u\n", + dql->last_obj_cnt, dql->num_queued, + dql->adj_limit, dql->num_completed); +#endif + netdev_info(ndev, "queue state: 0x%lx, delta msecs: %u\n", + q->state, + jiffies_to_msecs(jiffies - trans_start)); + break; + } + } + + return i; +} + +static void hns3_dump_queue_stats(struct net_device *ndev, + struct hns3_enet_ring *tx_ring, + int timeout_queue) +{ + struct napi_struct *napi = &tx_ring->tqp_vector->napi; + struct hns3_nic_priv *priv = netdev_priv(ndev); + + netdev_info(ndev, + "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, napi state: %lu\n", + priv->tx_timeout_count, timeout_queue, tx_ring->next_to_use, + tx_ring->next_to_clean, napi->state); + + netdev_info(ndev, + "tx_pkts: %llu, tx_bytes: %llu, sw_err_cnt: %llu, tx_pending: %d\n", + tx_ring->stats.tx_pkts, tx_ring->stats.tx_bytes, + tx_ring->stats.sw_err_cnt, tx_ring->pending_buf); + + netdev_info(ndev, + "seg_pkt_cnt: %llu, tx_more: %llu, restart_queue: %llu, tx_busy: %llu\n", + tx_ring->stats.seg_pkt_cnt, tx_ring->stats.tx_more, + tx_ring->stats.restart_queue, tx_ring->stats.tx_busy); + + netdev_info(ndev, "tx_push: %llu, tx_mem_doorbell: %llu\n", + tx_ring->stats.tx_push, tx_ring->stats.tx_mem_doorbell); +} + +static void hns3_dump_queue_reg(struct net_device *ndev, + struct hns3_enet_ring *tx_ring) +{ + netdev_info(ndev, + "BD_NUM: 0x%x HW_HEAD: 0x%x, HW_TAIL: 0x%x, BD_ERR: 0x%x, INT: 0x%x\n", + hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_BD_NUM_REG), + hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_HEAD_REG), + hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_TAIL_REG), + hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_BD_ERR_REG), + readl(tx_ring->tqp_vector->mask_addr)); + netdev_info(ndev, + "RING_EN: 0x%x, TC: 0x%x, FBD_NUM: 0x%x FBD_OFT: 0x%x, EBD_NUM: 0x%x, EBD_OFT: 0x%x\n", + hns3_tqp_read_reg(tx_ring, HNS3_RING_EN_REG), + hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_TC_REG), + hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_FBDNUM_REG), + hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_OFFSET_REG), + hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_EBDNUM_REG), + hns3_tqp_read_reg(tx_ring, + HNS3_RING_TX_RING_EBD_OFFSET_REG)); +} + +static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev) +{ + struct hns3_nic_priv *priv = netdev_priv(ndev); + struct hnae3_handle *h = hns3_get_handle(ndev); + struct hns3_enet_ring *tx_ring; + int timeout_queue; + + timeout_queue = hns3_get_timeout_queue(ndev); + if (timeout_queue >= ndev->num_tx_queues) { + netdev_info(ndev, + "no netdev TX timeout queue found, timeout count: %llu\n", + priv->tx_timeout_count); + return false; + } + + priv->tx_timeout_count++; + + tx_ring = &priv->ring[timeout_queue]; + hns3_dump_queue_stats(ndev, tx_ring, timeout_queue); + + /* When mac received many pause frames continuous, it's unable to send + * packets, which may cause tx timeout + */ + if (h->ae_algo->ops->get_mac_stats) { + struct hns3_mac_stats mac_stats; + + 
h->ae_algo->ops->get_mac_stats(h, &mac_stats); + netdev_info(ndev, "tx_pause_cnt: %llu, rx_pause_cnt: %llu\n", + mac_stats.tx_pause_cnt, mac_stats.rx_pause_cnt); + } + + hns3_dump_queue_reg(ndev, tx_ring); + + return true; +} + +static void hns3_nic_net_timeout(struct net_device *ndev, unsigned int txqueue) +{ + struct hns3_nic_priv *priv = netdev_priv(ndev); + struct hnae3_handle *h = priv->ae_handle; + + if (!hns3_get_tx_timeo_queue_info(ndev)) + return; + + /* request the reset, and let the hclge to determine + * which reset level should be done + */ + if (h->ae_algo->ops->reset_event) + h->ae_algo->ops->reset_event(h->pdev, h); +} + +#ifdef CONFIG_RFS_ACCEL +static int hns3_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, + u16 rxq_index, u32 flow_id) +{ + struct hnae3_handle *h = hns3_get_handle(dev); + struct flow_keys fkeys; + + if (!h->ae_algo->ops->add_arfs_entry) + return -EOPNOTSUPP; + + if (skb->encapsulation) + return -EPROTONOSUPPORT; + + if (!skb_flow_dissect_flow_keys(skb, &fkeys, 0)) + return -EPROTONOSUPPORT; + + if ((fkeys.basic.n_proto != htons(ETH_P_IP) && + fkeys.basic.n_proto != htons(ETH_P_IPV6)) || + (fkeys.basic.ip_proto != IPPROTO_TCP && + fkeys.basic.ip_proto != IPPROTO_UDP)) + return -EPROTONOSUPPORT; + + return h->ae_algo->ops->add_arfs_entry(h, rxq_index, flow_id, &fkeys); +} +#endif + +static int hns3_nic_get_vf_config(struct net_device *ndev, int vf, + struct ifla_vf_info *ivf) +{ + struct hnae3_handle *h = hns3_get_handle(ndev); + + if (!h->ae_algo->ops->get_vf_config) + return -EOPNOTSUPP; + + return h->ae_algo->ops->get_vf_config(h, vf, ivf); +} + +static int hns3_nic_set_vf_link_state(struct net_device *ndev, int vf, + int link_state) +{ + struct hnae3_handle *h = hns3_get_handle(ndev); + + if (!h->ae_algo->ops->set_vf_link_state) + return -EOPNOTSUPP; + + return h->ae_algo->ops->set_vf_link_state(h, vf, link_state); +} + +static int hns3_nic_set_vf_rate(struct net_device *ndev, int vf, + int min_tx_rate, int max_tx_rate) +{ + struct hnae3_handle *h = hns3_get_handle(ndev); + + if (!h->ae_algo->ops->set_vf_rate) + return -EOPNOTSUPP; + + return h->ae_algo->ops->set_vf_rate(h, vf, min_tx_rate, max_tx_rate, + false); +} + +static int hns3_nic_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; + + if (!h->ae_algo->ops->set_vf_mac) + return -EOPNOTSUPP; + + if (is_multicast_ether_addr(mac)) { + hnae3_format_mac_addr(format_mac_addr, mac); + netdev_err(netdev, + "Invalid MAC:%s specified. 
Could not set MAC\n", + format_mac_addr); + return -EINVAL; + } + + return h->ae_algo->ops->set_vf_mac(h, vf_id, mac); +} + +#define HNS3_INVALID_DSCP 0xff +#define HNS3_DSCP_SHIFT 2 + +static u8 hns3_get_skb_dscp(struct sk_buff *skb) +{ + __be16 protocol = skb->protocol; + u8 dscp = HNS3_INVALID_DSCP; + + if (protocol == htons(ETH_P_8021Q)) + protocol = vlan_get_protocol(skb); + + if (protocol == htons(ETH_P_IP)) + dscp = ipv4_get_dsfield(ip_hdr(skb)) >> HNS3_DSCP_SHIFT; + else if (protocol == htons(ETH_P_IPV6)) + dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> HNS3_DSCP_SHIFT; + + return dscp; +} + +static u16 hns3_nic_select_queue(struct net_device *netdev, + struct sk_buff *skb, + struct net_device *sb_dev) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + u8 dscp; + + if (h->kinfo.tc_map_mode != HNAE3_TC_MAP_MODE_DSCP || + !h->ae_algo->ops->get_dscp_prio) + goto out; + + dscp = hns3_get_skb_dscp(skb); + if (unlikely(dscp >= HNAE3_MAX_DSCP)) + goto out; + + skb->priority = h->kinfo.dscp_prio[dscp]; + if (skb->priority == HNAE3_PRIO_ID_INVALID) + skb->priority = 0; + +out: + return netdev_pick_tx(netdev, skb, sb_dev); +} + +static const struct net_device_ops hns3_nic_netdev_ops = { + .ndo_open = hns3_nic_net_open, + .ndo_stop = hns3_nic_net_stop, + .ndo_start_xmit = hns3_nic_net_xmit, + .ndo_tx_timeout = hns3_nic_net_timeout, + .ndo_set_mac_address = hns3_nic_net_set_mac_address, + .ndo_eth_ioctl = hns3_nic_do_ioctl, + .ndo_change_mtu = hns3_nic_change_mtu, + .ndo_set_features = hns3_nic_set_features, + .ndo_features_check = hns3_features_check, + .ndo_get_stats64 = hns3_nic_get_stats64, + .ndo_setup_tc = hns3_nic_setup_tc, + .ndo_set_rx_mode = hns3_nic_set_rx_mode, + .ndo_vlan_rx_add_vid = hns3_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = hns3_vlan_rx_kill_vid, + .ndo_set_vf_vlan = hns3_ndo_set_vf_vlan, + .ndo_set_vf_spoofchk = hns3_set_vf_spoofchk, + .ndo_set_vf_trust = hns3_set_vf_trust, +#ifdef CONFIG_RFS_ACCEL + .ndo_rx_flow_steer = hns3_rx_flow_steer, +#endif + .ndo_get_vf_config = hns3_nic_get_vf_config, + .ndo_set_vf_link_state = hns3_nic_set_vf_link_state, + .ndo_set_vf_rate = hns3_nic_set_vf_rate, + .ndo_set_vf_mac = hns3_nic_set_vf_mac, + .ndo_select_queue = hns3_nic_select_queue, +}; + +bool hns3_is_phys_func(struct pci_dev *pdev) +{ + u32 dev_id = pdev->device; + + switch (dev_id) { + case HNAE3_DEV_ID_GE: + case HNAE3_DEV_ID_25GE: + case HNAE3_DEV_ID_25GE_RDMA: + case HNAE3_DEV_ID_25GE_RDMA_MACSEC: + case HNAE3_DEV_ID_50GE_RDMA: + case HNAE3_DEV_ID_50GE_RDMA_MACSEC: + case HNAE3_DEV_ID_100G_RDMA_MACSEC: + case HNAE3_DEV_ID_200G_RDMA: + return true; + case HNAE3_DEV_ID_VF: + case HNAE3_DEV_ID_RDMA_DCB_PFC_VF: + return false; + default: + dev_warn(&pdev->dev, "un-recognized pci device-id %u", + dev_id); + } + + return false; +} + +static void hns3_disable_sriov(struct pci_dev *pdev) +{ + /* If our VFs are assigned we cannot shut down SR-IOV + * without causing issues, so just leave the hardware + * available but disabled + */ + if (pci_vfs_assigned(pdev)) { + dev_warn(&pdev->dev, + "disabling driver while VFs are assigned\n"); + return; + } + + pci_disable_sriov(pdev); +} + +/* hns3_probe - Device initialization routine + * @pdev: PCI device information struct + * @ent: entry in hns3_pci_tbl + * + * hns3_probe initializes a PF identified by a pci_dev structure. + * The OS initialization, configuring of the PF private structure, + * and a hardware reset occur. 
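+ * Most of the setup is delegated to the hnae3 layer: hns3_probe only
+ * allocates the ae_dev and hands it to hnae3_register_ae_dev(), which
+ * is expected to match it with a registered ae_algo and initialize the
+ * device.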
+ * + * Returns 0 on success, negative on failure + */ +static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct hnae3_ae_dev *ae_dev; + int ret; + + ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev), GFP_KERNEL); + if (!ae_dev) + return -ENOMEM; + + ae_dev->pdev = pdev; + ae_dev->flag = ent->driver_data; + pci_set_drvdata(pdev, ae_dev); + + ret = hnae3_register_ae_dev(ae_dev); + if (ret) + pci_set_drvdata(pdev, NULL); + + return ret; +} + +/** + * hns3_clean_vf_config + * @pdev: pointer to a pci_dev structure + * @num_vfs: number of VFs allocated + * + * Clean residual vf config after disable sriov + **/ +static void hns3_clean_vf_config(struct pci_dev *pdev, int num_vfs) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); + + if (ae_dev->ops->clean_vf_config) + ae_dev->ops->clean_vf_config(ae_dev, num_vfs); +} + +/* hns3_remove - Device removal routine + * @pdev: PCI device information struct + */ +static void hns3_remove(struct pci_dev *pdev) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); + + if (hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV)) + hns3_disable_sriov(pdev); + + hnae3_unregister_ae_dev(ae_dev); + pci_set_drvdata(pdev, NULL); +} + +/** + * hns3_pci_sriov_configure + * @pdev: pointer to a pci_dev structure + * @num_vfs: number of VFs to allocate + * + * Enable or change the number of VFs. Called when the user updates the number + * of VFs in sysfs. + **/ +static int hns3_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) +{ + int ret; + + if (!(hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))) { + dev_warn(&pdev->dev, "Can not config SRIOV\n"); + return -EINVAL; + } + + if (num_vfs) { + ret = pci_enable_sriov(pdev, num_vfs); + if (ret) + dev_err(&pdev->dev, "SRIOV enable failed %d\n", ret); + else + return num_vfs; + } else if (!pci_vfs_assigned(pdev)) { + int num_vfs_pre = pci_num_vf(pdev); + + pci_disable_sriov(pdev); + hns3_clean_vf_config(pdev, num_vfs_pre); + } else { + dev_warn(&pdev->dev, + "Unable to free VFs because some are assigned to VMs.\n"); + } + + return 0; +} + +static void hns3_shutdown(struct pci_dev *pdev) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); + + hnae3_unregister_ae_dev(ae_dev); + pci_set_drvdata(pdev, NULL); + + if (system_state == SYSTEM_POWER_OFF) + pci_set_power_state(pdev, PCI_D3hot); +} + +static int __maybe_unused hns3_suspend(struct device *dev) +{ + struct hnae3_ae_dev *ae_dev = dev_get_drvdata(dev); + + if (ae_dev && hns3_is_phys_func(ae_dev->pdev)) { + dev_info(dev, "Begin to suspend.\n"); + if (ae_dev->ops && ae_dev->ops->reset_prepare) + ae_dev->ops->reset_prepare(ae_dev, HNAE3_FUNC_RESET); + } + + return 0; +} + +static int __maybe_unused hns3_resume(struct device *dev) +{ + struct hnae3_ae_dev *ae_dev = dev_get_drvdata(dev); + + if (ae_dev && hns3_is_phys_func(ae_dev->pdev)) { + dev_info(dev, "Begin to resume.\n"); + if (ae_dev->ops && ae_dev->ops->reset_done) + ae_dev->ops->reset_done(ae_dev); + } + + return 0; +} + +static pci_ers_result_t hns3_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); + pci_ers_result_t ret; + + dev_info(&pdev->dev, "PCI error detected, state(=%u)!!\n", state); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + if (!ae_dev || !ae_dev->ops) { + dev_err(&pdev->dev, + "Can't recover - error happened before device initialized\n"); + return PCI_ERS_RESULT_NONE; + } + + if (ae_dev->ops->handle_hw_ras_error) + ret = 
ae_dev->ops->handle_hw_ras_error(ae_dev); + else + return PCI_ERS_RESULT_NONE; + + return ret; +} + +static pci_ers_result_t hns3_slot_reset(struct pci_dev *pdev) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); + const struct hnae3_ae_ops *ops; + enum hnae3_reset_type reset_type; + struct device *dev = &pdev->dev; + + if (!ae_dev || !ae_dev->ops) + return PCI_ERS_RESULT_NONE; + + ops = ae_dev->ops; + /* request the reset */ + if (ops->reset_event && ops->get_reset_level && + ops->set_default_reset_request) { + if (ae_dev->hw_err_reset_req) { + reset_type = ops->get_reset_level(ae_dev, + &ae_dev->hw_err_reset_req); + ops->set_default_reset_request(ae_dev, reset_type); + dev_info(dev, "requesting reset due to PCI error\n"); + ops->reset_event(pdev, NULL); + } + + return PCI_ERS_RESULT_RECOVERED; + } + + return PCI_ERS_RESULT_DISCONNECT; +} + +static void hns3_reset_prepare(struct pci_dev *pdev) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); + + dev_info(&pdev->dev, "FLR prepare\n"); + if (ae_dev && ae_dev->ops && ae_dev->ops->reset_prepare) + ae_dev->ops->reset_prepare(ae_dev, HNAE3_FLR_RESET); +} + +static void hns3_reset_done(struct pci_dev *pdev) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); + + dev_info(&pdev->dev, "FLR done\n"); + if (ae_dev && ae_dev->ops && ae_dev->ops->reset_done) + ae_dev->ops->reset_done(ae_dev); +} + +static const struct pci_error_handlers hns3_err_handler = { + .error_detected = hns3_error_detected, + .slot_reset = hns3_slot_reset, + .reset_prepare = hns3_reset_prepare, + .reset_done = hns3_reset_done, +}; + +static SIMPLE_DEV_PM_OPS(hns3_pm_ops, hns3_suspend, hns3_resume); + +static struct pci_driver hns3_driver = { + .name = hns3_driver_name, + .id_table = hns3_pci_tbl, + .probe = hns3_probe, + .remove = hns3_remove, + .shutdown = hns3_shutdown, + .driver.pm = &hns3_pm_ops, + .sriov_configure = hns3_pci_sriov_configure, + .err_handler = &hns3_err_handler, +}; + +/* set default feature to hns3 */ +static void hns3_set_default_feature(struct net_device *netdev) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + struct pci_dev *pdev = h->pdev; + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); + + netdev->priv_flags |= IFF_UNICAST_FLT; + + netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | + NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | + NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_SCTP_CRC | NETIF_F_FRAGLIST; + + if (hnae3_ae_dev_gro_supported(ae_dev)) + netdev->features |= NETIF_F_GRO_HW; + + if (hnae3_ae_dev_fd_supported(ae_dev)) + netdev->features |= NETIF_F_NTUPLE; + + if (test_bit(HNAE3_DEV_SUPPORT_UDP_GSO_B, ae_dev->caps)) + netdev->features |= NETIF_F_GSO_UDP_L4; + + if (test_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps)) + netdev->features |= NETIF_F_HW_CSUM; + else + netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; + + if (test_bit(HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B, ae_dev->caps)) + netdev->features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; + + if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) + netdev->features |= NETIF_F_HW_TC; + + netdev->hw_features |= netdev->features; + if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps)) + netdev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; + + netdev->vlan_features |= netdev->features & + ~(NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_GRO_HW | NETIF_F_NTUPLE | + 
NETIF_F_HW_TC); + + netdev->hw_enc_features |= netdev->vlan_features | NETIF_F_TSO_MANGLEID; + + /* The device_version V3 hardware can't offload the checksum for IP in + * GRE packets, but can do it for NvGRE. So default to disable the + * checksum and GSO offload for GRE. + */ + if (ae_dev->dev_version > HNAE3_DEVICE_VERSION_V2) { + netdev->features &= ~NETIF_F_GSO_GRE; + netdev->features &= ~NETIF_F_GSO_GRE_CSUM; + } +} + +static int hns3_alloc_buffer(struct hns3_enet_ring *ring, + struct hns3_desc_cb *cb) +{ + unsigned int order = hns3_page_order(ring); + struct page *p; + + if (ring->page_pool) { + p = page_pool_dev_alloc_frag(ring->page_pool, + &cb->page_offset, + hns3_buf_size(ring)); + if (unlikely(!p)) + return -ENOMEM; + + cb->priv = p; + cb->buf = page_address(p); + cb->dma = page_pool_get_dma_addr(p); + cb->type = DESC_TYPE_PP_FRAG; + cb->reuse_flag = 0; + return 0; + } + + p = dev_alloc_pages(order); + if (!p) + return -ENOMEM; + + cb->priv = p; + cb->page_offset = 0; + cb->reuse_flag = 0; + cb->buf = page_address(p); + cb->length = hns3_page_size(ring); + cb->type = DESC_TYPE_PAGE; + page_ref_add(p, USHRT_MAX - 1); + cb->pagecnt_bias = USHRT_MAX; + + return 0; +} + +static void hns3_free_buffer(struct hns3_enet_ring *ring, + struct hns3_desc_cb *cb, int budget) +{ + if (cb->type & (DESC_TYPE_SKB | DESC_TYPE_BOUNCE_HEAD | + DESC_TYPE_BOUNCE_ALL | DESC_TYPE_SGL_SKB)) + napi_consume_skb(cb->priv, budget); + else if (!HNAE3_IS_TX_RING(ring)) { + if (cb->type & DESC_TYPE_PAGE && cb->pagecnt_bias) + __page_frag_cache_drain(cb->priv, cb->pagecnt_bias); + else if (cb->type & DESC_TYPE_PP_FRAG) + page_pool_put_full_page(ring->page_pool, cb->priv, + false); + } + memset(cb, 0, sizeof(*cb)); +} + +static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb) +{ + cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0, + cb->length, ring_to_dma_dir(ring)); + + if (unlikely(dma_mapping_error(ring_to_dev(ring), cb->dma))) + return -EIO; + + return 0; +} + +static void hns3_unmap_buffer(struct hns3_enet_ring *ring, + struct hns3_desc_cb *cb) +{ + if (cb->type & (DESC_TYPE_SKB | DESC_TYPE_FRAGLIST_SKB)) + dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length, + ring_to_dma_dir(ring)); + else if ((cb->type & DESC_TYPE_PAGE) && cb->length) + dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length, + ring_to_dma_dir(ring)); + else if (cb->type & (DESC_TYPE_BOUNCE_ALL | DESC_TYPE_BOUNCE_HEAD | + DESC_TYPE_SGL_SKB)) + hns3_tx_spare_reclaim_cb(ring, cb); +} + +static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i) +{ + hns3_unmap_buffer(ring, &ring->desc_cb[i]); + ring->desc[i].addr = 0; + ring->desc_cb[i].refill = 0; +} + +static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i, + int budget) +{ + struct hns3_desc_cb *cb = &ring->desc_cb[i]; + + if (!ring->desc_cb[i].dma) + return; + + hns3_buffer_detach(ring, i); + hns3_free_buffer(ring, cb, budget); +} + +static void hns3_free_buffers(struct hns3_enet_ring *ring) +{ + int i; + + for (i = 0; i < ring->desc_num; i++) + hns3_free_buffer_detach(ring, i, 0); +} + +/* free desc along with its attached buffer */ +static void hns3_free_desc(struct hns3_enet_ring *ring) +{ + int size = ring->desc_num * sizeof(ring->desc[0]); + + hns3_free_buffers(ring); + + if (ring->desc) { + dma_free_coherent(ring_to_dev(ring), size, + ring->desc, ring->desc_dma_addr); + ring->desc = NULL; + } +} + +static int hns3_alloc_desc(struct hns3_enet_ring *ring) +{ + int size = ring->desc_num * sizeof(ring->desc[0]); + + ring->desc 
= dma_alloc_coherent(ring_to_dev(ring), size, + &ring->desc_dma_addr, GFP_KERNEL); + if (!ring->desc) + return -ENOMEM; + + return 0; +} + +static int hns3_alloc_and_map_buffer(struct hns3_enet_ring *ring, + struct hns3_desc_cb *cb) +{ + int ret; + + ret = hns3_alloc_buffer(ring, cb); + if (ret || ring->page_pool) + goto out; + + ret = hns3_map_buffer(ring, cb); + if (ret) + goto out_with_buf; + + return 0; + +out_with_buf: + hns3_free_buffer(ring, cb, 0); +out: + return ret; +} + +static int hns3_alloc_and_attach_buffer(struct hns3_enet_ring *ring, int i) +{ + int ret = hns3_alloc_and_map_buffer(ring, &ring->desc_cb[i]); + + if (ret) + return ret; + + ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma + + ring->desc_cb[i].page_offset); + ring->desc_cb[i].refill = 1; + + return 0; +} + +/* Allocate memory for raw pkg, and map with dma */ +static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring) +{ + int i, j, ret; + + for (i = 0; i < ring->desc_num; i++) { + ret = hns3_alloc_and_attach_buffer(ring, i); + if (ret) + goto out_buffer_fail; + } + + return 0; + +out_buffer_fail: + for (j = i - 1; j >= 0; j--) + hns3_free_buffer_detach(ring, j, 0); + return ret; +} + +/* detach a in-used buffer and replace with a reserved one */ +static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i, + struct hns3_desc_cb *res_cb) +{ + hns3_unmap_buffer(ring, &ring->desc_cb[i]); + ring->desc_cb[i] = *res_cb; + ring->desc_cb[i].refill = 1; + ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma + + ring->desc_cb[i].page_offset); + ring->desc[i].rx.bd_base_info = 0; +} + +static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i) +{ + ring->desc_cb[i].reuse_flag = 0; + ring->desc_cb[i].refill = 1; + ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma + + ring->desc_cb[i].page_offset); + ring->desc[i].rx.bd_base_info = 0; + + dma_sync_single_for_device(ring_to_dev(ring), + ring->desc_cb[i].dma + ring->desc_cb[i].page_offset, + hns3_buf_size(ring), + DMA_FROM_DEVICE); +} + +static bool hns3_nic_reclaim_desc(struct hns3_enet_ring *ring, + int *bytes, int *pkts, int budget) +{ + /* This smp_load_acquire() pairs with smp_store_release() in + * hns3_tx_doorbell(). + */ + int ltu = smp_load_acquire(&ring->last_to_use); + int ntc = ring->next_to_clean; + struct hns3_desc_cb *desc_cb; + bool reclaimed = false; + struct hns3_desc *desc; + + while (ltu != ntc) { + desc = &ring->desc[ntc]; + + if (le16_to_cpu(desc->tx.bdtp_fe_sc_vld_ra_ri) & + BIT(HNS3_TXD_VLD_B)) + break; + + desc_cb = &ring->desc_cb[ntc]; + + if (desc_cb->type & (DESC_TYPE_SKB | DESC_TYPE_BOUNCE_ALL | + DESC_TYPE_BOUNCE_HEAD | + DESC_TYPE_SGL_SKB)) { + (*pkts)++; + (*bytes) += desc_cb->send_bytes; + } + + /* desc_cb will be cleaned, after hnae3_free_buffer_detach */ + hns3_free_buffer_detach(ring, ntc, budget); + + if (++ntc == ring->desc_num) + ntc = 0; + + /* Issue prefetch for next Tx descriptor */ + prefetch(&ring->desc_cb[ntc]); + reclaimed = true; + } + + if (unlikely(!reclaimed)) + return false; + + /* This smp_store_release() pairs with smp_load_acquire() in + * ring_space called by hns3_nic_net_xmit. 
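+ * The release here orders all of the unmap/free work done above before
+ * the store to next_to_clean, so once the xmit path observes the new
+ * value through ring_space() it can safely reuse those descriptors.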
+ */ + smp_store_release(&ring->next_to_clean, ntc); + + hns3_tx_spare_update(ring); + + return true; +} + +void hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget) +{ + struct net_device *netdev = ring_to_netdev(ring); + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct netdev_queue *dev_queue; + int bytes, pkts; + + bytes = 0; + pkts = 0; + + if (unlikely(!hns3_nic_reclaim_desc(ring, &bytes, &pkts, budget))) + return; + + ring->tqp_vector->tx_group.total_bytes += bytes; + ring->tqp_vector->tx_group.total_packets += pkts; + + u64_stats_update_begin(&ring->syncp); + ring->stats.tx_bytes += bytes; + ring->stats.tx_pkts += pkts; + u64_stats_update_end(&ring->syncp); + + dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index); + netdev_tx_completed_queue(dev_queue, pkts, bytes); + + if (unlikely(netif_carrier_ok(netdev) && + ring_space(ring) > HNS3_MAX_TSO_BD_NUM)) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. + */ + smp_mb(); + if (netif_tx_queue_stopped(dev_queue) && + !test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) { + netif_tx_wake_queue(dev_queue); + ring->stats.restart_queue++; + } + } +} + +static int hns3_desc_unused(struct hns3_enet_ring *ring) +{ + int ntc = ring->next_to_clean; + int ntu = ring->next_to_use; + + if (unlikely(ntc == ntu && !ring->desc_cb[ntc].refill)) + return ring->desc_num; + + return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu; +} + +/* Return true if there is any allocation failure */ +static bool hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, + int cleand_count) +{ + struct hns3_desc_cb *desc_cb; + struct hns3_desc_cb res_cbs; + int i, ret; + + for (i = 0; i < cleand_count; i++) { + desc_cb = &ring->desc_cb[ring->next_to_use]; + if (desc_cb->reuse_flag) { + hns3_ring_stats_update(ring, reuse_pg_cnt); + + hns3_reuse_buffer(ring, ring->next_to_use); + } else { + ret = hns3_alloc_and_map_buffer(ring, &res_cbs); + if (ret) { + hns3_ring_stats_update(ring, sw_err_cnt); + + hns3_rl_err(ring_to_netdev(ring), + "alloc rx buffer failed: %d\n", + ret); + + writel(i, ring->tqp->io_base + + HNS3_RING_RX_RING_HEAD_REG); + return true; + } + hns3_replace_buffer(ring, ring->next_to_use, &res_cbs); + + hns3_ring_stats_update(ring, non_reuse_pg); + } + + ring_ptr_move_fw(ring, next_to_use); + } + + writel(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG); + return false; +} + +static bool hns3_can_reuse_page(struct hns3_desc_cb *cb) +{ + return page_count(cb->priv) == cb->pagecnt_bias; +} + +static int hns3_handle_rx_copybreak(struct sk_buff *skb, int i, + struct hns3_enet_ring *ring, + int pull_len, + struct hns3_desc_cb *desc_cb) +{ + struct hns3_desc *desc = &ring->desc[ring->next_to_clean]; + u32 frag_offset = desc_cb->page_offset + pull_len; + int size = le16_to_cpu(desc->rx.size); + u32 frag_size = size - pull_len; + void *frag = napi_alloc_frag(frag_size); + + if (unlikely(!frag)) { + hns3_ring_stats_update(ring, frag_alloc_err); + + hns3_rl_err(ring_to_netdev(ring), + "failed to allocate rx frag\n"); + return -ENOMEM; + } + + desc_cb->reuse_flag = 1; + memcpy(frag, desc_cb->buf + frag_offset, frag_size); + skb_add_rx_frag(skb, i, virt_to_page(frag), + offset_in_page(frag), frag_size, frag_size); + + hns3_ring_stats_update(ring, frag_alloc); + return 0; +} + +static void hns3_nic_reuse_page(struct sk_buff *skb, int i, + struct hns3_enet_ring *ring, int pull_len, + struct hns3_desc_cb *desc_cb) +{ + struct hns3_desc *desc = &ring->desc[ring->next_to_clean]; + u32 frag_offset = 
desc_cb->page_offset + pull_len; + int size = le16_to_cpu(desc->rx.size); + u32 truesize = hns3_buf_size(ring); + u32 frag_size = size - pull_len; + int ret = 0; + bool reused; + + if (ring->page_pool) { + skb_add_rx_frag(skb, i, desc_cb->priv, frag_offset, + frag_size, truesize); + return; + } + + /* Avoid re-using remote or pfmem page */ + if (unlikely(!dev_page_is_reusable(desc_cb->priv))) + goto out; + + reused = hns3_can_reuse_page(desc_cb); + + /* Rx page can be reused when: + * 1. Rx page is only owned by the driver when page_offset + * is zero, which means 0 @ truesize will be used by + * stack after skb_add_rx_frag() is called, and the rest + * of rx page can be reused by driver. + * Or + * 2. Rx page is only owned by the driver when page_offset + * is non-zero, which means page_offset @ truesize will + * be used by stack after skb_add_rx_frag() is called, + * and 0 @ truesize can be reused by driver. + */ + if ((!desc_cb->page_offset && reused) || + ((desc_cb->page_offset + truesize + truesize) <= + hns3_page_size(ring) && desc_cb->page_offset)) { + desc_cb->page_offset += truesize; + desc_cb->reuse_flag = 1; + } else if (desc_cb->page_offset && reused) { + desc_cb->page_offset = 0; + desc_cb->reuse_flag = 1; + } else if (frag_size <= ring->rx_copybreak) { + ret = hns3_handle_rx_copybreak(skb, i, ring, pull_len, desc_cb); + if (!ret) + return; + } + +out: + desc_cb->pagecnt_bias--; + + if (unlikely(!desc_cb->pagecnt_bias)) { + page_ref_add(desc_cb->priv, USHRT_MAX); + desc_cb->pagecnt_bias = USHRT_MAX; + } + + skb_add_rx_frag(skb, i, desc_cb->priv, frag_offset, + frag_size, truesize); + + if (unlikely(!desc_cb->reuse_flag)) + __page_frag_cache_drain(desc_cb->priv, desc_cb->pagecnt_bias); +} + +static int hns3_gro_complete(struct sk_buff *skb, u32 l234info) +{ + __be16 type = skb->protocol; + struct tcphdr *th; + int depth = 0; + + while (eth_type_vlan(type)) { + struct vlan_hdr *vh; + + if ((depth + VLAN_HLEN) > skb_headlen(skb)) + return -EFAULT; + + vh = (struct vlan_hdr *)(skb->data + depth); + type = vh->h_vlan_encapsulated_proto; + depth += VLAN_HLEN; + } + + skb_set_network_header(skb, depth); + + if (type == htons(ETH_P_IP)) { + const struct iphdr *iph = ip_hdr(skb); + + depth += sizeof(struct iphdr); + skb_set_transport_header(skb, depth); + th = tcp_hdr(skb); + th->check = ~tcp_v4_check(skb->len - depth, iph->saddr, + iph->daddr, 0); + } else if (type == htons(ETH_P_IPV6)) { + const struct ipv6hdr *iph = ipv6_hdr(skb); + + depth += sizeof(struct ipv6hdr); + skb_set_transport_header(skb, depth); + th = tcp_hdr(skb); + th->check = ~tcp_v6_check(skb->len - depth, &iph->saddr, + &iph->daddr, 0); + } else { + hns3_rl_err(skb->dev, + "Error: FW GRO supports only IPv4/IPv6, not 0x%04x, depth: %d\n", + be16_to_cpu(type), depth); + return -EFAULT; + } + + skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count; + if (th->cwr) + skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; + + if (l234info & BIT(HNS3_RXD_GRO_FIXID_B)) + skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID; + + skb->csum_start = (unsigned char *)th - skb->head; + skb->csum_offset = offsetof(struct tcphdr, check); + skb->ip_summed = CHECKSUM_PARTIAL; + + trace_hns3_gro(skb); + + return 0; +} + +static void hns3_checksum_complete(struct hns3_enet_ring *ring, + struct sk_buff *skb, u32 ptype, u16 csum) +{ + if (ptype == HNS3_INVALID_PTYPE || + hns3_rx_ptype_tbl[ptype].ip_summed != CHECKSUM_COMPLETE) + return; + + hns3_ring_stats_update(ring, csum_complete); + skb->ip_summed = CHECKSUM_COMPLETE; + skb->csum = 
csum_unfold((__force __sum16)csum); +} + +static void hns3_rx_handle_csum(struct sk_buff *skb, u32 l234info, + u32 ol_info, u32 ptype) +{ + int l3_type, l4_type; + int ol4_type; + + if (ptype != HNS3_INVALID_PTYPE) { + skb->csum_level = hns3_rx_ptype_tbl[ptype].csum_level; + skb->ip_summed = hns3_rx_ptype_tbl[ptype].ip_summed; + + return; + } + + ol4_type = hnae3_get_field(ol_info, HNS3_RXD_OL4ID_M, + HNS3_RXD_OL4ID_S); + switch (ol4_type) { + case HNS3_OL4_TYPE_MAC_IN_UDP: + case HNS3_OL4_TYPE_NVGRE: + skb->csum_level = 1; + fallthrough; + case HNS3_OL4_TYPE_NO_TUN: + l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M, + HNS3_RXD_L3ID_S); + l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M, + HNS3_RXD_L4ID_S); + /* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */ + if ((l3_type == HNS3_L3_TYPE_IPV4 || + l3_type == HNS3_L3_TYPE_IPV6) && + (l4_type == HNS3_L4_TYPE_UDP || + l4_type == HNS3_L4_TYPE_TCP || + l4_type == HNS3_L4_TYPE_SCTP)) + skb->ip_summed = CHECKSUM_UNNECESSARY; + break; + default: + break; + } +} + +static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, + u32 l234info, u32 bd_base_info, u32 ol_info, + u16 csum) +{ + struct net_device *netdev = ring_to_netdev(ring); + struct hns3_nic_priv *priv = netdev_priv(netdev); + u32 ptype = HNS3_INVALID_PTYPE; + + skb->ip_summed = CHECKSUM_NONE; + + skb_checksum_none_assert(skb); + + if (!(netdev->features & NETIF_F_RXCSUM)) + return; + + if (test_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state)) + ptype = hnae3_get_field(ol_info, HNS3_RXD_PTYPE_M, + HNS3_RXD_PTYPE_S); + + hns3_checksum_complete(ring, skb, ptype, csum); + + /* check if hardware has done checksum */ + if (!(bd_base_info & BIT(HNS3_RXD_L3L4P_B))) + return; + + if (unlikely(l234info & (BIT(HNS3_RXD_L3E_B) | BIT(HNS3_RXD_L4E_B) | + BIT(HNS3_RXD_OL3E_B) | + BIT(HNS3_RXD_OL4E_B)))) { + skb->ip_summed = CHECKSUM_NONE; + hns3_ring_stats_update(ring, l3l4_csum_err); + + return; + } + + hns3_rx_handle_csum(skb, l234info, ol_info, ptype); +} + +static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb) +{ + if (skb_has_frag_list(skb)) + napi_gro_flush(&ring->tqp_vector->napi, false); + + napi_gro_receive(&ring->tqp_vector->napi, skb); +} + +static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring, + struct hns3_desc *desc, u32 l234info, + u16 *vlan_tag) +{ + struct hnae3_handle *handle = ring->tqp->handle; + struct pci_dev *pdev = ring->tqp->handle->pdev; + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); + + if (unlikely(ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)) { + *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); + if (!(*vlan_tag & VLAN_VID_MASK)) + *vlan_tag = le16_to_cpu(desc->rx.vlan_tag); + + return (*vlan_tag != 0); + } + +#define HNS3_STRP_OUTER_VLAN 0x1 +#define HNS3_STRP_INNER_VLAN 0x2 +#define HNS3_STRP_BOTH 0x3 + + /* Hardware always insert VLAN tag into RX descriptor when + * remove the tag from packet, driver needs to determine + * reporting which tag to stack. 
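+ * For example, when both tags were stripped (HNS3_STRP_BOTH) and no
+ * port based VLAN is in use, the outer tag from ot_vlan_tag is passed
+ * to the stack; with a port based VLAN configured the outer tag is the
+ * port's own, so the inner tag from vlan_tag is reported instead.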
+ */ + switch (hnae3_get_field(l234info, HNS3_RXD_STRP_TAGP_M, + HNS3_RXD_STRP_TAGP_S)) { + case HNS3_STRP_OUTER_VLAN: + if (handle->port_base_vlan_state != + HNAE3_PORT_BASE_VLAN_DISABLE) + return false; + + *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); + return true; + case HNS3_STRP_INNER_VLAN: + if (handle->port_base_vlan_state != + HNAE3_PORT_BASE_VLAN_DISABLE) + return false; + + *vlan_tag = le16_to_cpu(desc->rx.vlan_tag); + return true; + case HNS3_STRP_BOTH: + if (handle->port_base_vlan_state == + HNAE3_PORT_BASE_VLAN_DISABLE) + *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); + else + *vlan_tag = le16_to_cpu(desc->rx.vlan_tag); + + return true; + default: + return false; + } +} + +static void hns3_rx_ring_move_fw(struct hns3_enet_ring *ring) +{ + ring->desc[ring->next_to_clean].rx.bd_base_info &= + cpu_to_le32(~BIT(HNS3_RXD_VLD_B)); + ring->desc_cb[ring->next_to_clean].refill = 0; + ring->next_to_clean += 1; + + if (unlikely(ring->next_to_clean == ring->desc_num)) + ring->next_to_clean = 0; +} + +static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length, + unsigned char *va) +{ + struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean]; + struct net_device *netdev = ring_to_netdev(ring); + struct sk_buff *skb; + + ring->skb = napi_alloc_skb(&ring->tqp_vector->napi, HNS3_RX_HEAD_SIZE); + skb = ring->skb; + if (unlikely(!skb)) { + hns3_rl_err(netdev, "alloc rx skb fail\n"); + hns3_ring_stats_update(ring, sw_err_cnt); + + return -ENOMEM; + } + + trace_hns3_rx_desc(ring); + prefetchw(skb->data); + + ring->pending_buf = 1; + ring->frag_num = 0; + ring->tail_skb = NULL; + if (length <= HNS3_RX_HEAD_SIZE) { + memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long))); + + /* We can reuse buffer as-is, just make sure it is reusable */ + if (dev_page_is_reusable(desc_cb->priv)) + desc_cb->reuse_flag = 1; + else if (desc_cb->type & DESC_TYPE_PP_FRAG) + page_pool_put_full_page(ring->page_pool, desc_cb->priv, + false); + else /* This page cannot be reused so discard it */ + __page_frag_cache_drain(desc_cb->priv, + desc_cb->pagecnt_bias); + + hns3_rx_ring_move_fw(ring); + return 0; + } + + if (ring->page_pool) + skb_mark_for_recycle(skb); + + hns3_ring_stats_update(ring, seg_pkt_cnt); + + ring->pull_len = eth_get_headlen(netdev, va, HNS3_RX_HEAD_SIZE); + __skb_put(skb, ring->pull_len); + hns3_nic_reuse_page(skb, ring->frag_num++, ring, ring->pull_len, + desc_cb); + hns3_rx_ring_move_fw(ring); + + return 0; +} + +static int hns3_add_frag(struct hns3_enet_ring *ring) +{ + struct sk_buff *skb = ring->skb; + struct sk_buff *head_skb = skb; + struct sk_buff *new_skb; + struct hns3_desc_cb *desc_cb; + struct hns3_desc *desc; + u32 bd_base_info; + + do { + desc = &ring->desc[ring->next_to_clean]; + desc_cb = &ring->desc_cb[ring->next_to_clean]; + bd_base_info = le32_to_cpu(desc->rx.bd_base_info); + /* make sure HW write desc complete */ + dma_rmb(); + if (!(bd_base_info & BIT(HNS3_RXD_VLD_B))) + return -ENXIO; + + if (unlikely(ring->frag_num >= MAX_SKB_FRAGS)) { + new_skb = napi_alloc_skb(&ring->tqp_vector->napi, 0); + if (unlikely(!new_skb)) { + hns3_rl_err(ring_to_netdev(ring), + "alloc rx fraglist skb fail\n"); + return -ENXIO; + } + + if (ring->page_pool) + skb_mark_for_recycle(new_skb); + + ring->frag_num = 0; + + if (ring->tail_skb) { + ring->tail_skb->next = new_skb; + ring->tail_skb = new_skb; + } else { + skb_shinfo(skb)->frag_list = new_skb; + ring->tail_skb = new_skb; + } + } + + if (ring->tail_skb) { + head_skb->truesize += hns3_buf_size(ring); + 
head_skb->data_len += le16_to_cpu(desc->rx.size); + head_skb->len += le16_to_cpu(desc->rx.size); + skb = ring->tail_skb; + } + + dma_sync_single_for_cpu(ring_to_dev(ring), + desc_cb->dma + desc_cb->page_offset, + hns3_buf_size(ring), + DMA_FROM_DEVICE); + + hns3_nic_reuse_page(skb, ring->frag_num++, ring, 0, desc_cb); + trace_hns3_rx_desc(ring); + hns3_rx_ring_move_fw(ring); + ring->pending_buf++; + } while (!(bd_base_info & BIT(HNS3_RXD_FE_B))); + + return 0; +} + +static int hns3_set_gro_and_checksum(struct hns3_enet_ring *ring, + struct sk_buff *skb, u32 l234info, + u32 bd_base_info, u32 ol_info, u16 csum) +{ + struct net_device *netdev = ring_to_netdev(ring); + struct hns3_nic_priv *priv = netdev_priv(netdev); + u32 l3_type; + + skb_shinfo(skb)->gso_size = hnae3_get_field(bd_base_info, + HNS3_RXD_GRO_SIZE_M, + HNS3_RXD_GRO_SIZE_S); + /* if there is no HW GRO, do not set gro params */ + if (!skb_shinfo(skb)->gso_size) { + hns3_rx_checksum(ring, skb, l234info, bd_base_info, ol_info, + csum); + return 0; + } + + NAPI_GRO_CB(skb)->count = hnae3_get_field(l234info, + HNS3_RXD_GRO_COUNT_M, + HNS3_RXD_GRO_COUNT_S); + + if (test_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state)) { + u32 ptype = hnae3_get_field(ol_info, HNS3_RXD_PTYPE_M, + HNS3_RXD_PTYPE_S); + + l3_type = hns3_rx_ptype_tbl[ptype].l3_type; + } else { + l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M, + HNS3_RXD_L3ID_S); + } + + if (l3_type == HNS3_L3_TYPE_IPV4) + skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; + else if (l3_type == HNS3_L3_TYPE_IPV6) + skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; + else + return -EFAULT; + + return hns3_gro_complete(skb, l234info); +} + +static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring, + struct sk_buff *skb, u32 rss_hash, + u32 l234info, u32 ol_info) +{ + enum pkt_hash_types rss_type = PKT_HASH_TYPE_NONE; + struct net_device *netdev = ring_to_netdev(ring); + struct hns3_nic_priv *priv = netdev_priv(netdev); + + if (test_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state)) { + u32 ptype = hnae3_get_field(ol_info, HNS3_RXD_PTYPE_M, + HNS3_RXD_PTYPE_S); + + rss_type = hns3_rx_ptype_tbl[ptype].hash_type; + } else { + int l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M, + HNS3_RXD_L3ID_S); + int l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M, + HNS3_RXD_L4ID_S); + + if (l3_type == HNS3_L3_TYPE_IPV4 || + l3_type == HNS3_L3_TYPE_IPV6) { + if (l4_type == HNS3_L4_TYPE_UDP || + l4_type == HNS3_L4_TYPE_TCP || + l4_type == HNS3_L4_TYPE_SCTP) + rss_type = PKT_HASH_TYPE_L4; + else if (l4_type == HNS3_L4_TYPE_IGMP || + l4_type == HNS3_L4_TYPE_ICMP) + rss_type = PKT_HASH_TYPE_L3; + } + } + + skb_set_hash(skb, rss_hash, rss_type); +} + +static void hns3_handle_rx_ts_info(struct net_device *netdev, + struct hns3_desc *desc, struct sk_buff *skb, + u32 bd_base_info) +{ + if (unlikely(bd_base_info & BIT(HNS3_RXD_TS_VLD_B))) { + struct hnae3_handle *h = hns3_get_handle(netdev); + u32 nsec = le32_to_cpu(desc->ts_nsec); + u32 sec = le32_to_cpu(desc->ts_sec); + + if (h->ae_algo->ops->get_rx_hwts) + h->ae_algo->ops->get_rx_hwts(h, skb, nsec, sec); + } +} + +static void hns3_handle_rx_vlan_tag(struct hns3_enet_ring *ring, + struct hns3_desc *desc, struct sk_buff *skb, + u32 l234info) +{ + struct net_device *netdev = ring_to_netdev(ring); + + /* Based on hw strategy, the tag offloaded will be stored at + * ot_vlan_tag in two layer tag case, and stored at vlan_tag + * in one layer tag case. 
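+ * hns3_parse_vlan_tag() decodes the strip status and selects which of
+ * the two fields should be reported to the stack.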
+ */ + if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) { + u16 vlan_tag; + + if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag)) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + vlan_tag); + } +} + +static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb) +{ + struct net_device *netdev = ring_to_netdev(ring); + enum hns3_pkt_l2t_type l2_frame_type; + u32 bd_base_info, l234info, ol_info; + struct hns3_desc *desc; + unsigned int len; + int pre_ntc, ret; + u16 csum; + + /* bdinfo handled below is only valid on the last BD of the + * current packet, and ring->next_to_clean indicates the first + * descriptor of next packet, so need - 1 below. + */ + pre_ntc = ring->next_to_clean ? (ring->next_to_clean - 1) : + (ring->desc_num - 1); + desc = &ring->desc[pre_ntc]; + bd_base_info = le32_to_cpu(desc->rx.bd_base_info); + l234info = le32_to_cpu(desc->rx.l234_info); + ol_info = le32_to_cpu(desc->rx.ol_info); + csum = le16_to_cpu(desc->csum); + + hns3_handle_rx_ts_info(netdev, desc, skb, bd_base_info); + + hns3_handle_rx_vlan_tag(ring, desc, skb, l234info); + + if (unlikely(!desc->rx.pkt_len || (l234info & (BIT(HNS3_RXD_TRUNCAT_B) | + BIT(HNS3_RXD_L2E_B))))) { + u64_stats_update_begin(&ring->syncp); + if (l234info & BIT(HNS3_RXD_L2E_B)) + ring->stats.l2_err++; + else + ring->stats.err_pkt_len++; + u64_stats_update_end(&ring->syncp); + + return -EFAULT; + } + + len = skb->len; + + /* Do update ip stack process */ + skb->protocol = eth_type_trans(skb, netdev); + + /* This is needed in order to enable forwarding support */ + ret = hns3_set_gro_and_checksum(ring, skb, l234info, + bd_base_info, ol_info, csum); + if (unlikely(ret)) { + hns3_ring_stats_update(ring, rx_err_cnt); + return ret; + } + + l2_frame_type = hnae3_get_field(l234info, HNS3_RXD_DMAC_M, + HNS3_RXD_DMAC_S); + + u64_stats_update_begin(&ring->syncp); + ring->stats.rx_pkts++; + ring->stats.rx_bytes += len; + + if (l2_frame_type == HNS3_L2_TYPE_MULTICAST) + ring->stats.rx_multicast++; + + u64_stats_update_end(&ring->syncp); + + ring->tqp_vector->rx_group.total_bytes += len; + + hns3_set_rx_skb_rss_type(ring, skb, le32_to_cpu(desc->rx.rss_hash), + l234info, ol_info); + return 0; +} + +static int hns3_handle_rx_bd(struct hns3_enet_ring *ring) +{ + struct sk_buff *skb = ring->skb; + struct hns3_desc_cb *desc_cb; + struct hns3_desc *desc; + unsigned int length; + u32 bd_base_info; + int ret; + + desc = &ring->desc[ring->next_to_clean]; + desc_cb = &ring->desc_cb[ring->next_to_clean]; + + prefetch(desc); + + if (!skb) { + bd_base_info = le32_to_cpu(desc->rx.bd_base_info); + /* Check valid BD */ + if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B)))) + return -ENXIO; + + dma_rmb(); + length = le16_to_cpu(desc->rx.size); + + ring->va = desc_cb->buf + desc_cb->page_offset; + + dma_sync_single_for_cpu(ring_to_dev(ring), + desc_cb->dma + desc_cb->page_offset, + hns3_buf_size(ring), + DMA_FROM_DEVICE); + + /* Prefetch first cache line of first page. + * Idea is to cache few bytes of the header of the packet. + * Our L1 Cache line size is 64B so need to prefetch twice to make + * it 128B. But in actual we can have greater size of caches with + * 128B Level 1 cache lines. In such a case, single fetch would + * suffice to cache in the relevant part of the header. 
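+ * net_prefetch() below handles this, issuing a second prefetch on
+ * systems whose L1 cache lines are smaller than 128 bytes.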
+ */ + net_prefetch(ring->va); + + ret = hns3_alloc_skb(ring, length, ring->va); + skb = ring->skb; + + if (ret < 0) /* alloc buffer fail */ + return ret; + if (!(bd_base_info & BIT(HNS3_RXD_FE_B))) { /* need add frag */ + ret = hns3_add_frag(ring); + if (ret) + return ret; + } + } else { + ret = hns3_add_frag(ring); + if (ret) + return ret; + } + + /* As the head data may be changed when GRO enable, copy + * the head data in after other data rx completed + */ + if (skb->len > HNS3_RX_HEAD_SIZE) + memcpy(skb->data, ring->va, + ALIGN(ring->pull_len, sizeof(long))); + + ret = hns3_handle_bdinfo(ring, skb); + if (unlikely(ret)) { + dev_kfree_skb_any(skb); + return ret; + } + + skb_record_rx_queue(skb, ring->tqp->tqp_index); + return 0; +} + +int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget, + void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *)) +{ +#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16 + int unused_count = hns3_desc_unused(ring); + bool failure = false; + int recv_pkts = 0; + int err; + + unused_count -= ring->pending_buf; + + while (recv_pkts < budget) { + /* Reuse or realloc buffers */ + if (unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) { + failure = failure || + hns3_nic_alloc_rx_buffers(ring, unused_count); + unused_count = 0; + } + + /* Poll one pkt */ + err = hns3_handle_rx_bd(ring); + /* Do not get FE for the packet or failed to alloc skb */ + if (unlikely(!ring->skb || err == -ENXIO)) { + goto out; + } else if (likely(!err)) { + rx_fn(ring, ring->skb); + recv_pkts++; + } + + unused_count += ring->pending_buf; + ring->skb = NULL; + ring->pending_buf = 0; + } + +out: + /* sync head pointer before exiting, since hardware will calculate + * FBD number with head pointer + */ + if (unused_count > 0) + failure = failure || + hns3_nic_alloc_rx_buffers(ring, unused_count); + + return failure ? budget : recv_pkts; +} + +static void hns3_update_rx_int_coalesce(struct hns3_enet_tqp_vector *tqp_vector) +{ + struct hns3_enet_ring_group *rx_group = &tqp_vector->rx_group; + struct dim_sample sample = {}; + + if (!rx_group->coal.adapt_enable) + return; + + dim_update_sample(tqp_vector->event_cnt, rx_group->total_packets, + rx_group->total_bytes, &sample); + net_dim(&rx_group->dim, sample); +} + +static void hns3_update_tx_int_coalesce(struct hns3_enet_tqp_vector *tqp_vector) +{ + struct hns3_enet_ring_group *tx_group = &tqp_vector->tx_group; + struct dim_sample sample = {}; + + if (!tx_group->coal.adapt_enable) + return; + + dim_update_sample(tqp_vector->event_cnt, tx_group->total_packets, + tx_group->total_bytes, &sample); + net_dim(&tx_group->dim, sample); +} + +static int hns3_nic_common_poll(struct napi_struct *napi, int budget) +{ + struct hns3_nic_priv *priv = netdev_priv(napi->dev); + struct hns3_enet_ring *ring; + int rx_pkt_total = 0; + + struct hns3_enet_tqp_vector *tqp_vector = + container_of(napi, struct hns3_enet_tqp_vector, napi); + bool clean_complete = true; + int rx_budget = budget; + + if (unlikely(test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) { + napi_complete(napi); + return 0; + } + + /* Since the actual Tx work is minimal, we can give the Tx a larger + * budget and be more aggressive about cleaning up the Tx descriptors. 
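+ * Each TX ring on this vector is cleaned with the full budget, while
+ * the RX budget below is split across the vector's RX rings (e.g. a
+ * budget of 64 shared by 4 TQPs leaves 16 per RX ring, clamped to at
+ * least 1).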
+ */ + hns3_for_each_ring(ring, tqp_vector->tx_group) + hns3_clean_tx_ring(ring, budget); + + /* make sure rx ring budget not smaller than 1 */ + if (tqp_vector->num_tqps > 1) + rx_budget = max(budget / tqp_vector->num_tqps, 1); + + hns3_for_each_ring(ring, tqp_vector->rx_group) { + int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget, + hns3_rx_skb); + if (rx_cleaned >= rx_budget) + clean_complete = false; + + rx_pkt_total += rx_cleaned; + } + + tqp_vector->rx_group.total_packets += rx_pkt_total; + + if (!clean_complete) + return budget; + + if (napi_complete(napi) && + likely(!test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) { + hns3_update_rx_int_coalesce(tqp_vector); + hns3_update_tx_int_coalesce(tqp_vector); + + hns3_mask_vector_irq(tqp_vector, 1); + } + + return rx_pkt_total; +} + +static int hns3_create_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, + struct hnae3_ring_chain_node **head, + bool is_tx) +{ + u32 bit_value = is_tx ? HNAE3_RING_TYPE_TX : HNAE3_RING_TYPE_RX; + u32 field_value = is_tx ? HNAE3_RING_GL_TX : HNAE3_RING_GL_RX; + struct hnae3_ring_chain_node *cur_chain = *head; + struct pci_dev *pdev = tqp_vector->handle->pdev; + struct hnae3_ring_chain_node *chain; + struct hns3_enet_ring *ring; + + ring = is_tx ? tqp_vector->tx_group.ring : tqp_vector->rx_group.ring; + + if (cur_chain) { + while (cur_chain->next) + cur_chain = cur_chain->next; + } + + while (ring) { + chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL); + if (!chain) + return -ENOMEM; + if (cur_chain) + cur_chain->next = chain; + else + *head = chain; + chain->tqp_index = ring->tqp->tqp_index; + hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B, + bit_value); + hnae3_set_field(chain->int_gl_idx, + HNAE3_RING_GL_IDX_M, + HNAE3_RING_GL_IDX_S, field_value); + + cur_chain = chain; + + ring = ring->next; + } + + return 0; +} + +static struct hnae3_ring_chain_node * +hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector) +{ + struct pci_dev *pdev = tqp_vector->handle->pdev; + struct hnae3_ring_chain_node *cur_chain = NULL; + struct hnae3_ring_chain_node *chain; + + if (hns3_create_ring_chain(tqp_vector, &cur_chain, true)) + goto err_free_chain; + + if (hns3_create_ring_chain(tqp_vector, &cur_chain, false)) + goto err_free_chain; + + return cur_chain; + +err_free_chain: + while (cur_chain) { + chain = cur_chain->next; + devm_kfree(&pdev->dev, cur_chain); + cur_chain = chain; + } + + return NULL; +} + +static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, + struct hnae3_ring_chain_node *head) +{ + struct pci_dev *pdev = tqp_vector->handle->pdev; + struct hnae3_ring_chain_node *chain_tmp, *chain; + + chain = head; + + while (chain) { + chain_tmp = chain->next; + devm_kfree(&pdev->dev, chain); + chain = chain_tmp; + } +} + +static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group, + struct hns3_enet_ring *ring) +{ + ring->next = group->ring; + group->ring = ring; + + group->count++; +} + +static void hns3_nic_set_cpumask(struct hns3_nic_priv *priv) +{ + struct pci_dev *pdev = priv->ae_handle->pdev; + struct hns3_enet_tqp_vector *tqp_vector; + int num_vectors = priv->vector_num; + int numa_node; + int vector_i; + + numa_node = dev_to_node(&pdev->dev); + + for (vector_i = 0; vector_i < num_vectors; vector_i++) { + tqp_vector = &priv->tqp_vector[vector_i]; + cpumask_set_cpu(cpumask_local_spread(vector_i, numa_node), + &tqp_vector->affinity_mask); + } +} + +static void hns3_rx_dim_work(struct work_struct *work) +{ + struct dim *dim = container_of(work, struct dim, 
work); + struct hns3_enet_ring_group *group = container_of(dim, + struct hns3_enet_ring_group, dim); + struct hns3_enet_tqp_vector *tqp_vector = group->ring->tqp_vector; + struct dim_cq_moder cur_moder = + net_dim_get_rx_moderation(dim->mode, dim->profile_ix); + + hns3_set_vector_coalesce_rx_gl(group->ring->tqp_vector, cur_moder.usec); + tqp_vector->rx_group.coal.int_gl = cur_moder.usec; + + if (cur_moder.pkts < tqp_vector->rx_group.coal.int_ql_max) { + hns3_set_vector_coalesce_rx_ql(tqp_vector, cur_moder.pkts); + tqp_vector->rx_group.coal.int_ql = cur_moder.pkts; + } + + dim->state = DIM_START_MEASURE; +} + +static void hns3_tx_dim_work(struct work_struct *work) +{ + struct dim *dim = container_of(work, struct dim, work); + struct hns3_enet_ring_group *group = container_of(dim, + struct hns3_enet_ring_group, dim); + struct hns3_enet_tqp_vector *tqp_vector = group->ring->tqp_vector; + struct dim_cq_moder cur_moder = + net_dim_get_tx_moderation(dim->mode, dim->profile_ix); + + hns3_set_vector_coalesce_tx_gl(tqp_vector, cur_moder.usec); + tqp_vector->tx_group.coal.int_gl = cur_moder.usec; + + if (cur_moder.pkts < tqp_vector->tx_group.coal.int_ql_max) { + hns3_set_vector_coalesce_tx_ql(tqp_vector, cur_moder.pkts); + tqp_vector->tx_group.coal.int_ql = cur_moder.pkts; + } + + dim->state = DIM_START_MEASURE; +} + +static void hns3_nic_init_dim(struct hns3_enet_tqp_vector *tqp_vector) +{ + INIT_WORK(&tqp_vector->rx_group.dim.work, hns3_rx_dim_work); + INIT_WORK(&tqp_vector->tx_group.dim.work, hns3_tx_dim_work); +} + +static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv) +{ + struct hnae3_handle *h = priv->ae_handle; + struct hns3_enet_tqp_vector *tqp_vector; + int ret; + int i; + + hns3_nic_set_cpumask(priv); + + for (i = 0; i < priv->vector_num; i++) { + tqp_vector = &priv->tqp_vector[i]; + hns3_vector_coalesce_init_hw(tqp_vector, priv); + tqp_vector->num_tqps = 0; + hns3_nic_init_dim(tqp_vector); + } + + for (i = 0; i < h->kinfo.num_tqps; i++) { + u16 vector_i = i % priv->vector_num; + u16 tqp_num = h->kinfo.num_tqps; + + tqp_vector = &priv->tqp_vector[vector_i]; + + hns3_add_ring_to_group(&tqp_vector->tx_group, + &priv->ring[i]); + + hns3_add_ring_to_group(&tqp_vector->rx_group, + &priv->ring[i + tqp_num]); + + priv->ring[i].tqp_vector = tqp_vector; + priv->ring[i + tqp_num].tqp_vector = tqp_vector; + tqp_vector->num_tqps++; + } + + for (i = 0; i < priv->vector_num; i++) { + struct hnae3_ring_chain_node *vector_ring_chain; + + tqp_vector = &priv->tqp_vector[i]; + + tqp_vector->rx_group.total_bytes = 0; + tqp_vector->rx_group.total_packets = 0; + tqp_vector->tx_group.total_bytes = 0; + tqp_vector->tx_group.total_packets = 0; + tqp_vector->handle = h; + + vector_ring_chain = hns3_get_vector_ring_chain(tqp_vector); + if (!vector_ring_chain) { + ret = -ENOMEM; + goto map_ring_fail; + } + + ret = h->ae_algo->ops->map_ring_to_vector(h, + tqp_vector->vector_irq, vector_ring_chain); + + hns3_free_vector_ring_chain(tqp_vector, vector_ring_chain); + + if (ret) + goto map_ring_fail; + + netif_napi_add(priv->netdev, &tqp_vector->napi, + hns3_nic_common_poll); + } + + return 0; + +map_ring_fail: + while (i--) + netif_napi_del(&priv->tqp_vector[i].napi); + + return ret; +} + +static void hns3_nic_init_coal_cfg(struct hns3_nic_priv *priv) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev); + struct hns3_enet_coalesce *tx_coal = &priv->tx_coal; + struct hns3_enet_coalesce *rx_coal = &priv->rx_coal; + + /* initialize the configuration for interrupt coalescing. + * 1. 
GL (Interrupt Gap Limiter) + * 2. RL (Interrupt Rate Limiter) + * 3. QL (Interrupt Quantity Limiter) + * + * Default: enable interrupt coalescing self-adaptive and GL + */ + tx_coal->adapt_enable = 1; + rx_coal->adapt_enable = 1; + + tx_coal->int_gl = HNS3_INT_GL_50K; + rx_coal->int_gl = HNS3_INT_GL_50K; + + rx_coal->flow_level = HNS3_FLOW_LOW; + tx_coal->flow_level = HNS3_FLOW_LOW; + + if (ae_dev->dev_specs.int_ql_max) { + tx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG; + rx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG; + } +} + +static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv) +{ + struct hnae3_handle *h = priv->ae_handle; + struct hns3_enet_tqp_vector *tqp_vector; + struct hnae3_vector_info *vector; + struct pci_dev *pdev = h->pdev; + u16 tqp_num = h->kinfo.num_tqps; + u16 vector_num; + int ret = 0; + u16 i; + + /* RSS size, cpu online and vector_num should be the same */ + /* Should consider 2p/4p later */ + vector_num = min_t(u16, num_online_cpus(), tqp_num); + + vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector), + GFP_KERNEL); + if (!vector) + return -ENOMEM; + + /* save the actual available vector number */ + vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector); + + priv->vector_num = vector_num; + priv->tqp_vector = (struct hns3_enet_tqp_vector *) + devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector), + GFP_KERNEL); + if (!priv->tqp_vector) { + ret = -ENOMEM; + goto out; + } + + for (i = 0; i < priv->vector_num; i++) { + tqp_vector = &priv->tqp_vector[i]; + tqp_vector->idx = i; + tqp_vector->mask_addr = vector[i].io_addr; + tqp_vector->vector_irq = vector[i].vector; + hns3_vector_coalesce_init(tqp_vector, priv); + } + +out: + devm_kfree(&pdev->dev, vector); + return ret; +} + +static void hns3_clear_ring_group(struct hns3_enet_ring_group *group) +{ + group->ring = NULL; + group->count = 0; +} + +static void hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv) +{ + struct hnae3_ring_chain_node *vector_ring_chain; + struct hnae3_handle *h = priv->ae_handle; + struct hns3_enet_tqp_vector *tqp_vector; + int i; + + for (i = 0; i < priv->vector_num; i++) { + tqp_vector = &priv->tqp_vector[i]; + + if (!tqp_vector->rx_group.ring && !tqp_vector->tx_group.ring) + continue; + + /* Since the mapping can be overwritten, when fail to get the + * chain between vector and ring, we should go on to deal with + * the remaining options. 
+ */ + vector_ring_chain = hns3_get_vector_ring_chain(tqp_vector); + if (!vector_ring_chain) + dev_warn(priv->dev, "failed to get ring chain\n"); + + h->ae_algo->ops->unmap_ring_from_vector(h, + tqp_vector->vector_irq, vector_ring_chain); + + hns3_free_vector_ring_chain(tqp_vector, vector_ring_chain); + + hns3_clear_ring_group(&tqp_vector->rx_group); + hns3_clear_ring_group(&tqp_vector->tx_group); + netif_napi_del(&priv->tqp_vector[i].napi); + } +} + +static void hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv) +{ + struct hnae3_handle *h = priv->ae_handle; + struct pci_dev *pdev = h->pdev; + int i, ret; + + for (i = 0; i < priv->vector_num; i++) { + struct hns3_enet_tqp_vector *tqp_vector; + + tqp_vector = &priv->tqp_vector[i]; + ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq); + if (ret) + return; + } + + devm_kfree(&pdev->dev, priv->tqp_vector); +} + +static void hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv, + unsigned int ring_type) +{ + int queue_num = priv->ae_handle->kinfo.num_tqps; + struct hns3_enet_ring *ring; + int desc_num; + + if (ring_type == HNAE3_RING_TYPE_TX) { + ring = &priv->ring[q->tqp_index]; + desc_num = priv->ae_handle->kinfo.num_tx_desc; + ring->queue_index = q->tqp_index; + ring->tx_copybreak = priv->tx_copybreak; + ring->last_to_use = 0; + } else { + ring = &priv->ring[q->tqp_index + queue_num]; + desc_num = priv->ae_handle->kinfo.num_rx_desc; + ring->queue_index = q->tqp_index; + ring->rx_copybreak = priv->rx_copybreak; + } + + hnae3_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type); + + ring->tqp = q; + ring->desc = NULL; + ring->desc_cb = NULL; + ring->dev = priv->dev; + ring->desc_dma_addr = 0; + ring->buf_size = q->buf_size; + ring->desc_num = desc_num; + ring->next_to_use = 0; + ring->next_to_clean = 0; +} + +static void hns3_queue_to_ring(struct hnae3_queue *tqp, + struct hns3_nic_priv *priv) +{ + hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX); + hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX); +} + +static int hns3_get_ring_config(struct hns3_nic_priv *priv) +{ + struct hnae3_handle *h = priv->ae_handle; + struct pci_dev *pdev = h->pdev; + int i; + + priv->ring = devm_kzalloc(&pdev->dev, + array3_size(h->kinfo.num_tqps, + sizeof(*priv->ring), 2), + GFP_KERNEL); + if (!priv->ring) + return -ENOMEM; + + for (i = 0; i < h->kinfo.num_tqps; i++) + hns3_queue_to_ring(h->kinfo.tqp[i], priv); + + return 0; +} + +static void hns3_put_ring_config(struct hns3_nic_priv *priv) +{ + if (!priv->ring) + return; + + devm_kfree(priv->dev, priv->ring); + priv->ring = NULL; +} + +static void hns3_alloc_page_pool(struct hns3_enet_ring *ring) +{ + struct page_pool_params pp_params = { + .flags = PP_FLAG_DMA_MAP | PP_FLAG_PAGE_FRAG | + PP_FLAG_DMA_SYNC_DEV, + .order = hns3_page_order(ring), + .pool_size = ring->desc_num * hns3_buf_size(ring) / + (PAGE_SIZE << hns3_page_order(ring)), + .nid = dev_to_node(ring_to_dev(ring)), + .dev = ring_to_dev(ring), + .dma_dir = DMA_FROM_DEVICE, + .offset = 0, + .max_len = PAGE_SIZE << hns3_page_order(ring), + }; + + ring->page_pool = page_pool_create(&pp_params); + if (IS_ERR(ring->page_pool)) { + dev_warn(ring_to_dev(ring), "page pool creation failed: %ld\n", + PTR_ERR(ring->page_pool)); + ring->page_pool = NULL; + } +} + +static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring) +{ + int ret; + + if (ring->desc_num <= 0 || ring->buf_size <= 0) + return -EINVAL; + + ring->desc_cb = devm_kcalloc(ring_to_dev(ring), ring->desc_num, + sizeof(ring->desc_cb[0]), GFP_KERNEL); + if (!ring->desc_cb) 
{ + ret = -ENOMEM; + goto out; + } + + ret = hns3_alloc_desc(ring); + if (ret) + goto out_with_desc_cb; + + if (!HNAE3_IS_TX_RING(ring)) { + if (page_pool_enabled) + hns3_alloc_page_pool(ring); + + ret = hns3_alloc_ring_buffers(ring); + if (ret) + goto out_with_desc; + } else { + hns3_init_tx_spare_buffer(ring); + } + + return 0; + +out_with_desc: + hns3_free_desc(ring); +out_with_desc_cb: + devm_kfree(ring_to_dev(ring), ring->desc_cb); + ring->desc_cb = NULL; +out: + return ret; +} + +void hns3_fini_ring(struct hns3_enet_ring *ring) +{ + hns3_free_desc(ring); + devm_kfree(ring_to_dev(ring), ring->desc_cb); + ring->desc_cb = NULL; + ring->next_to_clean = 0; + ring->next_to_use = 0; + ring->last_to_use = 0; + ring->pending_buf = 0; + if (!HNAE3_IS_TX_RING(ring) && ring->skb) { + dev_kfree_skb_any(ring->skb); + ring->skb = NULL; + } else if (HNAE3_IS_TX_RING(ring) && ring->tx_spare) { + struct hns3_tx_spare *tx_spare = ring->tx_spare; + + dma_unmap_page(ring_to_dev(ring), tx_spare->dma, tx_spare->len, + DMA_TO_DEVICE); + free_pages((unsigned long)tx_spare->buf, + get_order(tx_spare->len)); + devm_kfree(ring_to_dev(ring), tx_spare); + ring->tx_spare = NULL; + } + + if (!HNAE3_IS_TX_RING(ring) && ring->page_pool) { + page_pool_destroy(ring->page_pool); + ring->page_pool = NULL; + } +} + +static int hns3_buf_size2type(u32 buf_size) +{ + int bd_size_type; + + switch (buf_size) { + case 512: + bd_size_type = HNS3_BD_SIZE_512_TYPE; + break; + case 1024: + bd_size_type = HNS3_BD_SIZE_1024_TYPE; + break; + case 2048: + bd_size_type = HNS3_BD_SIZE_2048_TYPE; + break; + case 4096: + bd_size_type = HNS3_BD_SIZE_4096_TYPE; + break; + default: + bd_size_type = HNS3_BD_SIZE_2048_TYPE; + } + + return bd_size_type; +} + +static void hns3_init_ring_hw(struct hns3_enet_ring *ring) +{ + dma_addr_t dma = ring->desc_dma_addr; + struct hnae3_queue *q = ring->tqp; + + if (!HNAE3_IS_TX_RING(ring)) { + hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG, (u32)dma); + hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG, + (u32)((dma >> 31) >> 1)); + + hns3_write_dev(q, HNS3_RING_RX_RING_BD_LEN_REG, + hns3_buf_size2type(ring->buf_size)); + hns3_write_dev(q, HNS3_RING_RX_RING_BD_NUM_REG, + ring->desc_num / 8 - 1); + } else { + hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_L_REG, + (u32)dma); + hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG, + (u32)((dma >> 31) >> 1)); + + hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG, + ring->desc_num / 8 - 1); + } +} + +static void hns3_init_tx_ring_tc(struct hns3_nic_priv *priv) +{ + struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo; + struct hnae3_tc_info *tc_info = &kinfo->tc_info; + int i; + + for (i = 0; i < tc_info->num_tc; i++) { + int j; + + for (j = 0; j < tc_info->tqp_count[i]; j++) { + struct hnae3_queue *q; + + q = priv->ring[tc_info->tqp_offset[i] + j].tqp; + hns3_write_dev(q, HNS3_RING_TX_RING_TC_REG, i); + } + } +} + +int hns3_init_all_ring(struct hns3_nic_priv *priv) +{ + struct hnae3_handle *h = priv->ae_handle; + int ring_num = h->kinfo.num_tqps * 2; + int i, j; + int ret; + + for (i = 0; i < ring_num; i++) { + ret = hns3_alloc_ring_memory(&priv->ring[i]); + if (ret) { + dev_err(priv->dev, + "Alloc ring memory fail! 
ret=%d\n", ret); + goto out_when_alloc_ring_memory; + } + + u64_stats_init(&priv->ring[i].syncp); + } + + return 0; + +out_when_alloc_ring_memory: + for (j = i - 1; j >= 0; j--) + hns3_fini_ring(&priv->ring[j]); + + return -ENOMEM; +} + +static void hns3_uninit_all_ring(struct hns3_nic_priv *priv) +{ + struct hnae3_handle *h = priv->ae_handle; + int i; + + for (i = 0; i < h->kinfo.num_tqps; i++) { + hns3_fini_ring(&priv->ring[i]); + hns3_fini_ring(&priv->ring[i + h->kinfo.num_tqps]); + } +} + +/* Set mac addr if it is configured. or leave it to the AE driver */ +static int hns3_init_mac_addr(struct net_device *netdev) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; + struct hnae3_handle *h = priv->ae_handle; + u8 mac_addr_temp[ETH_ALEN] = {0}; + int ret = 0; + + if (h->ae_algo->ops->get_mac_addr) + h->ae_algo->ops->get_mac_addr(h, mac_addr_temp); + + /* Check if the MAC address is valid, if not get a random one */ + if (!is_valid_ether_addr(mac_addr_temp)) { + eth_hw_addr_random(netdev); + hnae3_format_mac_addr(format_mac_addr, netdev->dev_addr); + dev_warn(priv->dev, "using random MAC address %s\n", + format_mac_addr); + } else if (!ether_addr_equal(netdev->dev_addr, mac_addr_temp)) { + eth_hw_addr_set(netdev, mac_addr_temp); + ether_addr_copy(netdev->perm_addr, mac_addr_temp); + } else { + return 0; + } + + if (h->ae_algo->ops->set_mac_addr) + ret = h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr, true); + + return ret; +} + +static int hns3_init_phy(struct net_device *netdev) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + int ret = 0; + + if (h->ae_algo->ops->mac_connect_phy) + ret = h->ae_algo->ops->mac_connect_phy(h); + + return ret; +} + +static void hns3_uninit_phy(struct net_device *netdev) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + if (h->ae_algo->ops->mac_disconnect_phy) + h->ae_algo->ops->mac_disconnect_phy(h); +} + +static int hns3_client_start(struct hnae3_handle *handle) +{ + if (!handle->ae_algo->ops->client_start) + return 0; + + return handle->ae_algo->ops->client_start(handle); +} + +static void hns3_client_stop(struct hnae3_handle *handle) +{ + if (!handle->ae_algo->ops->client_stop) + return; + + handle->ae_algo->ops->client_stop(handle); +} + +static void hns3_info_show(struct hns3_nic_priv *priv) +{ + struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo; + char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; + + hnae3_format_mac_addr(format_mac_addr, priv->netdev->dev_addr); + dev_info(priv->dev, "MAC address: %s\n", format_mac_addr); + dev_info(priv->dev, "Task queue pairs numbers: %u\n", kinfo->num_tqps); + dev_info(priv->dev, "RSS size: %u\n", kinfo->rss_size); + dev_info(priv->dev, "Allocated RSS size: %u\n", kinfo->req_rss_size); + dev_info(priv->dev, "RX buffer length: %u\n", kinfo->rx_buf_len); + dev_info(priv->dev, "Desc num per TX queue: %u\n", kinfo->num_tx_desc); + dev_info(priv->dev, "Desc num per RX queue: %u\n", kinfo->num_rx_desc); + dev_info(priv->dev, "Total number of enabled TCs: %u\n", + kinfo->tc_info.num_tc); + dev_info(priv->dev, "Max mtu size: %u\n", priv->netdev->max_mtu); +} + +static void hns3_set_cq_period_mode(struct hns3_nic_priv *priv, + enum dim_cq_period_mode mode, bool is_tx) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev); + struct hnae3_handle *handle = priv->ae_handle; + int i; + + if (is_tx) { + priv->tx_cqe_mode = mode; + + for (i = 0; i < priv->vector_num; i++) + priv->tqp_vector[i].tx_group.dim.mode = mode; + } 
else { + priv->rx_cqe_mode = mode; + + for (i = 0; i < priv->vector_num; i++) + priv->tqp_vector[i].rx_group.dim.mode = mode; + } + + if (hnae3_ae_dev_cq_supported(ae_dev)) { + u32 new_mode; + u64 reg; + + new_mode = (mode == DIM_CQ_PERIOD_MODE_START_FROM_CQE) ? + HNS3_CQ_MODE_CQE : HNS3_CQ_MODE_EQE; + reg = is_tx ? HNS3_GL1_CQ_MODE_REG : HNS3_GL0_CQ_MODE_REG; + + writel(new_mode, handle->kinfo.io_base + reg); + } +} + +void hns3_cq_period_mode_init(struct hns3_nic_priv *priv, + enum dim_cq_period_mode tx_mode, + enum dim_cq_period_mode rx_mode) +{ + hns3_set_cq_period_mode(priv, tx_mode, true); + hns3_set_cq_period_mode(priv, rx_mode, false); +} + +static void hns3_state_init(struct hnae3_handle *handle) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev); + struct net_device *netdev = handle->kinfo.netdev; + struct hns3_nic_priv *priv = netdev_priv(netdev); + + set_bit(HNS3_NIC_STATE_INITED, &priv->state); + + if (test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, ae_dev->caps)) + set_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state); + + if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) + set_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->supported_pflags); + + if (test_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps)) + set_bit(HNS3_NIC_STATE_HW_TX_CSUM_ENABLE, &priv->state); + + if (hnae3_ae_dev_rxd_adv_layout_supported(ae_dev)) + set_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state); +} + +static void hns3_state_uninit(struct hnae3_handle *handle) +{ + struct hns3_nic_priv *priv = handle->priv; + + clear_bit(HNS3_NIC_STATE_INITED, &priv->state); +} + +static int hns3_client_init(struct hnae3_handle *handle) +{ + struct pci_dev *pdev = handle->pdev; + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); + u16 alloc_tqps, max_rss_size; + struct hns3_nic_priv *priv; + struct net_device *netdev; + int ret; + + handle->ae_algo->ops->get_tqps_and_rss_info(handle, &alloc_tqps, + &max_rss_size); + netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv), alloc_tqps); + if (!netdev) + return -ENOMEM; + + priv = netdev_priv(netdev); + priv->dev = &pdev->dev; + priv->netdev = netdev; + priv->ae_handle = handle; + priv->tx_timeout_count = 0; + priv->max_non_tso_bd_num = ae_dev->dev_specs.max_non_tso_bd_num; + set_bit(HNS3_NIC_STATE_DOWN, &priv->state); + + handle->msg_enable = netif_msg_init(debug, DEFAULT_MSG_LEVEL); + + handle->kinfo.netdev = netdev; + handle->priv = (void *)priv; + + hns3_init_mac_addr(netdev); + + hns3_set_default_feature(netdev); + + netdev->watchdog_timeo = HNS3_TX_TIMEOUT; + netdev->priv_flags |= IFF_UNICAST_FLT; + netdev->netdev_ops = &hns3_nic_netdev_ops; + SET_NETDEV_DEV(netdev, &pdev->dev); + hns3_ethtool_set_ops(netdev); + + /* Carrier off reporting is important to ethtool even BEFORE open */ + netif_carrier_off(netdev); + + ret = hns3_get_ring_config(priv); + if (ret) { + ret = -ENOMEM; + goto out_get_ring_cfg; + } + + hns3_nic_init_coal_cfg(priv); + + ret = hns3_nic_alloc_vector_data(priv); + if (ret) { + ret = -ENOMEM; + goto out_alloc_vector_data; + } + + ret = hns3_nic_init_vector_data(priv); + if (ret) { + ret = -ENOMEM; + goto out_init_vector_data; + } + + ret = hns3_init_all_ring(priv); + if (ret) { + ret = -ENOMEM; + goto out_init_ring; + } + + hns3_cq_period_mode_init(priv, DIM_CQ_PERIOD_MODE_START_FROM_EQE, + DIM_CQ_PERIOD_MODE_START_FROM_EQE); + + ret = hns3_init_phy(netdev); + if (ret) + goto out_init_phy; + + /* the device can work without cpu rmap, only aRFS needs it */ + ret = hns3_set_rx_cpu_rmap(netdev); + if (ret) + dev_warn(priv->dev, "set rx cpu 
rmap fail, ret=%d\n", ret); + + ret = hns3_nic_init_irq(priv); + if (ret) { + dev_err(priv->dev, "init irq failed! ret=%d\n", ret); + hns3_free_rx_cpu_rmap(netdev); + goto out_init_irq_fail; + } + + ret = hns3_client_start(handle); + if (ret) { + dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret); + goto out_client_start; + } + + hns3_dcbnl_setup(handle); + + ret = hns3_dbg_init(handle); + if (ret) { + dev_err(priv->dev, "failed to init debugfs, ret = %d\n", + ret); + goto out_client_start; + } + + netdev->max_mtu = HNS3_MAX_MTU(ae_dev->dev_specs.max_frm_size); + + hns3_state_init(handle); + + ret = register_netdev(netdev); + if (ret) { + dev_err(priv->dev, "probe register netdev fail!\n"); + goto out_reg_netdev_fail; + } + + if (netif_msg_drv(handle)) + hns3_info_show(priv); + + return ret; + +out_reg_netdev_fail: + hns3_state_uninit(handle); + hns3_dbg_uninit(handle); + hns3_client_stop(handle); +out_client_start: + hns3_free_rx_cpu_rmap(netdev); + hns3_nic_uninit_irq(priv); +out_init_irq_fail: + hns3_uninit_phy(netdev); +out_init_phy: + hns3_uninit_all_ring(priv); +out_init_ring: + hns3_nic_uninit_vector_data(priv); +out_init_vector_data: + hns3_nic_dealloc_vector_data(priv); +out_alloc_vector_data: + priv->ring = NULL; +out_get_ring_cfg: + priv->ae_handle = NULL; + free_netdev(netdev); + return ret; +} + +static void hns3_client_uninit(struct hnae3_handle *handle, bool reset) +{ + struct net_device *netdev = handle->kinfo.netdev; + struct hns3_nic_priv *priv = netdev_priv(netdev); + + if (netdev->reg_state != NETREG_UNINITIALIZED) + unregister_netdev(netdev); + + hns3_client_stop(handle); + + hns3_uninit_phy(netdev); + + if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) { + netdev_warn(netdev, "already uninitialized\n"); + goto out_netdev_free; + } + + hns3_free_rx_cpu_rmap(netdev); + + hns3_nic_uninit_irq(priv); + + hns3_clear_all_ring(handle, true); + + hns3_nic_uninit_vector_data(priv); + + hns3_nic_dealloc_vector_data(priv); + + hns3_uninit_all_ring(priv); + + hns3_put_ring_config(priv); + +out_netdev_free: + hns3_dbg_uninit(handle); + free_netdev(netdev); +} + +static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup) +{ + struct net_device *netdev = handle->kinfo.netdev; + + if (!netdev) + return; + + if (linkup) { + netif_tx_wake_all_queues(netdev); + netif_carrier_on(netdev); + if (netif_msg_link(handle)) + netdev_info(netdev, "link up\n"); + } else { + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + if (netif_msg_link(handle)) + netdev_info(netdev, "link down\n"); + } +} + +static void hns3_clear_tx_ring(struct hns3_enet_ring *ring) +{ + while (ring->next_to_clean != ring->next_to_use) { + ring->desc[ring->next_to_clean].tx.bdtp_fe_sc_vld_ra_ri = 0; + hns3_free_buffer_detach(ring, ring->next_to_clean, 0); + ring_ptr_move_fw(ring, next_to_clean); + } + + ring->pending_buf = 0; +} + +static int hns3_clear_rx_ring(struct hns3_enet_ring *ring) +{ + struct hns3_desc_cb res_cbs; + int ret; + + while (ring->next_to_use != ring->next_to_clean) { + /* When a buffer is not reused, it's memory has been + * freed in hns3_handle_rx_bd or will be freed by + * stack, so we need to replace the buffer here. + */ + if (!ring->desc_cb[ring->next_to_use].reuse_flag) { + ret = hns3_alloc_and_map_buffer(ring, &res_cbs); + if (ret) { + hns3_ring_stats_update(ring, sw_err_cnt); + /* if alloc new buffer fail, exit directly + * and reclear in up flow. 
+ */ + netdev_warn(ring_to_netdev(ring), + "reserve buffer map failed, ret = %d\n", + ret); + return ret; + } + hns3_replace_buffer(ring, ring->next_to_use, &res_cbs); + } + ring_ptr_move_fw(ring, next_to_use); + } + + /* Free the pending skb in rx ring */ + if (ring->skb) { + dev_kfree_skb_any(ring->skb); + ring->skb = NULL; + ring->pending_buf = 0; + } + + return 0; +} + +static void hns3_force_clear_rx_ring(struct hns3_enet_ring *ring) +{ + while (ring->next_to_use != ring->next_to_clean) { + /* When a buffer is not reused, it's memory has been + * freed in hns3_handle_rx_bd or will be freed by + * stack, so only need to unmap the buffer here. + */ + if (!ring->desc_cb[ring->next_to_use].reuse_flag) { + hns3_unmap_buffer(ring, + &ring->desc_cb[ring->next_to_use]); + ring->desc_cb[ring->next_to_use].dma = 0; + } + + ring_ptr_move_fw(ring, next_to_use); + } +} + +static void hns3_clear_all_ring(struct hnae3_handle *h, bool force) +{ + struct net_device *ndev = h->kinfo.netdev; + struct hns3_nic_priv *priv = netdev_priv(ndev); + u32 i; + + for (i = 0; i < h->kinfo.num_tqps; i++) { + struct hns3_enet_ring *ring; + + ring = &priv->ring[i]; + hns3_clear_tx_ring(ring); + + ring = &priv->ring[i + h->kinfo.num_tqps]; + /* Continue to clear other rings even if clearing some + * rings failed. + */ + if (force) + hns3_force_clear_rx_ring(ring); + else + hns3_clear_rx_ring(ring); + } +} + +int hns3_nic_reset_all_ring(struct hnae3_handle *h) +{ + struct net_device *ndev = h->kinfo.netdev; + struct hns3_nic_priv *priv = netdev_priv(ndev); + struct hns3_enet_ring *rx_ring; + int i, j; + int ret; + + ret = h->ae_algo->ops->reset_queue(h); + if (ret) + return ret; + + for (i = 0; i < h->kinfo.num_tqps; i++) { + hns3_init_ring_hw(&priv->ring[i]); + + /* We need to clear tx ring here because self test will + * use the ring and will not run down before up + */ + hns3_clear_tx_ring(&priv->ring[i]); + priv->ring[i].next_to_clean = 0; + priv->ring[i].next_to_use = 0; + priv->ring[i].last_to_use = 0; + + rx_ring = &priv->ring[i + h->kinfo.num_tqps]; + hns3_init_ring_hw(rx_ring); + ret = hns3_clear_rx_ring(rx_ring); + if (ret) + return ret; + + /* We can not know the hardware head and tail when this + * function is called in reset flow, so we reuse all desc. 
+ */ + for (j = 0; j < rx_ring->desc_num; j++) + hns3_reuse_buffer(rx_ring, j); + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + } + + hns3_init_tx_ring_tc(priv); + + return 0; +} + +static int hns3_reset_notify_down_enet(struct hnae3_handle *handle) +{ + struct hnae3_knic_private_info *kinfo = &handle->kinfo; + struct net_device *ndev = kinfo->netdev; + struct hns3_nic_priv *priv = netdev_priv(ndev); + + if (test_and_set_bit(HNS3_NIC_STATE_RESETTING, &priv->state)) + return 0; + + if (!netif_running(ndev)) + return 0; + + return hns3_nic_net_stop(ndev); +} + +static int hns3_reset_notify_up_enet(struct hnae3_handle *handle) +{ + struct hnae3_knic_private_info *kinfo = &handle->kinfo; + struct hns3_nic_priv *priv = netdev_priv(kinfo->netdev); + int ret = 0; + + if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state)) { + netdev_err(kinfo->netdev, "device is not initialized yet\n"); + return -EFAULT; + } + + clear_bit(HNS3_NIC_STATE_RESETTING, &priv->state); + + if (netif_running(kinfo->netdev)) { + ret = hns3_nic_net_open(kinfo->netdev); + if (ret) { + set_bit(HNS3_NIC_STATE_RESETTING, &priv->state); + netdev_err(kinfo->netdev, + "net up fail, ret=%d!\n", ret); + return ret; + } + } + + return ret; +} + +static int hns3_reset_notify_init_enet(struct hnae3_handle *handle) +{ + struct net_device *netdev = handle->kinfo.netdev; + struct hns3_nic_priv *priv = netdev_priv(netdev); + int ret; + + /* Carrier off reporting is important to ethtool even BEFORE open */ + netif_carrier_off(netdev); + + ret = hns3_get_ring_config(priv); + if (ret) + return ret; + + ret = hns3_nic_alloc_vector_data(priv); + if (ret) + goto err_put_ring; + + ret = hns3_nic_init_vector_data(priv); + if (ret) + goto err_dealloc_vector; + + ret = hns3_init_all_ring(priv); + if (ret) + goto err_uninit_vector; + + hns3_cq_period_mode_init(priv, priv->tx_cqe_mode, priv->rx_cqe_mode); + + /* the device can work without cpu rmap, only aRFS needs it */ + ret = hns3_set_rx_cpu_rmap(netdev); + if (ret) + dev_warn(priv->dev, "set rx cpu rmap fail, ret=%d\n", ret); + + ret = hns3_nic_init_irq(priv); + if (ret) { + dev_err(priv->dev, "init irq failed! ret=%d\n", ret); + hns3_free_rx_cpu_rmap(netdev); + goto err_init_irq_fail; + } + + if (!hns3_is_phys_func(handle->pdev)) + hns3_init_mac_addr(netdev); + + ret = hns3_client_start(handle); + if (ret) { + dev_err(priv->dev, "hns3_client_start fail! 
ret=%d\n", ret); + goto err_client_start_fail; + } + + set_bit(HNS3_NIC_STATE_INITED, &priv->state); + + return ret; + +err_client_start_fail: + hns3_free_rx_cpu_rmap(netdev); + hns3_nic_uninit_irq(priv); +err_init_irq_fail: + hns3_uninit_all_ring(priv); +err_uninit_vector: + hns3_nic_uninit_vector_data(priv); +err_dealloc_vector: + hns3_nic_dealloc_vector_data(priv); +err_put_ring: + hns3_put_ring_config(priv); + + return ret; +} + +static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle) +{ + struct net_device *netdev = handle->kinfo.netdev; + struct hns3_nic_priv *priv = netdev_priv(netdev); + + if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) { + netdev_warn(netdev, "already uninitialized\n"); + return 0; + } + + hns3_free_rx_cpu_rmap(netdev); + hns3_nic_uninit_irq(priv); + hns3_clear_all_ring(handle, true); + hns3_reset_tx_queue(priv->ae_handle); + + hns3_nic_uninit_vector_data(priv); + + hns3_nic_dealloc_vector_data(priv); + + hns3_uninit_all_ring(priv); + + hns3_put_ring_config(priv); + + return 0; +} + +int hns3_reset_notify(struct hnae3_handle *handle, + enum hnae3_reset_notify_type type) +{ + int ret = 0; + + switch (type) { + case HNAE3_UP_CLIENT: + ret = hns3_reset_notify_up_enet(handle); + break; + case HNAE3_DOWN_CLIENT: + ret = hns3_reset_notify_down_enet(handle); + break; + case HNAE3_INIT_CLIENT: + ret = hns3_reset_notify_init_enet(handle); + break; + case HNAE3_UNINIT_CLIENT: + ret = hns3_reset_notify_uninit_enet(handle); + break; + default: + break; + } + + return ret; +} + +static int hns3_change_channels(struct hnae3_handle *handle, u32 new_tqp_num, + bool rxfh_configured) +{ + int ret; + + ret = handle->ae_algo->ops->set_channels(handle, new_tqp_num, + rxfh_configured); + if (ret) { + dev_err(&handle->pdev->dev, + "Change tqp num(%u) fail.\n", new_tqp_num); + return ret; + } + + ret = hns3_reset_notify(handle, HNAE3_INIT_CLIENT); + if (ret) + return ret; + + ret = hns3_reset_notify(handle, HNAE3_UP_CLIENT); + if (ret) + hns3_reset_notify(handle, HNAE3_UNINIT_CLIENT); + + return ret; +} + +int hns3_set_channels(struct net_device *netdev, + struct ethtool_channels *ch) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + struct hnae3_knic_private_info *kinfo = &h->kinfo; + bool rxfh_configured = netif_is_rxfh_configured(netdev); + u32 new_tqp_num = ch->combined_count; + u16 org_tqp_num; + int ret; + + if (hns3_nic_resetting(netdev)) + return -EBUSY; + + if (ch->rx_count || ch->tx_count) + return -EINVAL; + + if (kinfo->tc_info.mqprio_active) { + dev_err(&netdev->dev, + "it's not allowed to set channels via ethtool when MQPRIO mode is on\n"); + return -EINVAL; + } + + if (new_tqp_num > hns3_get_max_available_channels(h) || + new_tqp_num < 1) { + dev_err(&netdev->dev, + "Change tqps fail, the tqp range is from 1 to %u", + hns3_get_max_available_channels(h)); + return -EINVAL; + } + + if (kinfo->rss_size == new_tqp_num) + return 0; + + netif_dbg(h, drv, netdev, + "set channels: tqp_num=%u, rxfh=%d\n", + new_tqp_num, rxfh_configured); + + ret = hns3_reset_notify(h, HNAE3_DOWN_CLIENT); + if (ret) + return ret; + + ret = hns3_reset_notify(h, HNAE3_UNINIT_CLIENT); + if (ret) + return ret; + + org_tqp_num = h->kinfo.num_tqps; + ret = hns3_change_channels(h, new_tqp_num, rxfh_configured); + if (ret) { + int ret1; + + netdev_warn(netdev, + "Change channels fail, revert to old value\n"); + ret1 = hns3_change_channels(h, org_tqp_num, rxfh_configured); + if (ret1) { + netdev_err(netdev, + "revert to old channel fail\n"); + return ret1; + } + + return 
ret; + } + + return 0; +} + +void hns3_external_lb_prepare(struct net_device *ndev, bool if_running) +{ + struct hns3_nic_priv *priv = netdev_priv(ndev); + struct hnae3_handle *h = priv->ae_handle; + int i; + + if (!if_running) + return; + + if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state)) + return; + + netif_carrier_off(ndev); + netif_tx_disable(ndev); + + for (i = 0; i < priv->vector_num; i++) + hns3_vector_disable(&priv->tqp_vector[i]); + + for (i = 0; i < h->kinfo.num_tqps; i++) + hns3_tqp_disable(h->kinfo.tqp[i]); + + /* delay ring buffer clearing to hns3_reset_notify_uninit_enet + * during reset process, because driver may not be able + * to disable the ring through firmware when downing the netdev. + */ + if (!hns3_nic_resetting(ndev)) + hns3_nic_reset_all_ring(priv->ae_handle); + + hns3_reset_tx_queue(priv->ae_handle); +} + +void hns3_external_lb_restore(struct net_device *ndev, bool if_running) +{ + struct hns3_nic_priv *priv = netdev_priv(ndev); + struct hnae3_handle *h = priv->ae_handle; + int i; + + if (!if_running) + return; + + if (hns3_nic_resetting(ndev)) + return; + + if (!test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) + return; + + if (hns3_nic_reset_all_ring(priv->ae_handle)) + return; + + clear_bit(HNS3_NIC_STATE_DOWN, &priv->state); + + for (i = 0; i < priv->vector_num; i++) + hns3_vector_enable(&priv->tqp_vector[i]); + + for (i = 0; i < h->kinfo.num_tqps; i++) + hns3_tqp_enable(h->kinfo.tqp[i]); + + netif_tx_wake_all_queues(ndev); + + if (h->ae_algo->ops->get_status(h)) + netif_carrier_on(ndev); +} + +static const struct hns3_hw_error_info hns3_hw_err[] = { + { .type = HNAE3_PPU_POISON_ERROR, + .msg = "PPU poison" }, + { .type = HNAE3_CMDQ_ECC_ERROR, + .msg = "IMP CMDQ error" }, + { .type = HNAE3_IMP_RD_POISON_ERROR, + .msg = "IMP RD poison" }, + { .type = HNAE3_ROCEE_AXI_RESP_ERROR, + .msg = "ROCEE AXI RESP error" }, +}; + +static void hns3_process_hw_error(struct hnae3_handle *handle, + enum hnae3_hw_error_type type) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(hns3_hw_err); i++) { + if (hns3_hw_err[i].type == type) { + dev_err(&handle->pdev->dev, "Detected %s!\n", + hns3_hw_err[i].msg); + break; + } + } +} + +static const struct hnae3_client_ops client_ops = { + .init_instance = hns3_client_init, + .uninit_instance = hns3_client_uninit, + .link_status_change = hns3_link_status_change, + .reset_notify = hns3_reset_notify, + .process_hw_error = hns3_process_hw_error, +}; + +/* hns3_init_module - Driver registration routine + * hns3_init_module is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem. + */ +static int __init hns3_init_module(void) +{ + int ret; + + pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string); + pr_info("%s: %s\n", hns3_driver_name, hns3_copyright); + + client.type = HNAE3_CLIENT_KNIC; + snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH, "%s", + hns3_driver_name); + + client.ops = &client_ops; + + INIT_LIST_HEAD(&client.node); + + hns3_dbg_register_debugfs(hns3_driver_name); + + ret = hnae3_register_client(&client); + if (ret) + goto err_reg_client; + + ret = pci_register_driver(&hns3_driver); + if (ret) + goto err_reg_driver; + + return ret; + +err_reg_driver: + hnae3_unregister_client(&client); +err_reg_client: + hns3_dbg_unregister_debugfs(); + return ret; +} +module_init(hns3_init_module); + +/* hns3_exit_module - Driver exit cleanup routine + * hns3_exit_module is called just before the driver is removed + * from memory. 
+ */ +static void __exit hns3_exit_module(void) +{ + pci_unregister_driver(&hns3_driver); + hnae3_unregister_client(&client); + hns3_dbg_unregister_debugfs(); +} +module_exit(hns3_exit_module); + +MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver"); +MODULE_AUTHOR("Huawei Tech. Co., Ltd."); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("pci:hns-nic"); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h new file mode 100644 index 000000000..294a14b4f --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -0,0 +1,751 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +// Copyright (c) 2016-2017 Hisilicon Limited. + +#ifndef __HNS3_ENET_H +#define __HNS3_ENET_H + +#include +#include +#include +#include + +#include "hnae3.h" + +struct iphdr; +struct ipv6hdr; + +enum hns3_nic_state { + HNS3_NIC_STATE_TESTING, + HNS3_NIC_STATE_RESETTING, + HNS3_NIC_STATE_INITED, + HNS3_NIC_STATE_DOWN, + HNS3_NIC_STATE_DISABLED, + HNS3_NIC_STATE_REMOVING, + HNS3_NIC_STATE_SERVICE_INITED, + HNS3_NIC_STATE_SERVICE_SCHED, + HNS3_NIC_STATE2_RESET_REQUESTED, + HNS3_NIC_STATE_HW_TX_CSUM_ENABLE, + HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, + HNS3_NIC_STATE_TX_PUSH_ENABLE, + HNS3_NIC_STATE_MAX +}; + +#define HNS3_MAX_PUSH_BD_NUM 2 + +#define HNS3_RING_RX_RING_BASEADDR_L_REG 0x00000 +#define HNS3_RING_RX_RING_BASEADDR_H_REG 0x00004 +#define HNS3_RING_RX_RING_BD_NUM_REG 0x00008 +#define HNS3_RING_RX_RING_BD_LEN_REG 0x0000C +#define HNS3_RING_RX_RING_TAIL_REG 0x00018 +#define HNS3_RING_RX_RING_HEAD_REG 0x0001C +#define HNS3_RING_RX_RING_FBDNUM_REG 0x00020 +#define HNS3_RING_RX_RING_PKTNUM_RECORD_REG 0x0002C + +#define HNS3_RING_TX_RING_BASEADDR_L_REG 0x00040 +#define HNS3_RING_TX_RING_BASEADDR_H_REG 0x00044 +#define HNS3_RING_TX_RING_BD_NUM_REG 0x00048 +#define HNS3_RING_TX_RING_TC_REG 0x00050 +#define HNS3_RING_TX_RING_TAIL_REG 0x00058 +#define HNS3_RING_TX_RING_HEAD_REG 0x0005C +#define HNS3_RING_TX_RING_FBDNUM_REG 0x00060 +#define HNS3_RING_TX_RING_OFFSET_REG 0x00064 +#define HNS3_RING_TX_RING_EBDNUM_REG 0x00068 +#define HNS3_RING_TX_RING_PKTNUM_RECORD_REG 0x0006C +#define HNS3_RING_TX_RING_EBD_OFFSET_REG 0x00070 +#define HNS3_RING_TX_RING_BD_ERR_REG 0x00074 +#define HNS3_RING_EN_REG 0x00090 +#define HNS3_RING_RX_EN_REG 0x00098 +#define HNS3_RING_TX_EN_REG 0x000D4 + +#define HNS3_RX_HEAD_SIZE 256 + +#define HNS3_TX_TIMEOUT (5 * HZ) +#define HNS3_RING_NAME_LEN 16 +#define HNS3_BUFFER_SIZE_2048 2048 +#define HNS3_RING_MAX_PENDING 32760 +#define HNS3_RING_MIN_PENDING 72 +#define HNS3_RING_BD_MULTIPLE 8 +/* max frame size of mac */ +#define HNS3_MAX_MTU(max_frm_size) \ + ((max_frm_size) - (ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN)) + +#define HNS3_BD_SIZE_512_TYPE 0 +#define HNS3_BD_SIZE_1024_TYPE 1 +#define HNS3_BD_SIZE_2048_TYPE 2 +#define HNS3_BD_SIZE_4096_TYPE 3 + +#define HNS3_RX_FLAG_VLAN_PRESENT 0x1 +#define HNS3_RX_FLAG_L3ID_IPV4 0x0 +#define HNS3_RX_FLAG_L3ID_IPV6 0x1 +#define HNS3_RX_FLAG_L4ID_UDP 0x0 +#define HNS3_RX_FLAG_L4ID_TCP 0x1 + +#define HNS3_RXD_DMAC_S 0 +#define HNS3_RXD_DMAC_M (0x3 << HNS3_RXD_DMAC_S) +#define HNS3_RXD_VLAN_S 2 +#define HNS3_RXD_VLAN_M (0x3 << HNS3_RXD_VLAN_S) +#define HNS3_RXD_L3ID_S 4 +#define HNS3_RXD_L3ID_M (0xf << HNS3_RXD_L3ID_S) +#define HNS3_RXD_L4ID_S 8 +#define HNS3_RXD_L4ID_M (0xf << HNS3_RXD_L4ID_S) +#define HNS3_RXD_FRAG_B 12 +#define HNS3_RXD_STRP_TAGP_S 13 +#define HNS3_RXD_STRP_TAGP_M (0x3 << HNS3_RXD_STRP_TAGP_S) + +#define HNS3_RXD_L2E_B 16 +#define HNS3_RXD_L3E_B 17 +#define HNS3_RXD_L4E_B 18 +#define 
HNS3_RXD_TRUNCAT_B 19 +#define HNS3_RXD_HOI_B 20 +#define HNS3_RXD_DOI_B 21 +#define HNS3_RXD_OL3E_B 22 +#define HNS3_RXD_OL4E_B 23 +#define HNS3_RXD_GRO_COUNT_S 24 +#define HNS3_RXD_GRO_COUNT_M (0x3f << HNS3_RXD_GRO_COUNT_S) +#define HNS3_RXD_GRO_FIXID_B 30 +#define HNS3_RXD_GRO_ECN_B 31 + +#define HNS3_RXD_ODMAC_S 0 +#define HNS3_RXD_ODMAC_M (0x3 << HNS3_RXD_ODMAC_S) +#define HNS3_RXD_OVLAN_S 2 +#define HNS3_RXD_OVLAN_M (0x3 << HNS3_RXD_OVLAN_S) +#define HNS3_RXD_OL3ID_S 4 +#define HNS3_RXD_OL3ID_M (0xf << HNS3_RXD_OL3ID_S) +#define HNS3_RXD_OL4ID_S 8 +#define HNS3_RXD_OL4ID_M (0xf << HNS3_RXD_OL4ID_S) +#define HNS3_RXD_FBHI_S 12 +#define HNS3_RXD_FBHI_M (0x3 << HNS3_RXD_FBHI_S) +#define HNS3_RXD_FBLI_S 14 +#define HNS3_RXD_FBLI_M (0x3 << HNS3_RXD_FBLI_S) + +#define HNS3_RXD_PTYPE_S 4 +#define HNS3_RXD_PTYPE_M GENMASK(11, 4) + +#define HNS3_RXD_BDTYPE_S 0 +#define HNS3_RXD_BDTYPE_M (0xf << HNS3_RXD_BDTYPE_S) +#define HNS3_RXD_VLD_B 4 +#define HNS3_RXD_UDP0_B 5 +#define HNS3_RXD_EXTEND_B 7 +#define HNS3_RXD_FE_B 8 +#define HNS3_RXD_LUM_B 9 +#define HNS3_RXD_CRCP_B 10 +#define HNS3_RXD_L3L4P_B 11 +#define HNS3_RXD_TSIDX_S 12 +#define HNS3_RXD_TSIDX_M (0x3 << HNS3_RXD_TSIDX_S) +#define HNS3_RXD_TS_VLD_B 14 +#define HNS3_RXD_LKBK_B 15 +#define HNS3_RXD_GRO_SIZE_S 16 +#define HNS3_RXD_GRO_SIZE_M (0x3fff << HNS3_RXD_GRO_SIZE_S) + +#define HNS3_TXD_L3T_S 0 +#define HNS3_TXD_L3T_M (0x3 << HNS3_TXD_L3T_S) +#define HNS3_TXD_L4T_S 2 +#define HNS3_TXD_L4T_M (0x3 << HNS3_TXD_L4T_S) +#define HNS3_TXD_L3CS_B 4 +#define HNS3_TXD_L4CS_B 5 +#define HNS3_TXD_VLAN_B 6 +#define HNS3_TXD_TSO_B 7 + +#define HNS3_TXD_L2LEN_S 8 +#define HNS3_TXD_L2LEN_M (0xff << HNS3_TXD_L2LEN_S) +#define HNS3_TXD_L3LEN_S 16 +#define HNS3_TXD_L3LEN_M (0xff << HNS3_TXD_L3LEN_S) +#define HNS3_TXD_L4LEN_S 24 +#define HNS3_TXD_L4LEN_M (0xff << HNS3_TXD_L4LEN_S) + +#define HNS3_TXD_CSUM_START_S 8 +#define HNS3_TXD_CSUM_START_M (0xffff << HNS3_TXD_CSUM_START_S) + +#define HNS3_TXD_OL3T_S 0 +#define HNS3_TXD_OL3T_M (0x3 << HNS3_TXD_OL3T_S) +#define HNS3_TXD_OVLAN_B 2 +#define HNS3_TXD_MACSEC_B 3 +#define HNS3_TXD_TUNTYPE_S 4 +#define HNS3_TXD_TUNTYPE_M (0xf << HNS3_TXD_TUNTYPE_S) + +#define HNS3_TXD_CSUM_OFFSET_S 8 +#define HNS3_TXD_CSUM_OFFSET_M (0xffff << HNS3_TXD_CSUM_OFFSET_S) + +#define HNS3_TXD_BDTYPE_S 0 +#define HNS3_TXD_BDTYPE_M (0xf << HNS3_TXD_BDTYPE_S) +#define HNS3_TXD_FE_B 4 +#define HNS3_TXD_SC_S 5 +#define HNS3_TXD_SC_M (0x3 << HNS3_TXD_SC_S) +#define HNS3_TXD_EXTEND_B 7 +#define HNS3_TXD_VLD_B 8 +#define HNS3_TXD_RI_B 9 +#define HNS3_TXD_RA_B 10 +#define HNS3_TXD_TSYN_B 11 +#define HNS3_TXD_DECTTL_S 12 +#define HNS3_TXD_DECTTL_M (0xf << HNS3_TXD_DECTTL_S) + +#define HNS3_TXD_OL4CS_B 22 + +#define HNS3_TXD_MSS_S 0 +#define HNS3_TXD_MSS_M (0x3fff << HNS3_TXD_MSS_S) +#define HNS3_TXD_HW_CS_B 14 + +#define HNS3_VECTOR_TX_IRQ BIT_ULL(0) +#define HNS3_VECTOR_RX_IRQ BIT_ULL(1) + +#define HNS3_VECTOR_NOT_INITED 0 +#define HNS3_VECTOR_INITED 1 + +#define HNS3_MAX_BD_SIZE 65535 +#define HNS3_MAX_TSO_BD_NUM 63U +#define HNS3_MAX_TSO_SIZE 1048576U +#define HNS3_MAX_NON_TSO_SIZE 9728U + +#define HNS3_VECTOR_GL_MASK GENMASK(11, 0) +#define HNS3_VECTOR_GL0_OFFSET 0x100 +#define HNS3_VECTOR_GL1_OFFSET 0x200 +#define HNS3_VECTOR_GL2_OFFSET 0x300 +#define HNS3_VECTOR_RL_OFFSET 0x900 +#define HNS3_VECTOR_RL_EN_B 6 +#define HNS3_VECTOR_QL_MASK GENMASK(9, 0) +#define HNS3_VECTOR_TX_QL_OFFSET 0xe00 +#define HNS3_VECTOR_RX_QL_OFFSET 0xf00 + +#define HNS3_RING_EN_B 0 + +#define HNS3_GL0_CQ_MODE_REG 0x20d00 +#define HNS3_GL1_CQ_MODE_REG 
0x20d04 +#define HNS3_GL2_CQ_MODE_REG 0x20d08 +#define HNS3_CQ_MODE_EQE 1U +#define HNS3_CQ_MODE_CQE 0U + +enum hns3_pkt_l2t_type { + HNS3_L2_TYPE_UNICAST, + HNS3_L2_TYPE_MULTICAST, + HNS3_L2_TYPE_BROADCAST, + HNS3_L2_TYPE_INVALID, +}; + +enum hns3_pkt_l3t_type { + HNS3_L3T_NONE, + HNS3_L3T_IPV6, + HNS3_L3T_IPV4, + HNS3_L3T_RESERVED +}; + +enum hns3_pkt_l4t_type { + HNS3_L4T_UNKNOWN, + HNS3_L4T_TCP, + HNS3_L4T_UDP, + HNS3_L4T_SCTP +}; + +enum hns3_pkt_ol3t_type { + HNS3_OL3T_NONE, + HNS3_OL3T_IPV6, + HNS3_OL3T_IPV4_NO_CSUM, + HNS3_OL3T_IPV4_CSUM +}; + +enum hns3_pkt_tun_type { + HNS3_TUN_NONE, + HNS3_TUN_MAC_IN_UDP, + HNS3_TUN_NVGRE, + HNS3_TUN_OTHER +}; + +/* hardware spec ring buffer format */ +struct __packed hns3_desc { + union { + __le64 addr; + __le16 csum; + struct { + __le32 ts_nsec; + __le32 ts_sec; + }; + }; + union { + struct { + __le16 vlan_tag; + __le16 send_size; + union { + __le32 type_cs_vlan_tso_len; + struct { + __u8 type_cs_vlan_tso; + __u8 l2_len; + __u8 l3_len; + __u8 l4_len; + }; + }; + __le16 outer_vlan_tag; + __le16 tv; + + union { + __le32 ol_type_vlan_len_msec; + struct { + __u8 ol_type_vlan_msec; + __u8 ol2_len; + __u8 ol3_len; + __u8 ol4_len; + }; + }; + + __le32 paylen_ol4cs; + __le16 bdtp_fe_sc_vld_ra_ri; + __le16 mss_hw_csum; + } tx; + + struct { + __le32 l234_info; + __le16 pkt_len; + __le16 size; + + __le32 rss_hash; + __le16 fd_id; + __le16 vlan_tag; + + union { + __le32 ol_info; + struct { + __le16 o_dm_vlan_id_fb; + __le16 ot_vlan_tag; + }; + }; + + __le32 bd_base_info; + } rx; + }; +}; + +enum hns3_desc_type { + DESC_TYPE_UNKNOWN = 0, + DESC_TYPE_SKB = 1 << 0, + DESC_TYPE_FRAGLIST_SKB = 1 << 1, + DESC_TYPE_PAGE = 1 << 2, + DESC_TYPE_BOUNCE_ALL = 1 << 3, + DESC_TYPE_BOUNCE_HEAD = 1 << 4, + DESC_TYPE_SGL_SKB = 1 << 5, + DESC_TYPE_PP_FRAG = 1 << 6, +}; + +struct hns3_desc_cb { + dma_addr_t dma; /* dma address of this desc */ + void *buf; /* cpu addr for a desc */ + + /* priv data for the desc, e.g. 
skb when use with ip stack */ + void *priv; + + union { + u32 page_offset; /* for rx */ + u32 send_bytes; /* for tx */ + }; + + u32 length; /* length of the buffer */ + + u16 reuse_flag; + u16 refill; + + /* desc type, used by the ring user to mark the type of the priv data */ + u16 type; + u16 pagecnt_bias; +}; + +enum hns3_pkt_l3type { + HNS3_L3_TYPE_IPV4, + HNS3_L3_TYPE_IPV6, + HNS3_L3_TYPE_ARP, + HNS3_L3_TYPE_RARP, + HNS3_L3_TYPE_IPV4_OPT, + HNS3_L3_TYPE_IPV6_EXT, + HNS3_L3_TYPE_LLDP, + HNS3_L3_TYPE_BPDU, + HNS3_L3_TYPE_MAC_PAUSE, + HNS3_L3_TYPE_PFC_PAUSE, /* 0x9 */ + + /* reserved for 0xA~0xB */ + + HNS3_L3_TYPE_CNM = 0xc, + + /* reserved for 0xD~0xE */ + + HNS3_L3_TYPE_PARSE_FAIL = 0xf /* must be last */ +}; + +enum hns3_pkt_l4type { + HNS3_L4_TYPE_UDP, + HNS3_L4_TYPE_TCP, + HNS3_L4_TYPE_GRE, + HNS3_L4_TYPE_SCTP, + HNS3_L4_TYPE_IGMP, + HNS3_L4_TYPE_ICMP, + + /* reserved for 0x6~0xE */ + + HNS3_L4_TYPE_PARSE_FAIL = 0xf /* must be last */ +}; + +enum hns3_pkt_ol3type { + HNS3_OL3_TYPE_IPV4 = 0, + HNS3_OL3_TYPE_IPV6, + /* reserved for 0x2~0x3 */ + HNS3_OL3_TYPE_IPV4_OPT = 4, + HNS3_OL3_TYPE_IPV6_EXT, + + /* reserved for 0x6~0xE */ + + HNS3_OL3_TYPE_PARSE_FAIL = 0xf /* must be last */ +}; + +enum hns3_pkt_ol4type { + HNS3_OL4_TYPE_NO_TUN, + HNS3_OL4_TYPE_MAC_IN_UDP, + HNS3_OL4_TYPE_NVGRE, + HNS3_OL4_TYPE_UNKNOWN +}; + +struct hns3_rx_ptype { + u32 ptype : 8; + u32 csum_level : 2; + u32 ip_summed : 2; + u32 l3_type : 4; + u32 valid : 1; + u32 hash_type: 3; +}; + +struct ring_stats { + u64 sw_err_cnt; + u64 seg_pkt_cnt; + union { + struct { + u64 tx_pkts; + u64 tx_bytes; + u64 tx_more; + u64 tx_push; + u64 tx_mem_doorbell; + u64 restart_queue; + u64 tx_busy; + u64 tx_copy; + u64 tx_vlan_err; + u64 tx_l4_proto_err; + u64 tx_l2l3l4_err; + u64 tx_tso_err; + u64 over_max_recursion; + u64 hw_limitation; + u64 tx_bounce; + u64 tx_spare_full; + u64 copy_bits_err; + u64 tx_sgl; + u64 skb2sgl_err; + u64 map_sg_err; + }; + struct { + u64 rx_pkts; + u64 rx_bytes; + u64 rx_err_cnt; + u64 reuse_pg_cnt; + u64 err_pkt_len; + u64 err_bd_num; + u64 l2_err; + u64 l3l4_csum_err; + u64 csum_complete; + u64 rx_multicast; + u64 non_reuse_pg; + u64 frag_alloc_err; + u64 frag_alloc; + }; + __le16 csum; + }; +}; + +struct hns3_tx_spare { + dma_addr_t dma; + void *buf; + u32 next_to_use; + u32 next_to_clean; + u32 last_to_clean; + u32 len; +}; + +struct hns3_enet_ring { + struct hns3_desc *desc; /* dma map address space */ + struct hns3_desc_cb *desc_cb; + struct hns3_enet_ring *next; + struct hns3_enet_tqp_vector *tqp_vector; + struct hnae3_queue *tqp; + int queue_index; + struct device *dev; /* will be used for DMA mapping of descriptors */ + struct page_pool *page_pool; + + /* statistic */ + struct ring_stats stats; + struct u64_stats_sync syncp; + + dma_addr_t desc_dma_addr; + u32 buf_size; /* size for hnae_desc->addr, preset by AE */ + u16 desc_num; /* total number of desc */ + int next_to_use; /* idx of next spare desc */ + + /* idx of lastest sent desc, the ring is empty when equal to + * next_to_use + */ + int next_to_clean; + u32 flag; /* ring attribute */ + + int pending_buf; + union { + /* for Tx ring */ + struct { + u32 fd_qb_tx_sample; + int last_to_use; /* last idx used by xmit */ + u32 tx_copybreak; + struct hns3_tx_spare *tx_spare; + }; + + /* for Rx ring */ + struct { + u32 pull_len; /* memcpy len for current rx packet */ + u32 rx_copybreak; + u32 frag_num; + /* first buffer address for current packet */ + unsigned char *va; + struct sk_buff *skb; + struct sk_buff *tail_skb; + }; + }; +} 
____cacheline_internodealigned_in_smp; + +enum hns3_flow_level_range { + HNS3_FLOW_LOW = 0, + HNS3_FLOW_MID = 1, + HNS3_FLOW_HIGH = 2, + HNS3_FLOW_ULTRA = 3, +}; + +#define HNS3_INT_GL_50K 0x0014 +#define HNS3_INT_GL_20K 0x0032 +#define HNS3_INT_GL_18K 0x0036 +#define HNS3_INT_GL_8K 0x007C + +#define HNS3_INT_GL_1US BIT(31) + +#define HNS3_INT_RL_MAX 0x00EC +#define HNS3_INT_RL_ENABLE_MASK 0x40 + +#define HNS3_INT_QL_DEFAULT_CFG 0x20 + +struct hns3_enet_coalesce { + u16 int_gl; + u16 int_ql; + u16 int_ql_max; + u8 adapt_enable : 1; + u8 ql_enable : 1; + u8 unit_1us : 1; + enum hns3_flow_level_range flow_level; +}; + +struct hns3_enet_ring_group { + /* array of pointers to rings */ + struct hns3_enet_ring *ring; + u64 total_bytes; /* total bytes processed this group */ + u64 total_packets; /* total packets processed this group */ + u16 count; + struct hns3_enet_coalesce coal; + struct dim dim; +}; + +struct hns3_enet_tqp_vector { + struct hnae3_handle *handle; + u8 __iomem *mask_addr; + int vector_irq; + int irq_init_flag; + + u16 idx; /* index in the TQP vector array per handle. */ + + struct napi_struct napi; + + struct hns3_enet_ring_group rx_group; + struct hns3_enet_ring_group tx_group; + + cpumask_t affinity_mask; + u16 num_tqps; /* total number of tqps in TQP vector */ + struct irq_affinity_notify affinity_notify; + + char name[HNAE3_INT_NAME_LEN]; + + u64 event_cnt; +} ____cacheline_internodealigned_in_smp; + +struct hns3_nic_priv { + struct hnae3_handle *ae_handle; + struct net_device *netdev; + struct device *dev; + + /** + * the cb for nic to manage the ring buffer, the first half of the + * array is for tx_ring and vice versa for the second half + */ + struct hns3_enet_ring *ring; + struct hns3_enet_tqp_vector *tqp_vector; + u16 vector_num; + u8 max_non_tso_bd_num; + + u64 tx_timeout_count; + + unsigned long state; + + enum dim_cq_period_mode tx_cqe_mode; + enum dim_cq_period_mode rx_cqe_mode; + struct hns3_enet_coalesce tx_coal; + struct hns3_enet_coalesce rx_coal; + u32 tx_copybreak; + u32 rx_copybreak; +}; + +union l3_hdr_info { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; +}; + +union l4_hdr_info { + struct tcphdr *tcp; + struct udphdr *udp; + struct gre_base_hdr *gre; + unsigned char *hdr; +}; + +struct hns3_hw_error_info { + enum hnae3_hw_error_type type; + const char *msg; +}; + +struct hns3_reset_type_map { + enum ethtool_reset_flags rst_flags; + enum hnae3_reset_type rst_type; +}; + +static inline int ring_space(struct hns3_enet_ring *ring) +{ + /* This smp_load_acquire() pairs with smp_store_release() in + * hns3_nic_reclaim_one_desc called by hns3_clean_tx_ring. + */ + int begin = smp_load_acquire(&ring->next_to_clean); + int end = READ_ONCE(ring->next_to_use); + + return ((end >= begin) ? 
(ring->desc_num - end + begin) : + (begin - end)) - 1; +} + +static inline u32 hns3_tqp_read_reg(struct hns3_enet_ring *ring, u32 reg) +{ + return readl_relaxed(ring->tqp->io_base + reg); +} + +static inline u32 hns3_read_reg(void __iomem *base, u32 reg) +{ + return readl(base + reg); +} + +static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value) +{ + u8 __iomem *reg_addr = READ_ONCE(base); + + writel(value, reg_addr + reg); +} + +#define hns3_read_dev(a, reg) \ + hns3_read_reg((a)->io_base, reg) + +static inline bool hns3_nic_resetting(struct net_device *netdev) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + + return test_bit(HNS3_NIC_STATE_RESETTING, &priv->state); +} + +#define hns3_write_dev(a, reg, value) \ + hns3_write_reg((a)->io_base, reg, value) + +#define ring_to_dev(ring) ((ring)->dev) + +#define ring_to_netdev(ring) ((ring)->tqp_vector->napi.dev) + +#define ring_to_dma_dir(ring) (HNAE3_IS_TX_RING(ring) ? \ + DMA_TO_DEVICE : DMA_FROM_DEVICE) + +#define hns3_buf_size(_ring) ((_ring)->buf_size) + +#define hns3_ring_stats_update(ring, cnt) do { \ + typeof(ring) (tmp) = (ring); \ + u64_stats_update_begin(&(tmp)->syncp); \ + ((tmp)->stats.cnt)++; \ + u64_stats_update_end(&(tmp)->syncp); \ +} while (0) \ + +static inline unsigned int hns3_page_order(struct hns3_enet_ring *ring) +{ +#if (PAGE_SIZE < 8192) + if (ring->buf_size > (PAGE_SIZE / 2)) + return 1; +#endif + return 0; +} + +#define hns3_page_size(_ring) (PAGE_SIZE << hns3_page_order(_ring)) + +/* iterator for handling rings in ring group */ +#define hns3_for_each_ring(pos, head) \ + for (pos = (head).ring; (pos); pos = (pos)->next) + +#define hns3_get_handle(ndev) \ + (((struct hns3_nic_priv *)netdev_priv(ndev))->ae_handle) + +#define hns3_gl_usec_to_reg(int_gl) ((int_gl) >> 1) +#define hns3_gl_round_down(int_gl) round_down(int_gl, 2) + +#define hns3_rl_usec_to_reg(int_rl) ((int_rl) >> 2) +#define hns3_rl_round_down(int_rl) round_down(int_rl, 4) + +void hns3_ethtool_set_ops(struct net_device *netdev); +int hns3_set_channels(struct net_device *netdev, + struct ethtool_channels *ch); + +void hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget); +int hns3_init_all_ring(struct hns3_nic_priv *priv); +int hns3_nic_reset_all_ring(struct hnae3_handle *h); +void hns3_fini_ring(struct hns3_enet_ring *ring); +netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev); +bool hns3_is_phys_func(struct pci_dev *pdev); +int hns3_clean_rx_ring( + struct hns3_enet_ring *ring, int budget, + void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *)); + +void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector, + u32 gl_value); +void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector, + u32 gl_value); +void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector, + u32 rl_value); +void hns3_set_vector_coalesce_rx_ql(struct hns3_enet_tqp_vector *tqp_vector, + u32 ql_value); +void hns3_set_vector_coalesce_tx_ql(struct hns3_enet_tqp_vector *tqp_vector, + u32 ql_value); + +void hns3_request_update_promisc_mode(struct hnae3_handle *handle); +int hns3_reset_notify(struct hnae3_handle *handle, + enum hnae3_reset_notify_type type); + +#ifdef CONFIG_HNS3_DCB +void hns3_dcbnl_setup(struct hnae3_handle *handle); +#else +static inline void hns3_dcbnl_setup(struct hnae3_handle *handle) {} +#endif + +int hns3_dbg_init(struct hnae3_handle *handle); +void hns3_dbg_uninit(struct hnae3_handle *handle); +void hns3_dbg_register_debugfs(const char *debugfs_dir_name); 
+void hns3_dbg_unregister_debugfs(void); +void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size); +u16 hns3_get_max_available_channels(struct hnae3_handle *h); +void hns3_cq_period_mode_init(struct hns3_nic_priv *priv, + enum dim_cq_period_mode tx_mode, + enum dim_cq_period_mode rx_mode); + +void hns3_external_lb_prepare(struct net_device *ndev, bool if_running); +void hns3_external_lb_restore(struct net_device *ndev, bool if_running); +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c new file mode 100644 index 000000000..e22835ae8 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -0,0 +1,2156 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. + +#include +#include +#include +#include + +#include "hns3_enet.h" +#include "hns3_ethtool.h" + +/* tqp related stats */ +#define HNS3_TQP_STAT(_string, _member) { \ + .stats_string = _string, \ + .stats_offset = offsetof(struct hns3_enet_ring, stats) +\ + offsetof(struct ring_stats, _member), \ +} + +static const struct hns3_stats hns3_txq_stats[] = { + /* Tx per-queue statistics */ + HNS3_TQP_STAT("dropped", sw_err_cnt), + HNS3_TQP_STAT("seg_pkt_cnt", seg_pkt_cnt), + HNS3_TQP_STAT("packets", tx_pkts), + HNS3_TQP_STAT("bytes", tx_bytes), + HNS3_TQP_STAT("more", tx_more), + HNS3_TQP_STAT("push", tx_push), + HNS3_TQP_STAT("mem_doorbell", tx_mem_doorbell), + HNS3_TQP_STAT("wake", restart_queue), + HNS3_TQP_STAT("busy", tx_busy), + HNS3_TQP_STAT("copy", tx_copy), + HNS3_TQP_STAT("vlan_err", tx_vlan_err), + HNS3_TQP_STAT("l4_proto_err", tx_l4_proto_err), + HNS3_TQP_STAT("l2l3l4_err", tx_l2l3l4_err), + HNS3_TQP_STAT("tso_err", tx_tso_err), + HNS3_TQP_STAT("over_max_recursion", over_max_recursion), + HNS3_TQP_STAT("hw_limitation", hw_limitation), + HNS3_TQP_STAT("bounce", tx_bounce), + HNS3_TQP_STAT("spare_full", tx_spare_full), + HNS3_TQP_STAT("copy_bits_err", copy_bits_err), + HNS3_TQP_STAT("sgl", tx_sgl), + HNS3_TQP_STAT("skb2sgl_err", skb2sgl_err), + HNS3_TQP_STAT("map_sg_err", map_sg_err), +}; + +#define HNS3_TXQ_STATS_COUNT ARRAY_SIZE(hns3_txq_stats) + +static const struct hns3_stats hns3_rxq_stats[] = { + /* Rx per-queue statistics */ + HNS3_TQP_STAT("dropped", sw_err_cnt), + HNS3_TQP_STAT("seg_pkt_cnt", seg_pkt_cnt), + HNS3_TQP_STAT("packets", rx_pkts), + HNS3_TQP_STAT("bytes", rx_bytes), + HNS3_TQP_STAT("errors", rx_err_cnt), + HNS3_TQP_STAT("reuse_pg_cnt", reuse_pg_cnt), + HNS3_TQP_STAT("err_pkt_len", err_pkt_len), + HNS3_TQP_STAT("err_bd_num", err_bd_num), + HNS3_TQP_STAT("l2_err", l2_err), + HNS3_TQP_STAT("l3l4_csum_err", l3l4_csum_err), + HNS3_TQP_STAT("csum_complete", csum_complete), + HNS3_TQP_STAT("multicast", rx_multicast), + HNS3_TQP_STAT("non_reuse_pg", non_reuse_pg), + HNS3_TQP_STAT("frag_alloc_err", frag_alloc_err), + HNS3_TQP_STAT("frag_alloc", frag_alloc), +}; + +#define HNS3_PRIV_FLAGS_LEN ARRAY_SIZE(hns3_priv_flags) + +#define HNS3_RXQ_STATS_COUNT ARRAY_SIZE(hns3_rxq_stats) + +#define HNS3_TQP_STATS_COUNT (HNS3_TXQ_STATS_COUNT + HNS3_RXQ_STATS_COUNT) + +#define HNS3_NIC_LB_TEST_PKT_NUM 1 +#define HNS3_NIC_LB_TEST_RING_ID 0 +#define HNS3_NIC_LB_TEST_PACKET_SIZE 128 +#define HNS3_NIC_LB_SETUP_USEC 10000 + +/* Nic loopback test err */ +#define HNS3_NIC_LB_TEST_NO_MEM_ERR 1 +#define HNS3_NIC_LB_TEST_TX_CNT_ERR 2 +#define HNS3_NIC_LB_TEST_RX_CNT_ERR 3 + +static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en) +{ + struct hnae3_handle *h = hns3_get_handle(ndev); + 
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev); + int ret; + + if (!h->ae_algo->ops->set_loopback || + !h->ae_algo->ops->set_promisc_mode) + return -EOPNOTSUPP; + + switch (loop) { + case HNAE3_LOOP_SERIAL_SERDES: + case HNAE3_LOOP_PARALLEL_SERDES: + case HNAE3_LOOP_APP: + case HNAE3_LOOP_PHY: + case HNAE3_LOOP_EXTERNAL: + ret = h->ae_algo->ops->set_loopback(h, loop, en); + break; + default: + ret = -ENOTSUPP; + break; + } + + if (ret || ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) + return ret; + + if (en) + h->ae_algo->ops->set_promisc_mode(h, true, true); + else + /* recover promisc mode before loopback test */ + hns3_request_update_promisc_mode(h); + + return ret; +} + +static int hns3_lp_up(struct net_device *ndev, enum hnae3_loop loop_mode) +{ + struct hnae3_handle *h = hns3_get_handle(ndev); + int ret; + + ret = hns3_nic_reset_all_ring(h); + if (ret) + return ret; + + ret = hns3_lp_setup(ndev, loop_mode, true); + usleep_range(HNS3_NIC_LB_SETUP_USEC, HNS3_NIC_LB_SETUP_USEC * 2); + + return ret; +} + +static int hns3_lp_down(struct net_device *ndev, enum hnae3_loop loop_mode) +{ + int ret; + + ret = hns3_lp_setup(ndev, loop_mode, false); + if (ret) { + netdev_err(ndev, "lb_setup return error: %d\n", ret); + return ret; + } + + usleep_range(HNS3_NIC_LB_SETUP_USEC, HNS3_NIC_LB_SETUP_USEC * 2); + + return 0; +} + +static void hns3_lp_setup_skb(struct sk_buff *skb) +{ +#define HNS3_NIC_LB_DST_MAC_ADDR 0x1f + + struct net_device *ndev = skb->dev; + struct hnae3_handle *handle; + struct hnae3_ae_dev *ae_dev; + unsigned char *packet; + struct ethhdr *ethh; + unsigned int i; + + skb_reserve(skb, NET_IP_ALIGN); + ethh = skb_put(skb, sizeof(struct ethhdr)); + packet = skb_put(skb, HNS3_NIC_LB_TEST_PACKET_SIZE); + + memcpy(ethh->h_dest, ndev->dev_addr, ETH_ALEN); + + /* The dst mac addr of loopback packet is the same as the host' + * mac addr, the SSU component may loop back the packet to host + * before the packet reaches mac or serdes, which will defect + * the purpose of mac or serdes selftest. 
+ */ + handle = hns3_get_handle(ndev); + ae_dev = pci_get_drvdata(handle->pdev); + if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) + ethh->h_dest[5] += HNS3_NIC_LB_DST_MAC_ADDR; + eth_zero_addr(ethh->h_source); + ethh->h_proto = htons(ETH_P_ARP); + skb_reset_mac_header(skb); + + for (i = 0; i < HNS3_NIC_LB_TEST_PACKET_SIZE; i++) + packet[i] = (unsigned char)(i & 0xff); +} + +static void hns3_lb_check_skb_data(struct hns3_enet_ring *ring, + struct sk_buff *skb) +{ + struct hns3_enet_tqp_vector *tqp_vector = ring->tqp_vector; + unsigned char *packet = skb->data; + u32 len = skb_headlen(skb); + u32 i; + + len = min_t(u32, len, HNS3_NIC_LB_TEST_PACKET_SIZE); + + for (i = 0; i < len; i++) + if (packet[i] != (unsigned char)(i & 0xff)) + break; + + /* The packet is correctly received */ + if (i == HNS3_NIC_LB_TEST_PACKET_SIZE) + tqp_vector->rx_group.total_packets++; + else + print_hex_dump(KERN_ERR, "selftest:", DUMP_PREFIX_OFFSET, 16, 1, + skb->data, len, true); + + dev_kfree_skb_any(skb); +} + +static u32 hns3_lb_check_rx_ring(struct hns3_nic_priv *priv, u32 budget) +{ + struct hnae3_handle *h = priv->ae_handle; + struct hnae3_knic_private_info *kinfo; + u32 i, rcv_good_pkt_total = 0; + + kinfo = &h->kinfo; + for (i = kinfo->num_tqps; i < kinfo->num_tqps * 2; i++) { + struct hns3_enet_ring *ring = &priv->ring[i]; + struct hns3_enet_ring_group *rx_group; + u64 pre_rx_pkt; + + rx_group = &ring->tqp_vector->rx_group; + pre_rx_pkt = rx_group->total_packets; + + preempt_disable(); + hns3_clean_rx_ring(ring, budget, hns3_lb_check_skb_data); + preempt_enable(); + + rcv_good_pkt_total += (rx_group->total_packets - pre_rx_pkt); + rx_group->total_packets = pre_rx_pkt; + } + return rcv_good_pkt_total; +} + +static void hns3_lb_clear_tx_ring(struct hns3_nic_priv *priv, u32 start_ringid, + u32 end_ringid, u32 budget) +{ + u32 i; + + for (i = start_ringid; i <= end_ringid; i++) { + struct hns3_enet_ring *ring = &priv->ring[i]; + + hns3_clean_tx_ring(ring, 0); + } +} + +/** + * hns3_lp_run_test - run loopback test + * @ndev: net device + * @mode: loopback type + * + * Return: %0 for success or a NIC loopback test error code on failure + */ +static int hns3_lp_run_test(struct net_device *ndev, enum hnae3_loop mode) +{ + struct hns3_nic_priv *priv = netdev_priv(ndev); + struct sk_buff *skb; + u32 i, good_cnt; + int ret_val = 0; + + skb = alloc_skb(HNS3_NIC_LB_TEST_PACKET_SIZE + ETH_HLEN + NET_IP_ALIGN, + GFP_KERNEL); + if (!skb) + return HNS3_NIC_LB_TEST_NO_MEM_ERR; + + skb->dev = ndev; + hns3_lp_setup_skb(skb); + skb->queue_mapping = HNS3_NIC_LB_TEST_RING_ID; + + good_cnt = 0; + for (i = 0; i < HNS3_NIC_LB_TEST_PKT_NUM; i++) { + netdev_tx_t tx_ret; + + skb_get(skb); + tx_ret = hns3_nic_net_xmit(skb, ndev); + if (tx_ret == NETDEV_TX_OK) { + good_cnt++; + } else { + kfree_skb(skb); + netdev_err(ndev, "hns3_lb_run_test xmit failed: %d\n", + tx_ret); + } + } + if (good_cnt != HNS3_NIC_LB_TEST_PKT_NUM) { + ret_val = HNS3_NIC_LB_TEST_TX_CNT_ERR; + netdev_err(ndev, "mode %d sent fail, cnt=0x%x, budget=0x%x\n", + mode, good_cnt, HNS3_NIC_LB_TEST_PKT_NUM); + goto out; + } + + /* Allow 200 milliseconds for packets to go from Tx to Rx */ + msleep(200); + + good_cnt = hns3_lb_check_rx_ring(priv, HNS3_NIC_LB_TEST_PKT_NUM); + if (good_cnt != HNS3_NIC_LB_TEST_PKT_NUM) { + ret_val = HNS3_NIC_LB_TEST_RX_CNT_ERR; + netdev_err(ndev, "mode %d recv fail, cnt=0x%x, budget=0x%x\n", + mode, good_cnt, HNS3_NIC_LB_TEST_PKT_NUM); + } + +out: + hns3_lb_clear_tx_ring(priv, HNS3_NIC_LB_TEST_RING_ID, + HNS3_NIC_LB_TEST_RING_ID, + 
HNS3_NIC_LB_TEST_PKT_NUM); + + kfree_skb(skb); + return ret_val; +} + +static void hns3_set_selftest_param(struct hnae3_handle *h, int (*st_param)[2]) +{ + st_param[HNAE3_LOOP_EXTERNAL][0] = HNAE3_LOOP_EXTERNAL; + st_param[HNAE3_LOOP_EXTERNAL][1] = + h->flags & HNAE3_SUPPORT_EXTERNAL_LOOPBACK; + + st_param[HNAE3_LOOP_APP][0] = HNAE3_LOOP_APP; + st_param[HNAE3_LOOP_APP][1] = + h->flags & HNAE3_SUPPORT_APP_LOOPBACK; + + st_param[HNAE3_LOOP_SERIAL_SERDES][0] = HNAE3_LOOP_SERIAL_SERDES; + st_param[HNAE3_LOOP_SERIAL_SERDES][1] = + h->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK; + + st_param[HNAE3_LOOP_PARALLEL_SERDES][0] = + HNAE3_LOOP_PARALLEL_SERDES; + st_param[HNAE3_LOOP_PARALLEL_SERDES][1] = + h->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK; + + st_param[HNAE3_LOOP_PHY][0] = HNAE3_LOOP_PHY; + st_param[HNAE3_LOOP_PHY][1] = + h->flags & HNAE3_SUPPORT_PHY_LOOPBACK; +} + +static void hns3_selftest_prepare(struct net_device *ndev, bool if_running) +{ + struct hns3_nic_priv *priv = netdev_priv(ndev); + struct hnae3_handle *h = priv->ae_handle; + + if (if_running) + ndev->netdev_ops->ndo_stop(ndev); + +#if IS_ENABLED(CONFIG_VLAN_8021Q) + /* Disable the vlan filter for selftest does not support it */ + if (h->ae_algo->ops->enable_vlan_filter && + ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) + h->ae_algo->ops->enable_vlan_filter(h, false); +#endif + + /* Tell firmware to stop mac autoneg before loopback test start, + * otherwise loopback test may be failed when the port is still + * negotiating. + */ + if (h->ae_algo->ops->halt_autoneg) + h->ae_algo->ops->halt_autoneg(h, true); + + set_bit(HNS3_NIC_STATE_TESTING, &priv->state); +} + +static void hns3_selftest_restore(struct net_device *ndev, bool if_running) +{ + struct hns3_nic_priv *priv = netdev_priv(ndev); + struct hnae3_handle *h = priv->ae_handle; + + clear_bit(HNS3_NIC_STATE_TESTING, &priv->state); + + if (h->ae_algo->ops->halt_autoneg) + h->ae_algo->ops->halt_autoneg(h, false); + +#if IS_ENABLED(CONFIG_VLAN_8021Q) + if (h->ae_algo->ops->enable_vlan_filter && + ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) + h->ae_algo->ops->enable_vlan_filter(h, true); +#endif + + if (if_running) + ndev->netdev_ops->ndo_open(ndev); +} + +static void hns3_do_selftest(struct net_device *ndev, int (*st_param)[2], + struct ethtool_test *eth_test, u64 *data) +{ + int test_index = HNAE3_LOOP_APP; + u32 i; + + for (i = HNAE3_LOOP_APP; i < HNAE3_LOOP_NONE; i++) { + enum hnae3_loop loop_type = (enum hnae3_loop)st_param[i][0]; + + if (!st_param[i][1]) + continue; + + data[test_index] = hns3_lp_up(ndev, loop_type); + if (!data[test_index]) + data[test_index] = hns3_lp_run_test(ndev, loop_type); + + hns3_lp_down(ndev, loop_type); + + if (data[test_index]) + eth_test->flags |= ETH_TEST_FL_FAILED; + + test_index++; + } +} + +static void hns3_do_external_lb(struct net_device *ndev, + struct ethtool_test *eth_test, u64 *data) +{ + data[HNAE3_LOOP_EXTERNAL] = hns3_lp_up(ndev, HNAE3_LOOP_EXTERNAL); + if (!data[HNAE3_LOOP_EXTERNAL]) + data[HNAE3_LOOP_EXTERNAL] = hns3_lp_run_test(ndev, HNAE3_LOOP_EXTERNAL); + hns3_lp_down(ndev, HNAE3_LOOP_EXTERNAL); + + if (data[HNAE3_LOOP_EXTERNAL]) + eth_test->flags |= ETH_TEST_FL_FAILED; + + eth_test->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE; +} + +/** + * hns3_self_test - self test + * @ndev: net device + * @eth_test: test cmd + * @data: test result + */ +static void hns3_self_test(struct net_device *ndev, + struct ethtool_test *eth_test, u64 *data) +{ + struct hns3_nic_priv *priv = netdev_priv(ndev); + struct hnae3_handle *h = 
priv->ae_handle; + int st_param[HNAE3_LOOP_NONE][2]; + bool if_running = netif_running(ndev); + + if (hns3_nic_resetting(ndev)) { + netdev_err(ndev, "dev resetting!"); + return; + } + + if (!(eth_test->flags & ETH_TEST_FL_OFFLINE)) + return; + + if (netif_msg_ifdown(h)) + netdev_info(ndev, "self test start\n"); + + hns3_set_selftest_param(h, st_param); + + /* external loopback test requires that the link is up and the duplex is + * full, do external test first to reduce the whole test time + */ + if (eth_test->flags & ETH_TEST_FL_EXTERNAL_LB) { + hns3_external_lb_prepare(ndev, if_running); + hns3_do_external_lb(ndev, eth_test, data); + hns3_external_lb_restore(ndev, if_running); + } + + hns3_selftest_prepare(ndev, if_running); + hns3_do_selftest(ndev, st_param, eth_test, data); + hns3_selftest_restore(ndev, if_running); + + if (netif_msg_ifdown(h)) + netdev_info(ndev, "self test end\n"); +} + +static void hns3_update_limit_promisc_mode(struct net_device *netdev, + bool enable) +{ + struct hnae3_handle *handle = hns3_get_handle(netdev); + + if (enable) + set_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags); + else + clear_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags); + + hns3_request_update_promisc_mode(handle); +} + +static const struct hns3_pflag_desc hns3_priv_flags[HNAE3_PFLAG_MAX] = { + { "limit_promisc", hns3_update_limit_promisc_mode } +}; + +static int hns3_get_sset_count(struct net_device *netdev, int stringset) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + const struct hnae3_ae_ops *ops = h->ae_algo->ops; + + if (!ops->get_sset_count) + return -EOPNOTSUPP; + + switch (stringset) { + case ETH_SS_STATS: + return ((HNS3_TQP_STATS_COUNT * h->kinfo.num_tqps) + + ops->get_sset_count(h, stringset)); + + case ETH_SS_TEST: + return ops->get_sset_count(h, stringset); + + case ETH_SS_PRIV_FLAGS: + return HNAE3_PFLAG_MAX; + + default: + return -EOPNOTSUPP; + } +} + +static void *hns3_update_strings(u8 *data, const struct hns3_stats *stats, + u32 stat_count, u32 num_tqps, const char *prefix) +{ +#define MAX_PREFIX_SIZE (6 + 4) + u32 size_left; + u32 i, j; + u32 n1; + + for (i = 0; i < num_tqps; i++) { + for (j = 0; j < stat_count; j++) { + data[ETH_GSTRING_LEN - 1] = '\0'; + + /* first, prepend the prefix string */ + n1 = scnprintf(data, MAX_PREFIX_SIZE, "%s%u_", + prefix, i); + size_left = (ETH_GSTRING_LEN - 1) - n1; + + /* now, concatenate the stats string to it */ + strncat(data, stats[j].stats_string, size_left); + data += ETH_GSTRING_LEN; + } + } + + return data; +} + +static u8 *hns3_get_strings_tqps(struct hnae3_handle *handle, u8 *data) +{ + struct hnae3_knic_private_info *kinfo = &handle->kinfo; + const char tx_prefix[] = "txq"; + const char rx_prefix[] = "rxq"; + + /* get strings for Tx */ + data = hns3_update_strings(data, hns3_txq_stats, HNS3_TXQ_STATS_COUNT, + kinfo->num_tqps, tx_prefix); + + /* get strings for Rx */ + data = hns3_update_strings(data, hns3_rxq_stats, HNS3_RXQ_STATS_COUNT, + kinfo->num_tqps, rx_prefix); + + return data; +} + +static void hns3_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + const struct hnae3_ae_ops *ops = h->ae_algo->ops; + char *buff = (char *)data; + int i; + + if (!ops->get_strings) + return; + + switch (stringset) { + case ETH_SS_STATS: + buff = hns3_get_strings_tqps(h, buff); + ops->get_strings(h, stringset, (u8 *)buff); + break; + case ETH_SS_TEST: + ops->get_strings(h, stringset, data); + break; + case ETH_SS_PRIV_FLAGS: + for (i = 0; i < 
HNS3_PRIV_FLAGS_LEN; i++) { + snprintf(buff, ETH_GSTRING_LEN, "%s", + hns3_priv_flags[i].name); + buff += ETH_GSTRING_LEN; + } + break; + default: + break; + } +} + +static u64 *hns3_get_stats_tqps(struct hnae3_handle *handle, u64 *data) +{ + struct hns3_nic_priv *nic_priv = (struct hns3_nic_priv *)handle->priv; + struct hnae3_knic_private_info *kinfo = &handle->kinfo; + struct hns3_enet_ring *ring; + u8 *stat; + int i, j; + + /* get stats for Tx */ + for (i = 0; i < kinfo->num_tqps; i++) { + ring = &nic_priv->ring[i]; + for (j = 0; j < HNS3_TXQ_STATS_COUNT; j++) { + stat = (u8 *)ring + hns3_txq_stats[j].stats_offset; + *data++ = *(u64 *)stat; + } + } + + /* get stats for Rx */ + for (i = 0; i < kinfo->num_tqps; i++) { + ring = &nic_priv->ring[i + kinfo->num_tqps]; + for (j = 0; j < HNS3_RXQ_STATS_COUNT; j++) { + stat = (u8 *)ring + hns3_rxq_stats[j].stats_offset; + *data++ = *(u64 *)stat; + } + } + + return data; +} + +/* hns3_get_stats - get detail statistics. + * @netdev: net device + * @stats: statistics info. + * @data: statistics data. + */ +static void hns3_get_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + u64 *p = data; + + if (hns3_nic_resetting(netdev)) { + netdev_err(netdev, "dev resetting, could not get stats\n"); + return; + } + + if (!h->ae_algo->ops->get_stats || !h->ae_algo->ops->update_stats) { + netdev_err(netdev, "could not get any statistics\n"); + return; + } + + h->ae_algo->ops->update_stats(h, &netdev->stats); + + /* get per-queue stats */ + p = hns3_get_stats_tqps(h, p); + + /* get MAC & other misc hardware stats */ + h->ae_algo->ops->get_stats(h, p); +} + +static void hns3_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + u32 fw_version; + + if (!h->ae_algo->ops->get_fw_version) { + netdev_err(netdev, "could not get fw version!\n"); + return; + } + + strncpy(drvinfo->driver, dev_driver_string(&h->pdev->dev), + sizeof(drvinfo->driver)); + drvinfo->driver[sizeof(drvinfo->driver) - 1] = '\0'; + + strncpy(drvinfo->bus_info, pci_name(h->pdev), + sizeof(drvinfo->bus_info)); + drvinfo->bus_info[ETHTOOL_BUSINFO_LEN - 1] = '\0'; + + fw_version = priv->ae_handle->ae_algo->ops->get_fw_version(h); + + snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), + "%lu.%lu.%lu.%lu", + hnae3_get_field(fw_version, HNAE3_FW_VERSION_BYTE3_MASK, + HNAE3_FW_VERSION_BYTE3_SHIFT), + hnae3_get_field(fw_version, HNAE3_FW_VERSION_BYTE2_MASK, + HNAE3_FW_VERSION_BYTE2_SHIFT), + hnae3_get_field(fw_version, HNAE3_FW_VERSION_BYTE1_MASK, + HNAE3_FW_VERSION_BYTE1_SHIFT), + hnae3_get_field(fw_version, HNAE3_FW_VERSION_BYTE0_MASK, + HNAE3_FW_VERSION_BYTE0_SHIFT)); +} + +static u32 hns3_get_link(struct net_device *netdev) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + if (h->ae_algo->ops->get_status) + return h->ae_algo->ops->get_status(h); + else + return 0; +} + +static void hns3_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *param, + struct kernel_ethtool_ringparam *kernel_param, + struct netlink_ext_ack *extack) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + int rx_queue_index = h->kinfo.num_tqps; + + if (hns3_nic_resetting(netdev) || !priv->ring) { + netdev_err(netdev, "failed to get ringparam value, due to dev resetting or uninited\n"); + return; + } + + param->tx_max_pending = HNS3_RING_MAX_PENDING; 
+ param->rx_max_pending = HNS3_RING_MAX_PENDING; + + param->tx_pending = priv->ring[0].desc_num; + param->rx_pending = priv->ring[rx_queue_index].desc_num; + kernel_param->rx_buf_len = priv->ring[rx_queue_index].buf_size; + kernel_param->tx_push = test_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, + &priv->state); +} + +static void hns3_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *param) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev); + + if (!test_bit(HNAE3_DEV_SUPPORT_PAUSE_B, ae_dev->caps)) + return; + + if (h->ae_algo->ops->get_pauseparam) + h->ae_algo->ops->get_pauseparam(h, &param->autoneg, + &param->rx_pause, &param->tx_pause); +} + +static int hns3_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *param) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev); + + if (!test_bit(HNAE3_DEV_SUPPORT_PAUSE_B, ae_dev->caps)) + return -EOPNOTSUPP; + + netif_dbg(h, drv, netdev, + "set pauseparam: autoneg=%u, rx:%u, tx:%u\n", + param->autoneg, param->rx_pause, param->tx_pause); + + if (h->ae_algo->ops->set_pauseparam) + return h->ae_algo->ops->set_pauseparam(h, param->autoneg, + param->rx_pause, + param->tx_pause); + return -EOPNOTSUPP; +} + +static void hns3_get_ksettings(struct hnae3_handle *h, + struct ethtool_link_ksettings *cmd) +{ + const struct hnae3_ae_ops *ops = h->ae_algo->ops; + + /* 1.auto_neg & speed & duplex from cmd */ + if (ops->get_ksettings_an_result) + ops->get_ksettings_an_result(h, + &cmd->base.autoneg, + &cmd->base.speed, + &cmd->base.duplex, + &cmd->lanes); + + /* 2.get link mode */ + if (ops->get_link_mode) + ops->get_link_mode(h, + cmd->link_modes.supported, + cmd->link_modes.advertising); + + /* 3.mdix_ctrl&mdix get from phy reg */ + if (ops->get_mdix_mode) + ops->get_mdix_mode(h, &cmd->base.eth_tp_mdix_ctrl, + &cmd->base.eth_tp_mdix); +} + +static int hns3_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev); + const struct hnae3_ae_ops *ops; + u8 module_type; + u8 media_type; + u8 link_stat; + + ops = h->ae_algo->ops; + if (ops->get_media_type) + ops->get_media_type(h, &media_type, &module_type); + else + return -EOPNOTSUPP; + + switch (media_type) { + case HNAE3_MEDIA_TYPE_NONE: + cmd->base.port = PORT_NONE; + hns3_get_ksettings(h, cmd); + break; + case HNAE3_MEDIA_TYPE_FIBER: + if (module_type == HNAE3_MODULE_TYPE_UNKNOWN) + cmd->base.port = PORT_OTHER; + else if (module_type == HNAE3_MODULE_TYPE_CR) + cmd->base.port = PORT_DA; + else + cmd->base.port = PORT_FIBRE; + + hns3_get_ksettings(h, cmd); + break; + case HNAE3_MEDIA_TYPE_BACKPLANE: + cmd->base.port = PORT_NONE; + hns3_get_ksettings(h, cmd); + break; + case HNAE3_MEDIA_TYPE_COPPER: + cmd->base.port = PORT_TP; + if (test_bit(HNAE3_DEV_SUPPORT_PHY_IMP_B, ae_dev->caps) && + ops->get_phy_link_ksettings) + ops->get_phy_link_ksettings(h, cmd); + else if (!netdev->phydev) + hns3_get_ksettings(h, cmd); + else + phy_ethtool_ksettings_get(netdev->phydev, cmd); + break; + default: + + netdev_warn(netdev, "Unknown media type"); + return 0; + } + + /* mdio_support */ + cmd->base.mdio_support = ETH_MDIO_SUPPORTS_C22; + + link_stat = hns3_get_link(netdev); + if (!link_stat) { + cmd->base.speed = SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; + } + + return 0; +} + +static int hns3_check_ksettings_param(const struct net_device 
*netdev, + const struct ethtool_link_ksettings *cmd) +{ + struct hnae3_handle *handle = hns3_get_handle(netdev); + const struct hnae3_ae_ops *ops = handle->ae_algo->ops; + u8 module_type = HNAE3_MODULE_TYPE_UNKNOWN; + u8 media_type = HNAE3_MEDIA_TYPE_UNKNOWN; + u32 lane_num; + u8 autoneg; + u32 speed; + u8 duplex; + int ret; + + /* hw doesn't support use specified speed and duplex to negotiate, + * unnecessary to check them when autoneg on. + */ + if (cmd->base.autoneg) + return 0; + + if (ops->get_ksettings_an_result) { + ops->get_ksettings_an_result(handle, &autoneg, &speed, &duplex, &lane_num); + if (cmd->base.autoneg == autoneg && cmd->base.speed == speed && + cmd->base.duplex == duplex && cmd->lanes == lane_num) + return 0; + } + + if (ops->get_media_type) + ops->get_media_type(handle, &media_type, &module_type); + + if (cmd->base.duplex == DUPLEX_HALF && + media_type != HNAE3_MEDIA_TYPE_COPPER) { + netdev_err(netdev, + "only copper port supports half duplex!"); + return -EINVAL; + } + + if (ops->check_port_speed) { + ret = ops->check_port_speed(handle, cmd->base.speed); + if (ret) { + netdev_err(netdev, "unsupported speed\n"); + return ret; + } + } + + return 0; +} + +static int hns3_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) +{ + struct hnae3_handle *handle = hns3_get_handle(netdev); + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev); + const struct hnae3_ae_ops *ops = handle->ae_algo->ops; + int ret; + + /* Chip don't support this mode. */ + if (cmd->base.speed == SPEED_1000 && cmd->base.duplex == DUPLEX_HALF) + return -EINVAL; + + if (cmd->lanes && !hnae3_ae_dev_lane_num_supported(ae_dev)) + return -EOPNOTSUPP; + + netif_dbg(handle, drv, netdev, + "set link(%s): autoneg=%u, speed=%u, duplex=%u, lanes=%u\n", + netdev->phydev ? "phy" : "mac", + cmd->base.autoneg, cmd->base.speed, cmd->base.duplex, + cmd->lanes); + + /* Only support ksettings_set for netdev with phy attached for now */ + if (netdev->phydev) { + if (cmd->base.speed == SPEED_1000 && + cmd->base.autoneg == AUTONEG_DISABLE) + return -EINVAL; + + return phy_ethtool_ksettings_set(netdev->phydev, cmd); + } else if (test_bit(HNAE3_DEV_SUPPORT_PHY_IMP_B, ae_dev->caps) && + ops->set_phy_link_ksettings) { + return ops->set_phy_link_ksettings(handle, cmd); + } + + if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) + return -EOPNOTSUPP; + + ret = hns3_check_ksettings_param(netdev, cmd); + if (ret) + return ret; + + if (ops->set_autoneg) { + ret = ops->set_autoneg(handle, cmd->base.autoneg); + if (ret) + return ret; + } + + /* hw doesn't support use specified speed and duplex to negotiate, + * ignore them when autoneg on. 
+ */ + if (cmd->base.autoneg) { + netdev_info(netdev, + "autoneg is on, ignore the speed and duplex\n"); + return 0; + } + + if (ops->cfg_mac_speed_dup_h) + ret = ops->cfg_mac_speed_dup_h(handle, cmd->base.speed, + cmd->base.duplex, (u8)(cmd->lanes)); + + return ret; +} + +static u32 hns3_get_rss_key_size(struct net_device *netdev) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + if (!h->ae_algo->ops->get_rss_key_size) + return 0; + + return h->ae_algo->ops->get_rss_key_size(h); +} + +static u32 hns3_get_rss_indir_size(struct net_device *netdev) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev); + + return ae_dev->dev_specs.rss_ind_tbl_size; +} + +static int hns3_get_rss(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + if (!h->ae_algo->ops->get_rss) + return -EOPNOTSUPP; + + return h->ae_algo->ops->get_rss(h, indir, key, hfunc); +} + +static int hns3_set_rss(struct net_device *netdev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev); + + if (!h->ae_algo->ops->set_rss) + return -EOPNOTSUPP; + + if ((ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2 && + hfunc != ETH_RSS_HASH_TOP) || (hfunc != ETH_RSS_HASH_NO_CHANGE && + hfunc != ETH_RSS_HASH_TOP && hfunc != ETH_RSS_HASH_XOR)) { + netdev_err(netdev, "hash func not supported\n"); + return -EOPNOTSUPP; + } + + if (!indir) { + netdev_err(netdev, + "set rss failed for indir is empty\n"); + return -EOPNOTSUPP; + } + + return h->ae_algo->ops->set_rss(h, indir, key, hfunc); +} + +static int hns3_get_rxnfc(struct net_device *netdev, + struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = h->kinfo.num_tqps; + return 0; + case ETHTOOL_GRXFH: + if (h->ae_algo->ops->get_rss_tuple) + return h->ae_algo->ops->get_rss_tuple(h, cmd); + return -EOPNOTSUPP; + case ETHTOOL_GRXCLSRLCNT: + if (h->ae_algo->ops->get_fd_rule_cnt) + return h->ae_algo->ops->get_fd_rule_cnt(h, cmd); + return -EOPNOTSUPP; + case ETHTOOL_GRXCLSRULE: + if (h->ae_algo->ops->get_fd_rule_info) + return h->ae_algo->ops->get_fd_rule_info(h, cmd); + return -EOPNOTSUPP; + case ETHTOOL_GRXCLSRLALL: + if (h->ae_algo->ops->get_fd_all_rules) + return h->ae_algo->ops->get_fd_all_rules(h, cmd, + rule_locs); + return -EOPNOTSUPP; + default: + return -EOPNOTSUPP; + } +} + +static const struct hns3_reset_type_map hns3_reset_type[] = { + {ETH_RESET_MGMT, HNAE3_IMP_RESET}, + {ETH_RESET_ALL, HNAE3_GLOBAL_RESET}, + {ETH_RESET_DEDICATED, HNAE3_FUNC_RESET}, +}; + +static const struct hns3_reset_type_map hns3vf_reset_type[] = { + {ETH_RESET_DEDICATED, HNAE3_VF_FUNC_RESET}, +}; + +static int hns3_set_reset(struct net_device *netdev, u32 *flags) +{ + enum hnae3_reset_type rst_type = HNAE3_NONE_RESET; + struct hnae3_handle *h = hns3_get_handle(netdev); + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev); + const struct hnae3_ae_ops *ops = h->ae_algo->ops; + const struct hns3_reset_type_map *rst_type_map; + enum ethtool_reset_flags rst_flags; + u32 i, size; + + if (ops->ae_dev_resetting && ops->ae_dev_resetting(h)) + return -EBUSY; + + if (!ops->set_default_reset_request || !ops->reset_event) + return -EOPNOTSUPP; + + if (h->flags & HNAE3_SUPPORT_VF) { + rst_type_map = hns3vf_reset_type; + size = ARRAY_SIZE(hns3vf_reset_type); + } else { + rst_type_map = 
hns3_reset_type; + size = ARRAY_SIZE(hns3_reset_type); + } + + for (i = 0; i < size; i++) { + if (rst_type_map[i].rst_flags == *flags) { + rst_type = rst_type_map[i].rst_type; + rst_flags = rst_type_map[i].rst_flags; + break; + } + } + + if (rst_type == HNAE3_NONE_RESET || + (rst_type == HNAE3_IMP_RESET && + ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2)) + return -EOPNOTSUPP; + + netdev_info(netdev, "Setting reset type %d\n", rst_type); + + ops->set_default_reset_request(ae_dev, rst_type); + + ops->reset_event(h->pdev, h); + + *flags &= ~rst_flags; + + return 0; +} + +static void hns3_change_all_ring_bd_num(struct hns3_nic_priv *priv, + u32 tx_desc_num, u32 rx_desc_num) +{ + struct hnae3_handle *h = priv->ae_handle; + int i; + + h->kinfo.num_tx_desc = tx_desc_num; + h->kinfo.num_rx_desc = rx_desc_num; + + for (i = 0; i < h->kinfo.num_tqps; i++) { + priv->ring[i].desc_num = tx_desc_num; + priv->ring[i + h->kinfo.num_tqps].desc_num = rx_desc_num; + } +} + +static struct hns3_enet_ring *hns3_backup_ringparam(struct hns3_nic_priv *priv) +{ + struct hnae3_handle *handle = priv->ae_handle; + struct hns3_enet_ring *tmp_rings; + int i; + + tmp_rings = kcalloc(handle->kinfo.num_tqps * 2, + sizeof(struct hns3_enet_ring), GFP_KERNEL); + if (!tmp_rings) + return NULL; + + for (i = 0; i < handle->kinfo.num_tqps * 2; i++) { + memcpy(&tmp_rings[i], &priv->ring[i], + sizeof(struct hns3_enet_ring)); + tmp_rings[i].skb = NULL; + } + + return tmp_rings; +} + +static int hns3_check_ringparam(struct net_device *ndev, + struct ethtool_ringparam *param, + struct kernel_ethtool_ringparam *kernel_param) +{ +#define RX_BUF_LEN_2K 2048 +#define RX_BUF_LEN_4K 4096 + + struct hns3_nic_priv *priv = netdev_priv(ndev); + + if (hns3_nic_resetting(ndev) || !priv->ring) { + netdev_err(ndev, "failed to set ringparam value, due to dev resetting or uninited\n"); + return -EBUSY; + } + + + if (param->rx_mini_pending || param->rx_jumbo_pending) + return -EINVAL; + + if (kernel_param->rx_buf_len != RX_BUF_LEN_2K && + kernel_param->rx_buf_len != RX_BUF_LEN_4K) { + netdev_err(ndev, "Rx buf len only support 2048 and 4096\n"); + return -EINVAL; + } + + if (param->tx_pending > HNS3_RING_MAX_PENDING || + param->tx_pending < HNS3_RING_MIN_PENDING || + param->rx_pending > HNS3_RING_MAX_PENDING || + param->rx_pending < HNS3_RING_MIN_PENDING) { + netdev_err(ndev, "Queue depth out of range [%d-%d]\n", + HNS3_RING_MIN_PENDING, HNS3_RING_MAX_PENDING); + return -EINVAL; + } + + return 0; +} + +static bool +hns3_is_ringparam_changed(struct net_device *ndev, + struct ethtool_ringparam *param, + struct kernel_ethtool_ringparam *kernel_param, + struct hns3_ring_param *old_ringparam, + struct hns3_ring_param *new_ringparam) +{ + struct hns3_nic_priv *priv = netdev_priv(ndev); + struct hnae3_handle *h = priv->ae_handle; + u16 queue_num = h->kinfo.num_tqps; + + new_ringparam->tx_desc_num = ALIGN(param->tx_pending, + HNS3_RING_BD_MULTIPLE); + new_ringparam->rx_desc_num = ALIGN(param->rx_pending, + HNS3_RING_BD_MULTIPLE); + old_ringparam->tx_desc_num = priv->ring[0].desc_num; + old_ringparam->rx_desc_num = priv->ring[queue_num].desc_num; + old_ringparam->rx_buf_len = priv->ring[queue_num].buf_size; + new_ringparam->rx_buf_len = kernel_param->rx_buf_len; + + if (old_ringparam->tx_desc_num == new_ringparam->tx_desc_num && + old_ringparam->rx_desc_num == new_ringparam->rx_desc_num && + old_ringparam->rx_buf_len == new_ringparam->rx_buf_len) { + netdev_info(ndev, "descriptor number and rx buffer length not changed\n"); + return false; + } + + return 
true; +} + +static int hns3_change_rx_buf_len(struct net_device *ndev, u32 rx_buf_len) +{ + struct hns3_nic_priv *priv = netdev_priv(ndev); + struct hnae3_handle *h = priv->ae_handle; + int i; + + h->kinfo.rx_buf_len = rx_buf_len; + + for (i = 0; i < h->kinfo.num_tqps; i++) { + h->kinfo.tqp[i]->buf_size = rx_buf_len; + priv->ring[i + h->kinfo.num_tqps].buf_size = rx_buf_len; + } + + return 0; +} + +static int hns3_set_tx_push(struct net_device *netdev, u32 tx_push) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = hns3_get_handle(netdev); + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev); + u32 old_state = test_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state); + + if (!test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, ae_dev->caps) && tx_push) + return -EOPNOTSUPP; + + if (tx_push == old_state) + return 0; + + netdev_dbg(netdev, "Changing tx push from %s to %s\n", + old_state ? "on" : "off", tx_push ? "on" : "off"); + + if (tx_push) + set_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state); + else + clear_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state); + + return 0; +} + +static int hns3_set_ringparam(struct net_device *ndev, + struct ethtool_ringparam *param, + struct kernel_ethtool_ringparam *kernel_param, + struct netlink_ext_ack *extack) +{ + struct hns3_ring_param old_ringparam, new_ringparam; + struct hns3_nic_priv *priv = netdev_priv(ndev); + struct hnae3_handle *h = priv->ae_handle; + struct hns3_enet_ring *tmp_rings; + bool if_running = netif_running(ndev); + int ret, i; + + ret = hns3_check_ringparam(ndev, param, kernel_param); + if (ret) + return ret; + + ret = hns3_set_tx_push(ndev, kernel_param->tx_push); + if (ret) + return ret; + + if (!hns3_is_ringparam_changed(ndev, param, kernel_param, + &old_ringparam, &new_ringparam)) + return 0; + + tmp_rings = hns3_backup_ringparam(priv); + if (!tmp_rings) { + netdev_err(ndev, "backup ring param failed by allocating memory fail\n"); + return -ENOMEM; + } + + netdev_info(ndev, + "Changing Tx/Rx ring depth from %u/%u to %u/%u, Changing rx buffer len from %u to %u\n", + old_ringparam.tx_desc_num, old_ringparam.rx_desc_num, + new_ringparam.tx_desc_num, new_ringparam.rx_desc_num, + old_ringparam.rx_buf_len, new_ringparam.rx_buf_len); + + if (if_running) + ndev->netdev_ops->ndo_stop(ndev); + + hns3_change_all_ring_bd_num(priv, new_ringparam.tx_desc_num, + new_ringparam.rx_desc_num); + hns3_change_rx_buf_len(ndev, new_ringparam.rx_buf_len); + ret = hns3_init_all_ring(priv); + if (ret) { + netdev_err(ndev, "set ringparam fail, revert to old value(%d)\n", + ret); + + hns3_change_rx_buf_len(ndev, old_ringparam.rx_buf_len); + hns3_change_all_ring_bd_num(priv, old_ringparam.tx_desc_num, + old_ringparam.rx_desc_num); + for (i = 0; i < h->kinfo.num_tqps * 2; i++) + memcpy(&priv->ring[i], &tmp_rings[i], + sizeof(struct hns3_enet_ring)); + } else { + for (i = 0; i < h->kinfo.num_tqps * 2; i++) + hns3_fini_ring(&tmp_rings[i]); + } + + kfree(tmp_rings); + + if (if_running) + ret = ndev->netdev_ops->ndo_open(ndev); + + return ret; +} + +static int hns3_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + switch (cmd->cmd) { + case ETHTOOL_SRXFH: + if (h->ae_algo->ops->set_rss_tuple) + return h->ae_algo->ops->set_rss_tuple(h, cmd); + return -EOPNOTSUPP; + case ETHTOOL_SRXCLSRLINS: + if (h->ae_algo->ops->add_fd_entry) + return h->ae_algo->ops->add_fd_entry(h, cmd); + return -EOPNOTSUPP; + case ETHTOOL_SRXCLSRLDEL: + if (h->ae_algo->ops->del_fd_entry) + return 
h->ae_algo->ops->del_fd_entry(h, cmd); + return -EOPNOTSUPP; + default: + return -EOPNOTSUPP; + } +} + +static int hns3_nway_reset(struct net_device *netdev) +{ + struct hnae3_handle *handle = hns3_get_handle(netdev); + const struct hnae3_ae_ops *ops = handle->ae_algo->ops; + struct phy_device *phy = netdev->phydev; + int autoneg; + + if (!netif_running(netdev)) + return 0; + + if (hns3_nic_resetting(netdev)) { + netdev_err(netdev, "dev resetting!"); + return -EBUSY; + } + + if (!ops->get_autoneg || !ops->restart_autoneg) + return -EOPNOTSUPP; + + autoneg = ops->get_autoneg(handle); + if (autoneg != AUTONEG_ENABLE) { + netdev_err(netdev, + "Autoneg is off, don't support to restart it\n"); + return -EINVAL; + } + + netif_dbg(handle, drv, netdev, + "nway reset (using %s)\n", phy ? "phy" : "mac"); + + if (phy) + return genphy_restart_aneg(phy); + + return ops->restart_autoneg(handle); +} + +static void hns3_get_channels(struct net_device *netdev, + struct ethtool_channels *ch) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + if (h->ae_algo->ops->get_channels) + h->ae_algo->ops->get_channels(h, ch); +} + +static int hns3_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *cmd, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hns3_enet_coalesce *tx_coal = &priv->tx_coal; + struct hns3_enet_coalesce *rx_coal = &priv->rx_coal; + struct hnae3_handle *h = priv->ae_handle; + + if (hns3_nic_resetting(netdev)) + return -EBUSY; + + cmd->use_adaptive_tx_coalesce = tx_coal->adapt_enable; + cmd->use_adaptive_rx_coalesce = rx_coal->adapt_enable; + + cmd->tx_coalesce_usecs = tx_coal->int_gl; + cmd->rx_coalesce_usecs = rx_coal->int_gl; + + cmd->tx_coalesce_usecs_high = h->kinfo.int_rl_setting; + cmd->rx_coalesce_usecs_high = h->kinfo.int_rl_setting; + + cmd->tx_max_coalesced_frames = tx_coal->int_ql; + cmd->rx_max_coalesced_frames = rx_coal->int_ql; + + kernel_coal->use_cqe_mode_tx = (priv->tx_cqe_mode == + DIM_CQ_PERIOD_MODE_START_FROM_CQE); + kernel_coal->use_cqe_mode_rx = (priv->rx_cqe_mode == + DIM_CQ_PERIOD_MODE_START_FROM_CQE); + + return 0; +} + +static int hns3_check_gl_coalesce_para(struct net_device *netdev, + struct ethtool_coalesce *cmd) +{ + struct hnae3_handle *handle = hns3_get_handle(netdev); + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev); + u32 rx_gl, tx_gl; + + if (cmd->rx_coalesce_usecs > ae_dev->dev_specs.max_int_gl) { + netdev_err(netdev, + "invalid rx-usecs value, rx-usecs range is 0-%u\n", + ae_dev->dev_specs.max_int_gl); + return -EINVAL; + } + + if (cmd->tx_coalesce_usecs > ae_dev->dev_specs.max_int_gl) { + netdev_err(netdev, + "invalid tx-usecs value, tx-usecs range is 0-%u\n", + ae_dev->dev_specs.max_int_gl); + return -EINVAL; + } + + /* device version above V3(include V3), GL uses 1us unit, + * so the round down is not needed. 
+ */ + if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) + return 0; + + rx_gl = hns3_gl_round_down(cmd->rx_coalesce_usecs); + if (rx_gl != cmd->rx_coalesce_usecs) { + netdev_info(netdev, + "rx_usecs(%u) rounded down to %u, because it must be multiple of 2.\n", + cmd->rx_coalesce_usecs, rx_gl); + } + + tx_gl = hns3_gl_round_down(cmd->tx_coalesce_usecs); + if (tx_gl != cmd->tx_coalesce_usecs) { + netdev_info(netdev, + "tx_usecs(%u) rounded down to %u, because it must be multiple of 2.\n", + cmd->tx_coalesce_usecs, tx_gl); + } + + return 0; +} + +static int hns3_check_rl_coalesce_para(struct net_device *netdev, + struct ethtool_coalesce *cmd) +{ + u32 rl; + + if (cmd->tx_coalesce_usecs_high != cmd->rx_coalesce_usecs_high) { + netdev_err(netdev, + "tx_usecs_high must be same as rx_usecs_high.\n"); + return -EINVAL; + } + + if (cmd->rx_coalesce_usecs_high > HNS3_INT_RL_MAX) { + netdev_err(netdev, + "Invalid usecs_high value, usecs_high range is 0-%d\n", + HNS3_INT_RL_MAX); + return -EINVAL; + } + + rl = hns3_rl_round_down(cmd->rx_coalesce_usecs_high); + if (rl != cmd->rx_coalesce_usecs_high) { + netdev_info(netdev, + "usecs_high(%u) rounded down to %u, because it must be multiple of 4.\n", + cmd->rx_coalesce_usecs_high, rl); + } + + return 0; +} + +static int hns3_check_ql_coalesce_param(struct net_device *netdev, + struct ethtool_coalesce *cmd) +{ + struct hnae3_handle *handle = hns3_get_handle(netdev); + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev); + + if ((cmd->tx_max_coalesced_frames || cmd->rx_max_coalesced_frames) && + !ae_dev->dev_specs.int_ql_max) { + netdev_err(netdev, "coalesced frames is not supported\n"); + return -EOPNOTSUPP; + } + + if (cmd->tx_max_coalesced_frames > ae_dev->dev_specs.int_ql_max || + cmd->rx_max_coalesced_frames > ae_dev->dev_specs.int_ql_max) { + netdev_err(netdev, + "invalid coalesced_frames value, range is 0-%u\n", + ae_dev->dev_specs.int_ql_max); + return -ERANGE; + } + + return 0; +} + +static int +hns3_check_cqe_coalesce_param(struct net_device *netdev, + struct kernel_ethtool_coalesce *kernel_coal) +{ + struct hnae3_handle *handle = hns3_get_handle(netdev); + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev); + + if ((kernel_coal->use_cqe_mode_tx || kernel_coal->use_cqe_mode_rx) && + !hnae3_ae_dev_cq_supported(ae_dev)) { + netdev_err(netdev, "coalesced cqe mode is not supported\n"); + return -EOPNOTSUPP; + } + + return 0; +} + +static int +hns3_check_coalesce_para(struct net_device *netdev, + struct ethtool_coalesce *cmd, + struct kernel_ethtool_coalesce *kernel_coal) +{ + int ret; + + ret = hns3_check_cqe_coalesce_param(netdev, kernel_coal); + if (ret) + return ret; + + ret = hns3_check_gl_coalesce_para(netdev, cmd); + if (ret) { + netdev_err(netdev, + "Check gl coalesce param fail. ret = %d\n", ret); + return ret; + } + + ret = hns3_check_rl_coalesce_para(netdev, cmd); + if (ret) { + netdev_err(netdev, + "Check rl coalesce param fail. 
ret = %d\n", ret); + return ret; + } + + return hns3_check_ql_coalesce_param(netdev, cmd); +} + +static void hns3_set_coalesce_per_queue(struct net_device *netdev, + struct ethtool_coalesce *cmd, + u32 queue) +{ + struct hns3_enet_tqp_vector *tx_vector, *rx_vector; + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + int queue_num = h->kinfo.num_tqps; + + tx_vector = priv->ring[queue].tqp_vector; + rx_vector = priv->ring[queue_num + queue].tqp_vector; + + tx_vector->tx_group.coal.adapt_enable = + cmd->use_adaptive_tx_coalesce; + rx_vector->rx_group.coal.adapt_enable = + cmd->use_adaptive_rx_coalesce; + + tx_vector->tx_group.coal.int_gl = cmd->tx_coalesce_usecs; + rx_vector->rx_group.coal.int_gl = cmd->rx_coalesce_usecs; + + tx_vector->tx_group.coal.int_ql = cmd->tx_max_coalesced_frames; + rx_vector->rx_group.coal.int_ql = cmd->rx_max_coalesced_frames; + + hns3_set_vector_coalesce_tx_gl(tx_vector, + tx_vector->tx_group.coal.int_gl); + hns3_set_vector_coalesce_rx_gl(rx_vector, + rx_vector->rx_group.coal.int_gl); + + hns3_set_vector_coalesce_rl(tx_vector, h->kinfo.int_rl_setting); + hns3_set_vector_coalesce_rl(rx_vector, h->kinfo.int_rl_setting); + + if (tx_vector->tx_group.coal.ql_enable) + hns3_set_vector_coalesce_tx_ql(tx_vector, + tx_vector->tx_group.coal.int_ql); + if (rx_vector->rx_group.coal.ql_enable) + hns3_set_vector_coalesce_rx_ql(rx_vector, + rx_vector->rx_group.coal.int_ql); +} + +static int hns3_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *cmd, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hns3_enet_coalesce *tx_coal = &priv->tx_coal; + struct hns3_enet_coalesce *rx_coal = &priv->rx_coal; + u16 queue_num = h->kinfo.num_tqps; + enum dim_cq_period_mode tx_mode; + enum dim_cq_period_mode rx_mode; + int ret; + int i; + + if (hns3_nic_resetting(netdev)) + return -EBUSY; + + ret = hns3_check_coalesce_para(netdev, cmd, kernel_coal); + if (ret) + return ret; + + h->kinfo.int_rl_setting = + hns3_rl_round_down(cmd->rx_coalesce_usecs_high); + + tx_coal->adapt_enable = cmd->use_adaptive_tx_coalesce; + rx_coal->adapt_enable = cmd->use_adaptive_rx_coalesce; + + tx_coal->int_gl = cmd->tx_coalesce_usecs; + rx_coal->int_gl = cmd->rx_coalesce_usecs; + + tx_coal->int_ql = cmd->tx_max_coalesced_frames; + rx_coal->int_ql = cmd->rx_max_coalesced_frames; + + for (i = 0; i < queue_num; i++) + hns3_set_coalesce_per_queue(netdev, cmd, i); + + tx_mode = kernel_coal->use_cqe_mode_tx ? + DIM_CQ_PERIOD_MODE_START_FROM_CQE : + DIM_CQ_PERIOD_MODE_START_FROM_EQE; + rx_mode = kernel_coal->use_cqe_mode_rx ? 
+ DIM_CQ_PERIOD_MODE_START_FROM_CQE : + DIM_CQ_PERIOD_MODE_START_FROM_EQE; + hns3_cq_period_mode_init(priv, tx_mode, rx_mode); + + return 0; +} + +static int hns3_get_regs_len(struct net_device *netdev) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + if (!h->ae_algo->ops->get_regs_len) + return -EOPNOTSUPP; + + return h->ae_algo->ops->get_regs_len(h); +} + +static void hns3_get_regs(struct net_device *netdev, + struct ethtool_regs *cmd, void *data) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + if (!h->ae_algo->ops->get_regs) + return; + + h->ae_algo->ops->get_regs(h, &cmd->version, data); +} + +static int hns3_set_phys_id(struct net_device *netdev, + enum ethtool_phys_id_state state) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + if (!h->ae_algo->ops->set_led_id) + return -EOPNOTSUPP; + + return h->ae_algo->ops->set_led_id(h, state); +} + +static u32 hns3_get_msglevel(struct net_device *netdev) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + return h->msg_enable; +} + +static void hns3_set_msglevel(struct net_device *netdev, u32 msg_level) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + h->msg_enable = msg_level; +} + +static void hns3_get_fec_stats(struct net_device *netdev, + struct ethtool_fec_stats *fec_stats) +{ + struct hnae3_handle *handle = hns3_get_handle(netdev); + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev); + const struct hnae3_ae_ops *ops = handle->ae_algo->ops; + + if (!hnae3_ae_dev_fec_stats_supported(ae_dev) || !ops->get_fec_stats) + return; + + ops->get_fec_stats(handle, fec_stats); +} + +/* Translate local fec value into ethtool value. */ +static unsigned int loc_to_eth_fec(u8 loc_fec) +{ + u32 eth_fec = 0; + + if (loc_fec & BIT(HNAE3_FEC_AUTO)) + eth_fec |= ETHTOOL_FEC_AUTO; + if (loc_fec & BIT(HNAE3_FEC_RS)) + eth_fec |= ETHTOOL_FEC_RS; + if (loc_fec & BIT(HNAE3_FEC_LLRS)) + eth_fec |= ETHTOOL_FEC_LLRS; + if (loc_fec & BIT(HNAE3_FEC_BASER)) + eth_fec |= ETHTOOL_FEC_BASER; + if (loc_fec & BIT(HNAE3_FEC_NONE)) + eth_fec |= ETHTOOL_FEC_OFF; + + return eth_fec; +} + +/* Translate ethtool fec value into local value. 
*/ +static unsigned int eth_to_loc_fec(unsigned int eth_fec) +{ + u32 loc_fec = 0; + + if (eth_fec & ETHTOOL_FEC_OFF) + loc_fec |= BIT(HNAE3_FEC_NONE); + if (eth_fec & ETHTOOL_FEC_AUTO) + loc_fec |= BIT(HNAE3_FEC_AUTO); + if (eth_fec & ETHTOOL_FEC_RS) + loc_fec |= BIT(HNAE3_FEC_RS); + if (eth_fec & ETHTOOL_FEC_LLRS) + loc_fec |= BIT(HNAE3_FEC_LLRS); + if (eth_fec & ETHTOOL_FEC_BASER) + loc_fec |= BIT(HNAE3_FEC_BASER); + + return loc_fec; +} + +static int hns3_get_fecparam(struct net_device *netdev, + struct ethtool_fecparam *fec) +{ + struct hnae3_handle *handle = hns3_get_handle(netdev); + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev); + const struct hnae3_ae_ops *ops = handle->ae_algo->ops; + u8 fec_ability; + u8 fec_mode; + + if (!test_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps)) + return -EOPNOTSUPP; + + if (!ops->get_fec) + return -EOPNOTSUPP; + + ops->get_fec(handle, &fec_ability, &fec_mode); + + fec->fec = loc_to_eth_fec(fec_ability); + fec->active_fec = loc_to_eth_fec(fec_mode); + if (!fec->active_fec) + fec->active_fec = ETHTOOL_FEC_OFF; + + return 0; +} + +static int hns3_set_fecparam(struct net_device *netdev, + struct ethtool_fecparam *fec) +{ + struct hnae3_handle *handle = hns3_get_handle(netdev); + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev); + const struct hnae3_ae_ops *ops = handle->ae_algo->ops; + u32 fec_mode; + + if (!test_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps)) + return -EOPNOTSUPP; + + if (!ops->set_fec) + return -EOPNOTSUPP; + fec_mode = eth_to_loc_fec(fec->fec); + + netif_dbg(handle, drv, netdev, "set fecparam: mode=%u\n", fec_mode); + + return ops->set_fec(handle, fec_mode); +} + +static int hns3_get_module_info(struct net_device *netdev, + struct ethtool_modinfo *modinfo) +{ +#define HNS3_SFF_8636_V1_3 0x03 + + struct hnae3_handle *handle = hns3_get_handle(netdev); + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev); + const struct hnae3_ae_ops *ops = handle->ae_algo->ops; + struct hns3_sfp_type sfp_type; + int ret; + + if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2 || + !ops->get_module_eeprom) + return -EOPNOTSUPP; + + memset(&sfp_type, 0, sizeof(sfp_type)); + ret = ops->get_module_eeprom(handle, 0, sizeof(sfp_type) / sizeof(u8), + (u8 *)&sfp_type); + if (ret) + return ret; + + switch (sfp_type.type) { + case SFF8024_ID_SFP: + modinfo->type = ETH_MODULE_SFF_8472; + modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; + break; + case SFF8024_ID_QSFP_8438: + modinfo->type = ETH_MODULE_SFF_8436; + modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN; + break; + case SFF8024_ID_QSFP_8436_8636: + if (sfp_type.ext_type < HNS3_SFF_8636_V1_3) { + modinfo->type = ETH_MODULE_SFF_8436; + modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN; + } else { + modinfo->type = ETH_MODULE_SFF_8636; + modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN; + } + break; + case SFF8024_ID_QSFP28_8636: + modinfo->type = ETH_MODULE_SFF_8636; + modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN; + break; + default: + netdev_err(netdev, "Optical module unknown: %#x\n", + sfp_type.type); + return -EINVAL; + } + + return 0; +} + +static int hns3_get_module_eeprom(struct net_device *netdev, + struct ethtool_eeprom *ee, u8 *data) +{ + struct hnae3_handle *handle = hns3_get_handle(netdev); + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev); + const struct hnae3_ae_ops *ops = handle->ae_algo->ops; + + if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2 || + !ops->get_module_eeprom) + return -EOPNOTSUPP; + + if (!ee->len) + return -EINVAL; + + memset(data, 0, 
ee->len); + + return ops->get_module_eeprom(handle, ee->offset, ee->len, data); +} + +static u32 hns3_get_priv_flags(struct net_device *netdev) +{ + struct hnae3_handle *handle = hns3_get_handle(netdev); + + return handle->priv_flags; +} + +static int hns3_check_priv_flags(struct hnae3_handle *h, u32 changed) +{ + u32 i; + + for (i = 0; i < HNAE3_PFLAG_MAX; i++) + if ((changed & BIT(i)) && !test_bit(i, &h->supported_pflags)) { + netdev_err(h->netdev, "%s is unsupported\n", + hns3_priv_flags[i].name); + return -EOPNOTSUPP; + } + + return 0; +} + +static int hns3_set_priv_flags(struct net_device *netdev, u32 pflags) +{ + struct hnae3_handle *handle = hns3_get_handle(netdev); + u32 changed = pflags ^ handle->priv_flags; + int ret; + u32 i; + + ret = hns3_check_priv_flags(handle, changed); + if (ret) + return ret; + + for (i = 0; i < HNAE3_PFLAG_MAX; i++) { + if (changed & BIT(i)) { + bool enable = !(handle->priv_flags & BIT(i)); + + if (enable) + handle->priv_flags |= BIT(i); + else + handle->priv_flags &= ~BIT(i); + hns3_priv_flags[i].handler(netdev, enable); + } + } + + return 0; +} + +static int hns3_get_tunable(struct net_device *netdev, + const struct ethtool_tunable *tuna, + void *data) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + int ret = 0; + + switch (tuna->id) { + case ETHTOOL_TX_COPYBREAK: + /* all the tx rings have the same tx_copybreak */ + *(u32 *)data = priv->tx_copybreak; + break; + case ETHTOOL_RX_COPYBREAK: + *(u32 *)data = priv->rx_copybreak; + break; + case ETHTOOL_TX_COPYBREAK_BUF_SIZE: + *(u32 *)data = h->kinfo.tx_spare_buf_size; + break; + default: + ret = -EOPNOTSUPP; + break; + } + + return ret; +} + +static int hns3_set_tx_spare_buf_size(struct net_device *netdev, + u32 data) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + int ret; + + h->kinfo.tx_spare_buf_size = data; + + ret = hns3_reset_notify(h, HNAE3_DOWN_CLIENT); + if (ret) + return ret; + + ret = hns3_reset_notify(h, HNAE3_UNINIT_CLIENT); + if (ret) + return ret; + + ret = hns3_reset_notify(h, HNAE3_INIT_CLIENT); + if (ret) + return ret; + + ret = hns3_reset_notify(h, HNAE3_UP_CLIENT); + if (ret) + hns3_reset_notify(h, HNAE3_UNINIT_CLIENT); + + return ret; +} + +static int hns3_set_tunable(struct net_device *netdev, + const struct ethtool_tunable *tuna, + const void *data) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + u32 old_tx_spare_buf_size, new_tx_spare_buf_size; + struct hnae3_handle *h = priv->ae_handle; + int i, ret = 0; + + if (hns3_nic_resetting(netdev) || !priv->ring) { + netdev_err(netdev, "failed to set tunable value, dev resetting!"); + return -EBUSY; + } + + switch (tuna->id) { + case ETHTOOL_TX_COPYBREAK: + priv->tx_copybreak = *(u32 *)data; + + for (i = 0; i < h->kinfo.num_tqps; i++) + priv->ring[i].tx_copybreak = priv->tx_copybreak; + + break; + case ETHTOOL_RX_COPYBREAK: + priv->rx_copybreak = *(u32 *)data; + + for (i = h->kinfo.num_tqps; i < h->kinfo.num_tqps * 2; i++) + priv->ring[i].rx_copybreak = priv->rx_copybreak; + + break; + case ETHTOOL_TX_COPYBREAK_BUF_SIZE: + old_tx_spare_buf_size = h->kinfo.tx_spare_buf_size; + new_tx_spare_buf_size = *(u32 *)data; + netdev_info(netdev, "request to set tx spare buf size from %u to %u\n", + old_tx_spare_buf_size, new_tx_spare_buf_size); + ret = hns3_set_tx_spare_buf_size(netdev, new_tx_spare_buf_size); + if (ret || + (!priv->ring->tx_spare && new_tx_spare_buf_size != 0)) { + int ret1; + + netdev_warn(netdev, "change tx spare buf 
size fail, revert to old value\n"); + ret1 = hns3_set_tx_spare_buf_size(netdev, + old_tx_spare_buf_size); + if (ret1) { + netdev_err(netdev, "revert to old tx spare buf size fail\n"); + return ret1; + } + + return ret; + } + + if (!priv->ring->tx_spare) + netdev_info(netdev, "the active tx spare buf size is 0, disable tx spare buffer\n"); + else + netdev_info(netdev, "the active tx spare buf size is %u, due to page order\n", + priv->ring->tx_spare->len); + + break; + default: + ret = -EOPNOTSUPP; + break; + } + + return ret; +} + +#define HNS3_ETHTOOL_COALESCE (ETHTOOL_COALESCE_USECS | \ + ETHTOOL_COALESCE_USE_ADAPTIVE | \ + ETHTOOL_COALESCE_RX_USECS_HIGH | \ + ETHTOOL_COALESCE_TX_USECS_HIGH | \ + ETHTOOL_COALESCE_MAX_FRAMES | \ + ETHTOOL_COALESCE_USE_CQE) + +#define HNS3_ETHTOOL_RING (ETHTOOL_RING_USE_RX_BUF_LEN | \ + ETHTOOL_RING_USE_TX_PUSH) + +static int hns3_get_ts_info(struct net_device *netdev, + struct ethtool_ts_info *info) +{ + struct hnae3_handle *handle = hns3_get_handle(netdev); + + if (handle->ae_algo->ops->get_ts_info) + return handle->ae_algo->ops->get_ts_info(handle, info); + + return ethtool_op_get_ts_info(netdev, info); +} + +static const struct hns3_ethtool_link_ext_state_mapping +hns3_link_ext_state_map[] = { + {1, ETHTOOL_LINK_EXT_STATE_AUTONEG, + ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_HCD}, + {2, ETHTOOL_LINK_EXT_STATE_AUTONEG, + ETHTOOL_LINK_EXT_SUBSTATE_AN_ACK_NOT_RECEIVED}, + + {256, ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE, + ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_LINK_INHIBIT_TIMEOUT}, + {257, ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE, + ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_LINK_PARTNER_DID_NOT_SET_RECEIVER_READY}, + {512, ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE, + ETHTOOL_LINK_EXT_SUBSTATE_LT_REMOTE_FAULT}, + + {513, ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH, + ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_ACQUIRE_BLOCK_LOCK}, + {514, ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH, + ETHTOOL_LINK_EXT_SUBSTATE_LLM_FC_FEC_IS_NOT_LOCKED}, + {515, ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH, + ETHTOOL_LINK_EXT_SUBSTATE_LLM_RS_FEC_IS_NOT_LOCKED}, + + {768, ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY, + ETHTOOL_LINK_EXT_SUBSTATE_BSI_LARGE_NUMBER_OF_PHYSICAL_ERRORS}, + {769, ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY, + ETHTOOL_LINK_EXT_SUBSTATE_BSI_SERDES_REFERENCE_CLOCK_LOST}, + {770, ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY, + ETHTOOL_LINK_EXT_SUBSTATE_BSI_SERDES_ALOS}, + + {1024, ETHTOOL_LINK_EXT_STATE_NO_CABLE, 0}, + {1025, ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE, + ETHTOOL_LINK_EXT_SUBSTATE_CI_UNSUPPORTED_CABLE}, + + {1026, ETHTOOL_LINK_EXT_STATE_EEPROM_ISSUE, 0}, +}; + +static int hns3_get_link_ext_state(struct net_device *netdev, + struct ethtool_link_ext_state_info *info) +{ + const struct hns3_ethtool_link_ext_state_mapping *map; + struct hnae3_handle *h = hns3_get_handle(netdev); + u32 status_code, i; + int ret; + + if (netif_carrier_ok(netdev)) + return -ENODATA; + + if (!h->ae_algo->ops->get_link_diagnosis_info) + return -EOPNOTSUPP; + + ret = h->ae_algo->ops->get_link_diagnosis_info(h, &status_code); + if (ret) + return ret; + + for (i = 0; i < ARRAY_SIZE(hns3_link_ext_state_map); i++) { + map = &hns3_link_ext_state_map[i]; + if (map->status_code == status_code) { + info->link_ext_state = map->link_ext_state; + info->__link_ext_substate = map->link_ext_substate; + return 0; + } + } + + return -ENODATA; +} + +static const struct ethtool_ops hns3vf_ethtool_ops = { + .supported_coalesce_params = HNS3_ETHTOOL_COALESCE, + .supported_ring_params = HNS3_ETHTOOL_RING, + 
.get_drvinfo = hns3_get_drvinfo, + .get_ringparam = hns3_get_ringparam, + .set_ringparam = hns3_set_ringparam, + .get_strings = hns3_get_strings, + .get_ethtool_stats = hns3_get_stats, + .get_sset_count = hns3_get_sset_count, + .get_rxnfc = hns3_get_rxnfc, + .set_rxnfc = hns3_set_rxnfc, + .get_rxfh_key_size = hns3_get_rss_key_size, + .get_rxfh_indir_size = hns3_get_rss_indir_size, + .get_rxfh = hns3_get_rss, + .set_rxfh = hns3_set_rss, + .get_link_ksettings = hns3_get_link_ksettings, + .get_channels = hns3_get_channels, + .set_channels = hns3_set_channels, + .get_coalesce = hns3_get_coalesce, + .set_coalesce = hns3_set_coalesce, + .get_regs_len = hns3_get_regs_len, + .get_regs = hns3_get_regs, + .get_link = hns3_get_link, + .get_msglevel = hns3_get_msglevel, + .set_msglevel = hns3_set_msglevel, + .get_priv_flags = hns3_get_priv_flags, + .set_priv_flags = hns3_set_priv_flags, + .get_tunable = hns3_get_tunable, + .set_tunable = hns3_set_tunable, + .reset = hns3_set_reset, +}; + +static const struct ethtool_ops hns3_ethtool_ops = { + .supported_coalesce_params = HNS3_ETHTOOL_COALESCE, + .supported_ring_params = HNS3_ETHTOOL_RING, + .cap_link_lanes_supported = true, + .self_test = hns3_self_test, + .get_drvinfo = hns3_get_drvinfo, + .get_link = hns3_get_link, + .get_ringparam = hns3_get_ringparam, + .set_ringparam = hns3_set_ringparam, + .get_pauseparam = hns3_get_pauseparam, + .set_pauseparam = hns3_set_pauseparam, + .get_strings = hns3_get_strings, + .get_ethtool_stats = hns3_get_stats, + .get_sset_count = hns3_get_sset_count, + .get_rxnfc = hns3_get_rxnfc, + .set_rxnfc = hns3_set_rxnfc, + .get_rxfh_key_size = hns3_get_rss_key_size, + .get_rxfh_indir_size = hns3_get_rss_indir_size, + .get_rxfh = hns3_get_rss, + .set_rxfh = hns3_set_rss, + .get_link_ksettings = hns3_get_link_ksettings, + .set_link_ksettings = hns3_set_link_ksettings, + .nway_reset = hns3_nway_reset, + .get_channels = hns3_get_channels, + .set_channels = hns3_set_channels, + .get_coalesce = hns3_get_coalesce, + .set_coalesce = hns3_set_coalesce, + .get_regs_len = hns3_get_regs_len, + .get_regs = hns3_get_regs, + .set_phys_id = hns3_set_phys_id, + .get_msglevel = hns3_get_msglevel, + .set_msglevel = hns3_set_msglevel, + .get_fecparam = hns3_get_fecparam, + .set_fecparam = hns3_set_fecparam, + .get_fec_stats = hns3_get_fec_stats, + .get_module_info = hns3_get_module_info, + .get_module_eeprom = hns3_get_module_eeprom, + .get_priv_flags = hns3_get_priv_flags, + .set_priv_flags = hns3_set_priv_flags, + .get_ts_info = hns3_get_ts_info, + .get_tunable = hns3_get_tunable, + .set_tunable = hns3_set_tunable, + .reset = hns3_set_reset, + .get_link_ext_state = hns3_get_link_ext_state, +}; + +void hns3_ethtool_set_ops(struct net_device *netdev) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + if (h->flags & HNAE3_SUPPORT_VF) + netdev->ethtool_ops = &hns3vf_ethtool_ops; + else + netdev->ethtool_ops = &hns3_ethtool_ops; +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.h b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.h new file mode 100644 index 000000000..da207d1d9 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +// Copyright (c) 2021 Hisilicon Limited. 
+ +#ifndef __HNS3_ETHTOOL_H +#define __HNS3_ETHTOOL_H + +#include <linux/ethtool.h> +#include <linux/netdevice.h> + +struct hns3_stats { + char stats_string[ETH_GSTRING_LEN]; + int stats_offset; +}; + +struct hns3_sfp_type { + u8 type; + u8 ext_type; +}; + +struct hns3_pflag_desc { + char name[ETH_GSTRING_LEN]; + void (*handler)(struct net_device *netdev, bool enable); +}; + +struct hns3_ethtool_link_ext_state_mapping { + u32 status_code; + enum ethtool_link_ext_state link_ext_state; + u8 link_ext_substate; +}; + +struct hns3_ring_param { + u32 tx_desc_num; + u32 rx_desc_num; + u32 rx_buf_len; +}; + +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_trace.h b/drivers/net/ethernet/hisilicon/hns3/hns3_trace.h new file mode 100644 index 000000000..b8a1ecb4b --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_trace.h @@ -0,0 +1,138 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2018-2019 Hisilicon Limited. */ + +/* This must be outside ifdef _HNS3_TRACE_H */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM hns3 + +#if !defined(_HNS3_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ) +#define _HNS3_TRACE_H_ + +#include <linux/tracepoint.h> + +#define DESC_NR (sizeof(struct hns3_desc) / sizeof(u32)) + +DECLARE_EVENT_CLASS(hns3_skb_template, + TP_PROTO(struct sk_buff *skb), + TP_ARGS(skb), + + TP_STRUCT__entry( + __field(unsigned int, headlen) + __field(unsigned int, len) + __field(__u8, nr_frags) + __field(__u8, ip_summed) + __field(unsigned int, hdr_len) + __field(unsigned short, gso_size) + __field(unsigned short, gso_segs) + __field(unsigned int, gso_type) + __field(bool, fraglist) + __array(__u32, size, MAX_SKB_FRAGS) + ), + + TP_fast_assign( + __entry->headlen = skb_headlen(skb); + __entry->len = skb->len; + __entry->nr_frags = skb_shinfo(skb)->nr_frags; + __entry->gso_size = skb_shinfo(skb)->gso_size; + __entry->gso_segs = skb_shinfo(skb)->gso_segs; + __entry->gso_type = skb_shinfo(skb)->gso_type; + __entry->hdr_len = skb->encapsulation ? 
+ skb_inner_tcp_all_headers(skb) : skb_tcp_all_headers(skb); + __entry->ip_summed = skb->ip_summed; + __entry->fraglist = skb_has_frag_list(skb); + hns3_shinfo_pack(skb_shinfo(skb), __entry->size); + ), + + TP_printk( + "len: %u, %u, %u, cs: %u, gso: %u, %u, %x, frag(%d %u): %s", + __entry->headlen, __entry->len, __entry->hdr_len, + __entry->ip_summed, __entry->gso_size, __entry->gso_segs, + __entry->gso_type, __entry->fraglist, __entry->nr_frags, + __print_array(__entry->size, MAX_SKB_FRAGS, sizeof(__u32)) + ) +); + +DEFINE_EVENT(hns3_skb_template, hns3_over_max_bd, + TP_PROTO(struct sk_buff *skb), + TP_ARGS(skb)); + +DEFINE_EVENT(hns3_skb_template, hns3_gro, + TP_PROTO(struct sk_buff *skb), + TP_ARGS(skb)); + +DEFINE_EVENT(hns3_skb_template, hns3_tso, + TP_PROTO(struct sk_buff *skb), + TP_ARGS(skb)); + +TRACE_EVENT(hns3_tx_desc, + TP_PROTO(struct hns3_enet_ring *ring, int cur_ntu), + TP_ARGS(ring, cur_ntu), + + TP_STRUCT__entry( + __field(int, index) + __field(int, ntu) + __field(int, ntc) + __field(dma_addr_t, desc_dma) + __array(u32, desc, DESC_NR) + __string(devname, ring->tqp->handle->kinfo.netdev->name) + ), + + TP_fast_assign( + __entry->index = ring->tqp->tqp_index; + __entry->ntu = ring->next_to_use; + __entry->ntc = ring->next_to_clean; + __entry->desc_dma = ring->desc_dma_addr, + memcpy(__entry->desc, &ring->desc[cur_ntu], + sizeof(struct hns3_desc)); + __assign_str(devname, ring->tqp->handle->kinfo.netdev->name); + ), + + TP_printk( + "%s-%d-%d/%d desc(%pad): %s", + __get_str(devname), __entry->index, __entry->ntu, + __entry->ntc, &__entry->desc_dma, + __print_array(__entry->desc, DESC_NR, sizeof(u32)) + ) +); + +TRACE_EVENT(hns3_rx_desc, + TP_PROTO(struct hns3_enet_ring *ring), + TP_ARGS(ring), + + TP_STRUCT__entry( + __field(int, index) + __field(int, ntu) + __field(int, ntc) + __field(dma_addr_t, desc_dma) + __field(dma_addr_t, buf_dma) + __array(u32, desc, DESC_NR) + __string(devname, ring->tqp->handle->kinfo.netdev->name) + ), + + TP_fast_assign( + __entry->index = ring->tqp->tqp_index; + __entry->ntu = ring->next_to_use; + __entry->ntc = ring->next_to_clean; + __entry->desc_dma = ring->desc_dma_addr; + __entry->buf_dma = ring->desc_cb[ring->next_to_clean].dma; + memcpy(__entry->desc, &ring->desc[ring->next_to_clean], + sizeof(struct hns3_desc)); + __assign_str(devname, ring->tqp->handle->kinfo.netdev->name); + ), + + TP_printk( + "%s-%d-%d/%d desc(%pad) buf(%pad): %s", + __get_str(devname), __entry->index, __entry->ntu, + __entry->ntc, &__entry->desc_dma, &__entry->buf_dma, + __print_array(__entry->desc, DESC_NR, sizeof(u32)) + ) +); + +#endif /* _HNS3_TRACE_H_ */ + +/* This must be outside ifdef _HNS3_TRACE_H */ +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE hns3_trace +#include <trace/define_trace.h> diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h new file mode 100644 index 000000000..43cada51d --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h @@ -0,0 +1,881 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +// Copyright (c) 2016-2017 Hisilicon Limited. 
+ +#ifndef __HCLGE_CMD_H +#define __HCLGE_CMD_H +#include +#include +#include +#include "hnae3.h" +#include "hclge_comm_cmd.h" + +struct hclge_dev; + +#define HCLGE_CMDQ_RX_INVLD_B 0 +#define HCLGE_CMDQ_RX_OUTVLD_B 1 + +struct hclge_misc_vector { + u8 __iomem *addr; + int vector_irq; + char name[HNAE3_INT_NAME_LEN]; +}; + +#define hclge_cmd_setup_basic_desc(desc, opcode, is_read) \ + hclge_comm_cmd_setup_basic_desc(desc, opcode, is_read) + +#define HCLGE_TQP_REG_OFFSET 0x80000 +#define HCLGE_TQP_REG_SIZE 0x200 + +#define HCLGE_TQP_MAX_SIZE_DEV_V2 1024 +#define HCLGE_TQP_EXT_REG_OFFSET 0x100 + +#define HCLGE_RCB_INIT_QUERY_TIMEOUT 10 +#define HCLGE_RCB_INIT_FLAG_EN_B 0 +#define HCLGE_RCB_INIT_FLAG_FINI_B 8 +struct hclge_config_rcb_init_cmd { + __le16 rcb_init_flag; + u8 rsv[22]; +}; + +struct hclge_tqp_map_cmd { + __le16 tqp_id; /* Absolute tqp id for in this pf */ + u8 tqp_vf; /* VF id */ +#define HCLGE_TQP_MAP_TYPE_PF 0 +#define HCLGE_TQP_MAP_TYPE_VF 1 +#define HCLGE_TQP_MAP_TYPE_B 0 +#define HCLGE_TQP_MAP_EN_B 1 + u8 tqp_flag; /* Indicate it's pf or vf tqp */ + __le16 tqp_vid; /* Virtual id in this pf/vf */ + u8 rsv[18]; +}; + +#define HCLGE_VECTOR_ELEMENTS_PER_CMD 10 + +enum hclge_int_type { + HCLGE_INT_TX, + HCLGE_INT_RX, + HCLGE_INT_EVENT, +}; + +struct hclge_ctrl_vector_chain_cmd { +#define HCLGE_VECTOR_ID_L_S 0 +#define HCLGE_VECTOR_ID_L_M GENMASK(7, 0) + u8 int_vector_id_l; + u8 int_cause_num; +#define HCLGE_INT_TYPE_S 0 +#define HCLGE_INT_TYPE_M GENMASK(1, 0) +#define HCLGE_TQP_ID_S 2 +#define HCLGE_TQP_ID_M GENMASK(12, 2) +#define HCLGE_INT_GL_IDX_S 13 +#define HCLGE_INT_GL_IDX_M GENMASK(14, 13) + __le16 tqp_type_and_id[HCLGE_VECTOR_ELEMENTS_PER_CMD]; + u8 vfid; +#define HCLGE_VECTOR_ID_H_S 8 +#define HCLGE_VECTOR_ID_H_M GENMASK(15, 8) + u8 int_vector_id_h; +}; + +#define HCLGE_MAX_TC_NUM 8 +#define HCLGE_TC0_PRI_BUF_EN_B 15 /* Bit 15 indicate enable or not */ +#define HCLGE_BUF_UNIT_S 7 /* Buf size is united by 128 bytes */ +struct hclge_tx_buff_alloc_cmd { + __le16 tx_pkt_buff[HCLGE_MAX_TC_NUM]; + u8 tx_buff_rsv[8]; +}; + +struct hclge_rx_priv_buff_cmd { + __le16 buf_num[HCLGE_MAX_TC_NUM]; + __le16 shared_buf; + u8 rsv[6]; +}; + +#define HCLGE_RX_PRIV_EN_B 15 +#define HCLGE_TC_NUM_ONE_DESC 4 +struct hclge_priv_wl { + __le16 high; + __le16 low; +}; + +struct hclge_rx_priv_wl_buf { + struct hclge_priv_wl tc_wl[HCLGE_TC_NUM_ONE_DESC]; +}; + +struct hclge_rx_com_thrd { + struct hclge_priv_wl com_thrd[HCLGE_TC_NUM_ONE_DESC]; +}; + +struct hclge_rx_com_wl { + struct hclge_priv_wl com_wl; +}; + +struct hclge_waterline { + u32 low; + u32 high; +}; + +struct hclge_tc_thrd { + u32 low; + u32 high; +}; + +struct hclge_priv_buf { + struct hclge_waterline wl; /* Waterline for low and high */ + u32 buf_size; /* TC private buffer size */ + u32 tx_buf_size; + u32 enable; /* Enable TC private buffer or not */ +}; + +struct hclge_shared_buf { + struct hclge_waterline self; + struct hclge_tc_thrd tc_thrd[HCLGE_MAX_TC_NUM]; + u32 buf_size; +}; + +struct hclge_pkt_buf_alloc { + struct hclge_priv_buf priv_buf[HCLGE_MAX_TC_NUM]; + struct hclge_shared_buf s_buf; +}; + +#define HCLGE_RX_COM_WL_EN_B 15 +struct hclge_rx_com_wl_buf_cmd { + __le16 high_wl; + __le16 low_wl; + u8 rsv[20]; +}; + +#define HCLGE_RX_PKT_EN_B 15 +struct hclge_rx_pkt_buf_cmd { + __le16 high_pkt; + __le16 low_pkt; + u8 rsv[20]; +}; + +#define HCLGE_PF_STATE_DONE_B 0 +#define HCLGE_PF_STATE_MAIN_B 1 +#define HCLGE_PF_STATE_BOND_B 2 +#define HCLGE_PF_STATE_MAC_N_B 6 +#define HCLGE_PF_MAC_NUM_MASK 0x3 +#define HCLGE_PF_STATE_MAIN 
BIT(HCLGE_PF_STATE_MAIN_B) +#define HCLGE_PF_STATE_DONE BIT(HCLGE_PF_STATE_DONE_B) +#define HCLGE_VF_RST_STATUS_CMD 4 + +struct hclge_func_status_cmd { + __le32 vf_rst_state[HCLGE_VF_RST_STATUS_CMD]; + u8 pf_state; + u8 mac_id; + u8 rsv1; + u8 pf_cnt_in_mac; + u8 pf_num; + u8 vf_num; + u8 rsv[2]; +}; + +struct hclge_pf_res_cmd { + __le16 tqp_num; + __le16 buf_size; + __le16 msixcap_localid_ba_nic; + __le16 msixcap_localid_number_nic; + __le16 pf_intr_vector_number_roce; + __le16 pf_own_fun_number; + __le16 tx_buf_size; + __le16 dv_buf_size; + __le16 ext_tqp_num; + u8 rsv[6]; +}; + +#define HCLGE_CFG_OFFSET_S 0 +#define HCLGE_CFG_OFFSET_M GENMASK(19, 0) +#define HCLGE_CFG_RD_LEN_S 24 +#define HCLGE_CFG_RD_LEN_M GENMASK(27, 24) +#define HCLGE_CFG_RD_LEN_BYTES 16 +#define HCLGE_CFG_RD_LEN_UNIT 4 + +#define HCLGE_CFG_TC_NUM_S 8 +#define HCLGE_CFG_TC_NUM_M GENMASK(15, 8) +#define HCLGE_CFG_TQP_DESC_N_S 16 +#define HCLGE_CFG_TQP_DESC_N_M GENMASK(31, 16) +#define HCLGE_CFG_PHY_ADDR_S 0 +#define HCLGE_CFG_PHY_ADDR_M GENMASK(7, 0) +#define HCLGE_CFG_MEDIA_TP_S 8 +#define HCLGE_CFG_MEDIA_TP_M GENMASK(15, 8) +#define HCLGE_CFG_RX_BUF_LEN_S 16 +#define HCLGE_CFG_RX_BUF_LEN_M GENMASK(31, 16) +#define HCLGE_CFG_MAC_ADDR_H_S 0 +#define HCLGE_CFG_MAC_ADDR_H_M GENMASK(15, 0) +#define HCLGE_CFG_DEFAULT_SPEED_S 16 +#define HCLGE_CFG_DEFAULT_SPEED_M GENMASK(23, 16) +#define HCLGE_CFG_RSS_SIZE_S 24 +#define HCLGE_CFG_RSS_SIZE_M GENMASK(31, 24) +#define HCLGE_CFG_SPEED_ABILITY_S 0 +#define HCLGE_CFG_SPEED_ABILITY_M GENMASK(7, 0) +#define HCLGE_CFG_SPEED_ABILITY_EXT_S 10 +#define HCLGE_CFG_SPEED_ABILITY_EXT_M GENMASK(15, 10) +#define HCLGE_CFG_VLAN_FLTR_CAP_S 8 +#define HCLGE_CFG_VLAN_FLTR_CAP_M GENMASK(9, 8) +#define HCLGE_CFG_UMV_TBL_SPACE_S 16 +#define HCLGE_CFG_UMV_TBL_SPACE_M GENMASK(31, 16) +#define HCLGE_CFG_PF_RSS_SIZE_S 0 +#define HCLGE_CFG_PF_RSS_SIZE_M GENMASK(3, 0) +#define HCLGE_CFG_TX_SPARE_BUF_SIZE_S 4 +#define HCLGE_CFG_TX_SPARE_BUF_SIZE_M GENMASK(15, 4) + +#define HCLGE_CFG_CMD_CNT 4 + +struct hclge_cfg_param_cmd { + __le32 offset; + __le32 rsv; + __le32 param[HCLGE_CFG_CMD_CNT]; +}; + +#define HCLGE_MAC_MODE 0x0 +#define HCLGE_DESC_NUM 0x40 + +#define HCLGE_ALLOC_VALID_B 0 +struct hclge_vf_num_cmd { + u8 alloc_valid; + u8 rsv[23]; +}; + +#define HCLGE_RSS_DEFAULT_OUTPORT_B 4 + +#define HCLGE_RSS_CFG_TBL_SIZE_H 4 +#define HCLGE_RSS_CFG_TBL_BW_L 8U + +#define HCLGE_RSS_TC_OFFSET_S 0 +#define HCLGE_RSS_TC_OFFSET_M GENMASK(10, 0) +#define HCLGE_RSS_TC_SIZE_MSB_B 11 +#define HCLGE_RSS_TC_SIZE_S 12 +#define HCLGE_RSS_TC_SIZE_M GENMASK(14, 12) +#define HCLGE_RSS_TC_SIZE_MSB_OFFSET 3 +#define HCLGE_RSS_TC_VALID_B 15 + +#define HCLGE_LINK_STATUS_UP_B 0 +#define HCLGE_LINK_STATUS_UP_M BIT(HCLGE_LINK_STATUS_UP_B) +struct hclge_link_status_cmd { + u8 status; + u8 rsv[23]; +}; + +/* for DEVICE_VERSION_V1/2, reference to promisc cmd byte8 */ +#define HCLGE_PROMISC_EN_UC 1 +#define HCLGE_PROMISC_EN_MC 2 +#define HCLGE_PROMISC_EN_BC 3 +#define HCLGE_PROMISC_TX_EN 4 +#define HCLGE_PROMISC_RX_EN 5 + +/* for DEVICE_VERSION_V3, reference to promisc cmd byte10 */ +#define HCLGE_PROMISC_UC_RX_EN 2 +#define HCLGE_PROMISC_MC_RX_EN 3 +#define HCLGE_PROMISC_BC_RX_EN 4 +#define HCLGE_PROMISC_UC_TX_EN 5 +#define HCLGE_PROMISC_MC_TX_EN 6 +#define HCLGE_PROMISC_BC_TX_EN 7 + +struct hclge_promisc_cfg_cmd { + u8 promisc; + u8 vf_id; + u8 extend_promisc; + u8 rsv0[21]; +}; + +enum hclge_promisc_type { + HCLGE_UNICAST = 1, + HCLGE_MULTICAST = 2, + HCLGE_BROADCAST = 3, +}; + +#define HCLGE_MAC_TX_EN_B 6 +#define 
HCLGE_MAC_RX_EN_B 7 +#define HCLGE_MAC_PAD_TX_B 11 +#define HCLGE_MAC_PAD_RX_B 12 +#define HCLGE_MAC_1588_TX_B 13 +#define HCLGE_MAC_1588_RX_B 14 +#define HCLGE_MAC_APP_LP_B 15 +#define HCLGE_MAC_LINE_LP_B 16 +#define HCLGE_MAC_FCS_TX_B 17 +#define HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B 18 +#define HCLGE_MAC_RX_FCS_STRIP_B 19 +#define HCLGE_MAC_RX_FCS_B 20 +#define HCLGE_MAC_TX_UNDER_MIN_ERR_B 21 +#define HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B 22 + +struct hclge_config_mac_mode_cmd { + __le32 txrx_pad_fcs_loop_en; + u8 rsv[20]; +}; + +struct hclge_pf_rst_sync_cmd { +#define HCLGE_PF_RST_ALL_VF_RDY_B 0 + u8 all_vf_ready; + u8 rsv[23]; +}; + +#define HCLGE_CFG_SPEED_S 0 +#define HCLGE_CFG_SPEED_M GENMASK(5, 0) + +#define HCLGE_CFG_DUPLEX_B 7 +#define HCLGE_CFG_DUPLEX_M BIT(HCLGE_CFG_DUPLEX_B) + +struct hclge_config_mac_speed_dup_cmd { + u8 speed_dup; + +#define HCLGE_CFG_MAC_SPEED_CHANGE_EN_B 0 + u8 mac_change_fec_en; + u8 rsv[4]; + u8 lane_num; + u8 rsv1[17]; +}; + +#define HCLGE_TQP_ENABLE_B 0 + +#define HCLGE_MAC_CFG_AN_EN_B 0 +#define HCLGE_MAC_CFG_AN_INT_EN_B 1 +#define HCLGE_MAC_CFG_AN_INT_MSK_B 2 +#define HCLGE_MAC_CFG_AN_INT_CLR_B 3 +#define HCLGE_MAC_CFG_AN_RST_B 4 + +#define HCLGE_MAC_CFG_AN_EN BIT(HCLGE_MAC_CFG_AN_EN_B) + +struct hclge_config_auto_neg_cmd { + __le32 cfg_an_cmd_flag; + u8 rsv[20]; +}; + +struct hclge_sfp_info_cmd { + __le32 speed; + u8 query_type; /* 0: sfp speed, 1: active speed */ + u8 active_fec; + u8 autoneg; /* autoneg state */ + u8 autoneg_ability; /* whether support autoneg */ + __le32 speed_ability; /* speed ability for current media */ + __le32 module_type; + u8 fec_ability; + u8 lane_num; + u8 rsv[6]; +}; + +#define HCLGE_MAC_CFG_FEC_AUTO_EN_B 0 +#define HCLGE_MAC_CFG_FEC_MODE_S 1 +#define HCLGE_MAC_CFG_FEC_MODE_M GENMASK(3, 1) +#define HCLGE_MAC_CFG_FEC_SET_DEF_B 0 +#define HCLGE_MAC_CFG_FEC_CLR_DEF_B 1 + +#define HCLGE_MAC_FEC_OFF 0 +#define HCLGE_MAC_FEC_BASER 1 +#define HCLGE_MAC_FEC_RS 2 +#define HCLGE_MAC_FEC_LLRS 3 +struct hclge_config_fec_cmd { + u8 fec_mode; + u8 default_config; + u8 rsv[22]; +}; + +#define HCLGE_FEC_STATS_CMD_NUM 4 + +struct hclge_query_fec_stats_cmd { + /* fec rs mode total stats */ + __le32 rs_fec_corr_blocks; + __le32 rs_fec_uncorr_blocks; + __le32 rs_fec_error_blocks; + /* fec base-r mode per lanes stats */ + u8 base_r_lane_num; + u8 rsv[3]; + __le32 base_r_fec_corr_blocks; + __le32 base_r_fec_uncorr_blocks; +}; + +#define HCLGE_MAC_UPLINK_PORT 0x100 + +struct hclge_config_max_frm_size_cmd { + __le16 max_frm_size; + u8 min_frm_size; + u8 rsv[21]; +}; + +enum hclge_mac_vlan_tbl_opcode { + HCLGE_MAC_VLAN_ADD, /* Add new or modify mac_vlan */ + HCLGE_MAC_VLAN_UPDATE, /* Modify other fields of this table */ + HCLGE_MAC_VLAN_REMOVE, /* Remove a entry through mac_vlan key */ + HCLGE_MAC_VLAN_LKUP, /* Lookup a entry through mac_vlan key */ +}; + +enum hclge_mac_vlan_add_resp_code { + HCLGE_ADD_UC_OVERFLOW = 2, /* ADD failed for UC overflow */ + HCLGE_ADD_MC_OVERFLOW, /* ADD failed for MC overflow */ +}; + +#define HCLGE_MAC_VLAN_BIT0_EN_B 0 +#define HCLGE_MAC_VLAN_BIT1_EN_B 1 +#define HCLGE_MAC_EPORT_SW_EN_B 12 +#define HCLGE_MAC_EPORT_TYPE_B 11 +#define HCLGE_MAC_EPORT_VFID_S 3 +#define HCLGE_MAC_EPORT_VFID_M GENMASK(10, 3) +#define HCLGE_MAC_EPORT_PFID_S 0 +#define HCLGE_MAC_EPORT_PFID_M GENMASK(2, 0) +struct hclge_mac_vlan_tbl_entry_cmd { + u8 flags; + u8 resp_code; + __le16 vlan_tag; + __le32 mac_addr_hi32; + __le16 mac_addr_lo16; + __le16 rsv1; + u8 entry_type; + u8 mc_mac_en; + __le16 egress_port; + __le16 egress_queue; + u8 rsv2[6]; +}; 
+ +#define HCLGE_UMV_SPC_ALC_B 0 +struct hclge_umv_spc_alc_cmd { + u8 allocate; + u8 rsv1[3]; + __le32 space_size; + u8 rsv2[16]; +}; + +#define HCLGE_MAC_MGR_MASK_VLAN_B BIT(0) +#define HCLGE_MAC_MGR_MASK_MAC_B BIT(1) +#define HCLGE_MAC_MGR_MASK_ETHERTYPE_B BIT(2) + +struct hclge_mac_mgr_tbl_entry_cmd { + u8 flags; + u8 resp_code; + __le16 vlan_tag; + u8 mac_addr[ETH_ALEN]; + __le16 rsv1; + __le16 ethter_type; + __le16 egress_port; + __le16 egress_queue; + u8 sw_port_id_aware; + u8 rsv2; + u8 i_port_bitmap; + u8 i_port_direction; + u8 rsv3[2]; +}; + +struct hclge_vlan_filter_ctrl_cmd { + u8 vlan_type; + u8 vlan_fe; + u8 rsv1[2]; + u8 vf_id; + u8 rsv2[19]; +}; + +#define HCLGE_VLAN_ID_OFFSET_STEP 160 +#define HCLGE_VLAN_BYTE_SIZE 8 +#define HCLGE_VLAN_OFFSET_BITMAP \ + (HCLGE_VLAN_ID_OFFSET_STEP / HCLGE_VLAN_BYTE_SIZE) + +struct hclge_vlan_filter_pf_cfg_cmd { + u8 vlan_offset; + u8 vlan_cfg; + u8 rsv[2]; + u8 vlan_offset_bitmap[HCLGE_VLAN_OFFSET_BITMAP]; +}; + +#define HCLGE_MAX_VF_BYTES 16 + +struct hclge_vlan_filter_vf_cfg_cmd { + __le16 vlan_id; + u8 resp_code; + u8 rsv; + u8 vlan_cfg; + u8 rsv1[3]; + u8 vf_bitmap[HCLGE_MAX_VF_BYTES]; +}; + +#define HCLGE_INGRESS_BYPASS_B 0 +struct hclge_port_vlan_filter_bypass_cmd { + u8 bypass_state; + u8 rsv1[3]; + u8 vf_id; + u8 rsv2[19]; +}; + +#define HCLGE_SWITCH_ANTI_SPOOF_B 0U +#define HCLGE_SWITCH_ALW_LPBK_B 1U +#define HCLGE_SWITCH_ALW_LCL_LPBK_B 2U +#define HCLGE_SWITCH_ALW_DST_OVRD_B 3U +#define HCLGE_SWITCH_NO_MASK 0x0 +#define HCLGE_SWITCH_ANTI_SPOOF_MASK 0xFE +#define HCLGE_SWITCH_ALW_LPBK_MASK 0xFD +#define HCLGE_SWITCH_ALW_LCL_LPBK_MASK 0xFB +#define HCLGE_SWITCH_LW_DST_OVRD_MASK 0xF7 + +struct hclge_mac_vlan_switch_cmd { + u8 roce_sel; + u8 rsv1[3]; + __le32 func_id; + u8 switch_param; + u8 rsv2[3]; + u8 param_mask; + u8 rsv3[11]; +}; + +enum hclge_mac_vlan_cfg_sel { + HCLGE_MAC_VLAN_NIC_SEL = 0, + HCLGE_MAC_VLAN_ROCE_SEL, +}; + +#define HCLGE_ACCEPT_TAG1_B 0 +#define HCLGE_ACCEPT_UNTAG1_B 1 +#define HCLGE_PORT_INS_TAG1_EN_B 2 +#define HCLGE_PORT_INS_TAG2_EN_B 3 +#define HCLGE_CFG_NIC_ROCE_SEL_B 4 +#define HCLGE_ACCEPT_TAG2_B 5 +#define HCLGE_ACCEPT_UNTAG2_B 6 +#define HCLGE_TAG_SHIFT_MODE_EN_B 7 +#define HCLGE_VF_NUM_PER_BYTE 8 + +struct hclge_vport_vtag_tx_cfg_cmd { + u8 vport_vlan_cfg; + u8 vf_offset; + u8 rsv1[2]; + __le16 def_vlan_tag1; + __le16 def_vlan_tag2; + u8 vf_bitmap[HCLGE_VF_NUM_PER_BYTE]; + u8 rsv2[8]; +}; + +#define HCLGE_REM_TAG1_EN_B 0 +#define HCLGE_REM_TAG2_EN_B 1 +#define HCLGE_SHOW_TAG1_EN_B 2 +#define HCLGE_SHOW_TAG2_EN_B 3 +#define HCLGE_DISCARD_TAG1_EN_B 5 +#define HCLGE_DISCARD_TAG2_EN_B 6 +struct hclge_vport_vtag_rx_cfg_cmd { + u8 vport_vlan_cfg; + u8 vf_offset; + u8 rsv1[6]; + u8 vf_bitmap[HCLGE_VF_NUM_PER_BYTE]; + u8 rsv2[8]; +}; + +struct hclge_tx_vlan_type_cfg_cmd { + __le16 ot_vlan_type; + __le16 in_vlan_type; + u8 rsv[20]; +}; + +struct hclge_rx_vlan_type_cfg_cmd { + __le16 ot_fst_vlan_type; + __le16 ot_sec_vlan_type; + __le16 in_fst_vlan_type; + __le16 in_sec_vlan_type; + u8 rsv[16]; +}; + +struct hclge_cfg_com_tqp_queue_cmd { + __le16 tqp_id; + __le16 stream_id; + u8 enable; + u8 rsv[19]; +}; + +struct hclge_cfg_tx_queue_pointer_cmd { + __le16 tqp_id; + __le16 tx_tail; + __le16 tx_head; + __le16 fbd_num; + __le16 ring_offset; + u8 rsv[14]; +}; + +#pragma pack(1) +struct hclge_mac_ethertype_idx_rd_cmd { + u8 flags; + u8 resp_code; + __le16 vlan_tag; + u8 mac_addr[ETH_ALEN]; + __le16 index; + __le16 ethter_type; + __le16 egress_port; + __le16 egress_queue; + __le16 rev0; + u8 i_port_bitmap; + u8 
i_port_direction; + u8 rev1[2]; +}; + +#pragma pack() + +#define HCLGE_TSO_MSS_MIN_S 0 +#define HCLGE_TSO_MSS_MIN_M GENMASK(13, 0) + +#define HCLGE_TSO_MSS_MAX_S 16 +#define HCLGE_TSO_MSS_MAX_M GENMASK(29, 16) + +struct hclge_cfg_tso_status_cmd { + __le16 tso_mss_min; + __le16 tso_mss_max; + u8 rsv[20]; +}; + +#define HCLGE_GRO_EN_B 0 +struct hclge_cfg_gro_status_cmd { + u8 gro_en; + u8 rsv[23]; +}; + +#define HCLGE_TSO_MSS_MIN 256 +#define HCLGE_TSO_MSS_MAX 9668 + +#define HCLGE_TQP_RESET_B 0 +struct hclge_reset_tqp_queue_cmd { + __le16 tqp_id; + u8 reset_req; + u8 ready_to_reset; + u8 rsv[20]; +}; + +#define HCLGE_CFG_RESET_MAC_B 3 +#define HCLGE_CFG_RESET_FUNC_B 7 +#define HCLGE_CFG_RESET_RCB_B 1 +struct hclge_reset_cmd { + u8 mac_func_reset; + u8 fun_reset_vfid; + u8 fun_reset_rcb; + u8 rsv; + __le16 fun_reset_rcb_vqid_start; + __le16 fun_reset_rcb_vqid_num; + u8 fun_reset_rcb_return_status; + u8 rsv1[15]; +}; + +#define HCLGE_PF_RESET_DONE_BIT BIT(0) + +struct hclge_pf_rst_done_cmd { + u8 pf_rst_done; + u8 rsv[23]; +}; + +#define HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B BIT(0) +#define HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B BIT(2) +#define HCLGE_CMD_GE_PHY_INNER_LOOP_B BIT(3) +#define HCLGE_CMD_COMMON_LB_DONE_B BIT(0) +#define HCLGE_CMD_COMMON_LB_SUCCESS_B BIT(1) +struct hclge_common_lb_cmd { + u8 mask; + u8 enable; + u8 result; + u8 rsv[21]; +}; + +#define HCLGE_DEFAULT_TX_BUF 0x4000 /* 16k bytes */ +#define HCLGE_TOTAL_PKT_BUF 0x108000 /* 1.03125M bytes */ +#define HCLGE_DEFAULT_DV 0xA000 /* 40k byte */ +#define HCLGE_DEFAULT_NON_DCB_DV 0x7800 /* 30K byte */ +#define HCLGE_NON_DCB_ADDITIONAL_BUF 0x1400 /* 5120 byte */ + +#define HCLGE_LED_LOCATE_STATE_S 0 +#define HCLGE_LED_LOCATE_STATE_M GENMASK(1, 0) + +struct hclge_set_led_state_cmd { + u8 rsv1[3]; + u8 locate_led_config; + u8 rsv2[20]; +}; + +struct hclge_get_fd_mode_cmd { + u8 mode; + u8 enable; + u8 rsv[22]; +}; + +struct hclge_get_fd_allocation_cmd { + __le32 stage1_entry_num; + __le32 stage2_entry_num; + __le16 stage1_counter_num; + __le16 stage2_counter_num; + u8 rsv[12]; +}; + +struct hclge_set_fd_key_config_cmd { + u8 stage; + u8 key_select; + u8 inner_sipv6_word_en; + u8 inner_dipv6_word_en; + u8 outer_sipv6_word_en; + u8 outer_dipv6_word_en; + u8 rsv1[2]; + __le32 tuple_mask; + __le32 meta_data_mask; + u8 rsv2[8]; +}; + +#define HCLGE_FD_EPORT_SW_EN_B 0 +struct hclge_fd_tcam_config_1_cmd { + u8 stage; + u8 xy_sel; + u8 port_info; + u8 rsv1[1]; + __le32 index; + u8 entry_vld; + u8 rsv2[7]; + u8 tcam_data[8]; +}; + +struct hclge_fd_tcam_config_2_cmd { + u8 tcam_data[24]; +}; + +struct hclge_fd_tcam_config_3_cmd { + u8 tcam_data[20]; + u8 rsv[4]; +}; + +#define HCLGE_FD_AD_DROP_B 0 +#define HCLGE_FD_AD_DIRECT_QID_B 1 +#define HCLGE_FD_AD_QID_S 2 +#define HCLGE_FD_AD_QID_M GENMASK(11, 2) +#define HCLGE_FD_AD_USE_COUNTER_B 12 +#define HCLGE_FD_AD_COUNTER_NUM_S 13 +#define HCLGE_FD_AD_COUNTER_NUM_M GENMASK(20, 13) +#define HCLGE_FD_AD_NXT_STEP_B 20 +#define HCLGE_FD_AD_NXT_KEY_S 21 +#define HCLGE_FD_AD_NXT_KEY_M GENMASK(25, 21) +#define HCLGE_FD_AD_WR_RULE_ID_B 0 +#define HCLGE_FD_AD_RULE_ID_S 1 +#define HCLGE_FD_AD_RULE_ID_M GENMASK(12, 1) +#define HCLGE_FD_AD_TC_OVRD_B 16 +#define HCLGE_FD_AD_TC_SIZE_S 17 +#define HCLGE_FD_AD_TC_SIZE_M GENMASK(20, 17) + +struct hclge_fd_ad_config_cmd { + u8 stage; + u8 rsv1[3]; + __le32 index; + __le64 ad_data; + u8 rsv2[8]; +}; + +struct hclge_fd_ad_cnt_read_cmd { + u8 rsv0[4]; + __le16 index; + u8 rsv1[2]; + __le64 cnt; + u8 rsv2[8]; +}; + +#define HCLGE_FD_USER_DEF_OFT_S 0 +#define 
HCLGE_FD_USER_DEF_OFT_M GENMASK(14, 0) +#define HCLGE_FD_USER_DEF_EN_B 15 +struct hclge_fd_user_def_cfg_cmd { + __le16 ol2_cfg; + __le16 l2_cfg; + __le16 ol3_cfg; + __le16 l3_cfg; + __le16 ol4_cfg; + __le16 l4_cfg; + u8 rsv[12]; +}; + +struct hclge_get_imp_bd_cmd { + __le32 bd_num; + u8 rsv[20]; +}; + +struct hclge_query_ppu_pf_other_int_dfx_cmd { + __le16 over_8bd_no_fe_qid; + __le16 over_8bd_no_fe_vf_id; + __le16 tso_mss_cmp_min_err_qid; + __le16 tso_mss_cmp_min_err_vf_id; + __le16 tso_mss_cmp_max_err_qid; + __le16 tso_mss_cmp_max_err_vf_id; + __le16 tx_rd_fbd_poison_qid; + __le16 tx_rd_fbd_poison_vf_id; + __le16 rx_rd_fbd_poison_qid; + __le16 rx_rd_fbd_poison_vf_id; + u8 rsv[4]; +}; + +#define HCLGE_SFP_INFO_CMD_NUM 6 +#define HCLGE_SFP_INFO_BD0_LEN 20 +#define HCLGE_SFP_INFO_BDX_LEN 24 +#define HCLGE_SFP_INFO_MAX_LEN \ + (HCLGE_SFP_INFO_BD0_LEN + \ + (HCLGE_SFP_INFO_CMD_NUM - 1) * HCLGE_SFP_INFO_BDX_LEN) + +struct hclge_sfp_info_bd0_cmd { + __le16 offset; + __le16 read_len; + u8 data[HCLGE_SFP_INFO_BD0_LEN]; +}; + +#define HCLGE_QUERY_DEV_SPECS_BD_NUM 4 + +struct hclge_dev_specs_0_cmd { + __le32 rsv0; + __le32 mac_entry_num; + __le32 mng_entry_num; + __le16 rss_ind_tbl_size; + __le16 rss_key_size; + __le16 int_ql_max; + u8 max_non_tso_bd_num; + u8 rsv1; + __le32 max_tm_rate; +}; + +#define HCLGE_DEF_MAX_INT_GL 0x1FE0U + +struct hclge_dev_specs_1_cmd { + __le16 max_frm_size; + __le16 max_qset_num; + __le16 max_int_gl; + u8 rsv0[2]; + __le16 umv_size; + __le16 mc_mac_size; + u8 rsv1[12]; +}; + +/* mac speed type defined in firmware command */ +enum HCLGE_FIRMWARE_MAC_SPEED { + HCLGE_FW_MAC_SPEED_1G, + HCLGE_FW_MAC_SPEED_10G, + HCLGE_FW_MAC_SPEED_25G, + HCLGE_FW_MAC_SPEED_40G, + HCLGE_FW_MAC_SPEED_50G, + HCLGE_FW_MAC_SPEED_100G, + HCLGE_FW_MAC_SPEED_10M, + HCLGE_FW_MAC_SPEED_100M, + HCLGE_FW_MAC_SPEED_200G, +}; + +#define HCLGE_PHY_LINK_SETTING_BD_NUM 2 + +struct hclge_phy_link_ksetting_0_cmd { + __le32 speed; + u8 duplex; + u8 autoneg; + u8 eth_tp_mdix; + u8 eth_tp_mdix_ctrl; + u8 port; + u8 transceiver; + u8 phy_address; + u8 rsv; + __le32 supported; + __le32 advertising; + __le32 lp_advertising; +}; + +struct hclge_phy_link_ksetting_1_cmd { + u8 master_slave_cfg; + u8 master_slave_state; + u8 rsv[22]; +}; + +struct hclge_phy_reg_cmd { + __le16 reg_addr; + u8 rsv0[2]; + __le16 reg_val; + u8 rsv1[18]; +}; + +struct hclge_hw; +int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num); +enum hclge_comm_cmd_status hclge_cmd_mdio_write(struct hclge_hw *hw, + struct hclge_desc *desc); +enum hclge_comm_cmd_status hclge_cmd_mdio_read(struct hclge_hw *hw, + struct hclge_desc *desc); +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c new file mode 100644 index 000000000..2740f0d70 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c @@ -0,0 +1,656 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. 
+ +#include "hclge_main.h" +#include "hclge_dcb.h" +#include "hclge_tm.h" +#include "hnae3.h" + +#define BW_PERCENT 100 + +static int hclge_ieee_ets_to_tm_info(struct hclge_dev *hdev, + struct ieee_ets *ets) +{ + u8 i; + + for (i = 0; i < HNAE3_MAX_TC; i++) { + switch (ets->tc_tsa[i]) { + case IEEE_8021QAZ_TSA_STRICT: + hdev->tm_info.tc_info[i].tc_sch_mode = + HCLGE_SCH_MODE_SP; + hdev->tm_info.pg_info[0].tc_dwrr[i] = 0; + break; + case IEEE_8021QAZ_TSA_ETS: + hdev->tm_info.tc_info[i].tc_sch_mode = + HCLGE_SCH_MODE_DWRR; + hdev->tm_info.pg_info[0].tc_dwrr[i] = + ets->tc_tx_bw[i]; + break; + default: + /* Hardware only supports SP (strict priority) + * or ETS (enhanced transmission selection) + * algorithms, if we receive some other value + * from dcbnl, then throw an error. + */ + return -EINVAL; + } + } + + hclge_tm_prio_tc_info_update(hdev, ets->prio_tc); + + return 0; +} + +static void hclge_tm_info_to_ieee_ets(struct hclge_dev *hdev, + struct ieee_ets *ets) +{ + u32 i; + + memset(ets, 0, sizeof(*ets)); + ets->willing = 1; + ets->ets_cap = hdev->tc_max; + + for (i = 0; i < HNAE3_MAX_TC; i++) { + ets->prio_tc[i] = hdev->tm_info.prio_tc[i]; + if (i < hdev->tm_info.num_tc) + ets->tc_tx_bw[i] = hdev->tm_info.pg_info[0].tc_dwrr[i]; + else + ets->tc_tx_bw[i] = 0; + + if (hdev->tm_info.tc_info[i].tc_sch_mode == + HCLGE_SCH_MODE_SP) + ets->tc_tsa[i] = IEEE_8021QAZ_TSA_STRICT; + else + ets->tc_tsa[i] = IEEE_8021QAZ_TSA_ETS; + } +} + +/* IEEE std */ +static int hclge_ieee_getets(struct hnae3_handle *h, struct ieee_ets *ets) +{ + struct hclge_vport *vport = hclge_get_vport(h); + struct hclge_dev *hdev = vport->back; + + hclge_tm_info_to_ieee_ets(hdev, ets); + + return 0; +} + +static int hclge_dcb_common_validate(struct hclge_dev *hdev, u8 num_tc, + u8 *prio_tc) +{ + int i; + + if (num_tc > hdev->tc_max) { + dev_err(&hdev->pdev->dev, + "tc num checking failed, %u > tc_max(%u)\n", + num_tc, hdev->tc_max); + return -EINVAL; + } + + for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) { + if (prio_tc[i] >= num_tc) { + dev_err(&hdev->pdev->dev, + "prio_tc[%d] checking failed, %u >= num_tc(%u)\n", + i, prio_tc[i], num_tc); + return -EINVAL; + } + } + + if (num_tc > hdev->vport[0].alloc_tqps) { + dev_err(&hdev->pdev->dev, + "allocated tqp checking failed, %u > tqp(%u)\n", + num_tc, hdev->vport[0].alloc_tqps); + return -EINVAL; + } + + return 0; +} + +static u8 hclge_ets_tc_changed(struct hclge_dev *hdev, struct ieee_ets *ets, + bool *changed) +{ + u8 max_tc_id = 0; + u8 i; + + for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) { + if (ets->prio_tc[i] != hdev->tm_info.prio_tc[i]) + *changed = true; + + if (ets->prio_tc[i] > max_tc_id) + max_tc_id = ets->prio_tc[i]; + } + + /* return max tc number, max tc id need to plus 1 */ + return max_tc_id + 1; +} + +static int hclge_ets_sch_mode_validate(struct hclge_dev *hdev, + struct ieee_ets *ets, bool *changed, + u8 tc_num) +{ + bool has_ets_tc = false; + u32 total_ets_bw = 0; + u8 i; + + for (i = 0; i < HNAE3_MAX_TC; i++) { + switch (ets->tc_tsa[i]) { + case IEEE_8021QAZ_TSA_STRICT: + if (hdev->tm_info.tc_info[i].tc_sch_mode != + HCLGE_SCH_MODE_SP) + *changed = true; + break; + case IEEE_8021QAZ_TSA_ETS: + if (i >= tc_num) { + dev_err(&hdev->pdev->dev, + "tc%u is disabled, cannot set ets bw\n", + i); + return -EINVAL; + } + + /* The hardware will switch to sp mode if bandwidth is + * 0, so limit ets bandwidth must be greater than 0. 
+ */ + if (!ets->tc_tx_bw[i]) { + dev_err(&hdev->pdev->dev, + "tc%u ets bw cannot be 0\n", i); + return -EINVAL; + } + + if (hdev->tm_info.tc_info[i].tc_sch_mode != + HCLGE_SCH_MODE_DWRR) + *changed = true; + + total_ets_bw += ets->tc_tx_bw[i]; + has_ets_tc = true; + break; + default: + return -EINVAL; + } + } + + if (has_ets_tc && total_ets_bw != BW_PERCENT) + return -EINVAL; + + return 0; +} + +static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets, + u8 *tc, bool *changed) +{ + u8 tc_num; + int ret; + + tc_num = hclge_ets_tc_changed(hdev, ets, changed); + + ret = hclge_dcb_common_validate(hdev, tc_num, ets->prio_tc); + if (ret) + return ret; + + ret = hclge_ets_sch_mode_validate(hdev, ets, changed, tc_num); + if (ret) + return ret; + + *tc = tc_num; + if (*tc != hdev->tm_info.num_tc) + *changed = true; + + return 0; +} + +static int hclge_map_update(struct hclge_dev *hdev) +{ + int ret; + + ret = hclge_tm_schd_setup_hw(hdev); + if (ret) + return ret; + + ret = hclge_pause_setup_hw(hdev, false); + if (ret) + return ret; + + ret = hclge_buffer_alloc(hdev); + if (ret) + return ret; + + hclge_comm_rss_indir_init_cfg(hdev->ae_dev, &hdev->rss_cfg); + + return hclge_rss_init_hw(hdev); +} + +static int hclge_notify_down_uinit(struct hclge_dev *hdev) +{ + int ret; + + ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); + if (ret) + return ret; + + return hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT); +} + +static int hclge_notify_init_up(struct hclge_dev *hdev) +{ + int ret; + + ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT); + if (ret) + return ret; + + return hclge_notify_client(hdev, HNAE3_UP_CLIENT); +} + +static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets) +{ + struct hclge_vport *vport = hclge_get_vport(h); + struct net_device *netdev = h->kinfo.netdev; + struct hclge_dev *hdev = vport->back; + bool map_changed = false; + u8 num_tc = 0; + int ret; + + if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) || + h->kinfo.tc_info.mqprio_active) + return -EINVAL; + + ret = hclge_ets_validate(hdev, ets, &num_tc, &map_changed); + if (ret) + return ret; + + if (map_changed) { + netif_dbg(h, drv, netdev, "set ets\n"); + + ret = hclge_notify_down_uinit(hdev); + if (ret) + return ret; + } + + hclge_tm_schd_info_update(hdev, num_tc); + h->kinfo.tc_info.dcb_ets_active = num_tc > 1; + + ret = hclge_ieee_ets_to_tm_info(hdev, ets); + if (ret) + goto err_out; + + if (map_changed) { + ret = hclge_map_update(hdev); + if (ret) + goto err_out; + + return hclge_notify_init_up(hdev); + } + + return hclge_tm_dwrr_cfg(hdev); + +err_out: + if (!map_changed) + return ret; + + hclge_notify_init_up(hdev); + + return ret; +} + +static int hclge_ieee_getpfc(struct hnae3_handle *h, struct ieee_pfc *pfc) +{ + struct hclge_vport *vport = hclge_get_vport(h); + struct hclge_dev *hdev = vport->back; + int ret; + + memset(pfc, 0, sizeof(*pfc)); + pfc->pfc_cap = hdev->pfc_max; + pfc->pfc_en = hdev->tm_info.pfc_en; + + ret = hclge_mac_update_stats(hdev); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to update MAC stats, ret = %d.\n", ret); + return ret; + } + + hclge_pfc_tx_stats_get(hdev, pfc->requests); + hclge_pfc_rx_stats_get(hdev, pfc->indications); + + return 0; +} + +static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc) +{ + struct hclge_vport *vport = hclge_get_vport(h); + struct net_device *netdev = h->kinfo.netdev; + struct hclge_dev *hdev = vport->back; + u8 i, j, pfc_map, *prio_tc; + int ret; + + if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) + return 
-EINVAL; + + if (pfc->pfc_en == hdev->tm_info.pfc_en) + return 0; + + prio_tc = hdev->tm_info.prio_tc; + pfc_map = 0; + + for (i = 0; i < hdev->tm_info.num_tc; i++) { + for (j = 0; j < HNAE3_MAX_USER_PRIO; j++) { + if ((prio_tc[j] == i) && (pfc->pfc_en & BIT(j))) { + pfc_map |= BIT(i); + break; + } + } + } + + hdev->tm_info.hw_pfc_map = pfc_map; + hdev->tm_info.pfc_en = pfc->pfc_en; + + netif_dbg(h, drv, netdev, + "set pfc: pfc_en=%x, pfc_map=%x, num_tc=%u\n", + pfc->pfc_en, pfc_map, hdev->tm_info.num_tc); + + hclge_tm_pfc_info_update(hdev); + + ret = hclge_pause_setup_hw(hdev, false); + if (ret) + return ret; + + ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); + if (ret) + return ret; + + ret = hclge_buffer_alloc(hdev); + if (ret) { + hclge_notify_client(hdev, HNAE3_UP_CLIENT); + return ret; + } + + return hclge_notify_client(hdev, HNAE3_UP_CLIENT); +} + +static int hclge_ieee_setapp(struct hnae3_handle *h, struct dcb_app *app) +{ + struct hclge_vport *vport = hclge_get_vport(h); + struct net_device *netdev = h->kinfo.netdev; + struct hclge_dev *hdev = vport->back; + struct dcb_app old_app; + int ret; + + if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP || + app->protocol >= HNAE3_MAX_DSCP || + app->priority >= HNAE3_MAX_USER_PRIO) + return -EINVAL; + + dev_info(&hdev->pdev->dev, "setapp dscp=%u priority=%u\n", + app->protocol, app->priority); + + if (app->priority == h->kinfo.dscp_prio[app->protocol]) + return 0; + + ret = dcb_ieee_setapp(netdev, app); + if (ret) + return ret; + + old_app.selector = IEEE_8021QAZ_APP_SEL_DSCP; + old_app.protocol = app->protocol; + old_app.priority = h->kinfo.dscp_prio[app->protocol]; + + h->kinfo.dscp_prio[app->protocol] = app->priority; + ret = hclge_dscp_to_tc_map(hdev); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to set dscp to tc map, ret = %d\n", ret); + h->kinfo.dscp_prio[app->protocol] = old_app.priority; + (void)dcb_ieee_delapp(netdev, app); + return ret; + } + + vport->nic.kinfo.tc_map_mode = HNAE3_TC_MAP_MODE_DSCP; + if (old_app.priority == HNAE3_PRIO_ID_INVALID) + h->kinfo.dscp_app_cnt++; + else + ret = dcb_ieee_delapp(netdev, &old_app); + + return ret; +} + +static int hclge_ieee_delapp(struct hnae3_handle *h, struct dcb_app *app) +{ + struct hclge_vport *vport = hclge_get_vport(h); + struct net_device *netdev = h->kinfo.netdev; + struct hclge_dev *hdev = vport->back; + int ret; + + if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP || + app->protocol >= HNAE3_MAX_DSCP || + app->priority >= HNAE3_MAX_USER_PRIO || + app->priority != h->kinfo.dscp_prio[app->protocol]) + return -EINVAL; + + dev_info(&hdev->pdev->dev, "delapp dscp=%u priority=%u\n", + app->protocol, app->priority); + + ret = dcb_ieee_delapp(netdev, app); + if (ret) + return ret; + + h->kinfo.dscp_prio[app->protocol] = HNAE3_PRIO_ID_INVALID; + ret = hclge_dscp_to_tc_map(hdev); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to del dscp to tc map, ret = %d\n", ret); + h->kinfo.dscp_prio[app->protocol] = app->priority; + (void)dcb_ieee_setapp(netdev, app); + return ret; + } + + if (h->kinfo.dscp_app_cnt) + h->kinfo.dscp_app_cnt--; + + if (!h->kinfo.dscp_app_cnt) { + vport->nic.kinfo.tc_map_mode = HNAE3_TC_MAP_MODE_PRIO; + ret = hclge_up_to_tc_map(hdev); + } + + return ret; +} + +/* DCBX configuration */ +static u8 hclge_getdcbx(struct hnae3_handle *h) +{ + struct hclge_vport *vport = hclge_get_vport(h); + struct hclge_dev *hdev = vport->back; + + if (h->kinfo.tc_info.mqprio_active) + return 0; + + return hdev->dcbx_cap; +} + +static u8 hclge_setdcbx(struct hnae3_handle *h, u8 
mode) +{ + struct hclge_vport *vport = hclge_get_vport(h); + struct net_device *netdev = h->kinfo.netdev; + struct hclge_dev *hdev = vport->back; + + netif_dbg(h, drv, netdev, "set dcbx: mode=%u\n", mode); + + /* No support for LLD_MANAGED modes or CEE */ + if ((mode & DCB_CAP_DCBX_LLD_MANAGED) || + (mode & DCB_CAP_DCBX_VER_CEE) || + !(mode & DCB_CAP_DCBX_HOST)) + return 1; + + hdev->dcbx_cap = mode; + + return 0; +} + +static int hclge_mqprio_qopt_check(struct hclge_dev *hdev, + struct tc_mqprio_qopt_offload *mqprio_qopt) +{ + u16 queue_sum = 0; + int ret; + int i; + + if (!mqprio_qopt->qopt.num_tc) { + mqprio_qopt->qopt.num_tc = 1; + return 0; + } + + ret = hclge_dcb_common_validate(hdev, mqprio_qopt->qopt.num_tc, + mqprio_qopt->qopt.prio_tc_map); + if (ret) + return ret; + + for (i = 0; i < mqprio_qopt->qopt.num_tc; i++) { + if (!is_power_of_2(mqprio_qopt->qopt.count[i])) { + dev_err(&hdev->pdev->dev, + "qopt queue count must be power of 2\n"); + return -EINVAL; + } + + if (mqprio_qopt->qopt.count[i] > hdev->pf_rss_size_max) { + dev_err(&hdev->pdev->dev, + "qopt queue count should be no more than %u\n", + hdev->pf_rss_size_max); + return -EINVAL; + } + + if (mqprio_qopt->qopt.offset[i] != queue_sum) { + dev_err(&hdev->pdev->dev, + "qopt queue offset must start from 0, and being continuous\n"); + return -EINVAL; + } + + if (mqprio_qopt->min_rate[i] || mqprio_qopt->max_rate[i]) { + dev_err(&hdev->pdev->dev, + "qopt tx_rate is not supported\n"); + return -EOPNOTSUPP; + } + + queue_sum = mqprio_qopt->qopt.offset[i]; + queue_sum += mqprio_qopt->qopt.count[i]; + } + if (hdev->vport[0].alloc_tqps < queue_sum) { + dev_err(&hdev->pdev->dev, + "qopt queue count sum should be less than %u\n", + hdev->vport[0].alloc_tqps); + return -EINVAL; + } + + return 0; +} + +static void hclge_sync_mqprio_qopt(struct hnae3_tc_info *tc_info, + struct tc_mqprio_qopt_offload *mqprio_qopt) +{ + memset(tc_info, 0, sizeof(*tc_info)); + tc_info->num_tc = mqprio_qopt->qopt.num_tc; + memcpy(tc_info->prio_tc, mqprio_qopt->qopt.prio_tc_map, + sizeof_field(struct hnae3_tc_info, prio_tc)); + memcpy(tc_info->tqp_count, mqprio_qopt->qopt.count, + sizeof_field(struct hnae3_tc_info, tqp_count)); + memcpy(tc_info->tqp_offset, mqprio_qopt->qopt.offset, + sizeof_field(struct hnae3_tc_info, tqp_offset)); +} + +static int hclge_config_tc(struct hclge_dev *hdev, + struct hnae3_tc_info *tc_info) +{ + int i; + + hclge_tm_schd_info_update(hdev, tc_info->num_tc); + for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) + hdev->tm_info.prio_tc[i] = tc_info->prio_tc[i]; + + return hclge_map_update(hdev); +} + +/* Set up TC for hardware offloaded mqprio in channel mode */ +static int hclge_setup_tc(struct hnae3_handle *h, + struct tc_mqprio_qopt_offload *mqprio_qopt) +{ + struct hclge_vport *vport = hclge_get_vport(h); + struct hnae3_knic_private_info *kinfo; + struct hclge_dev *hdev = vport->back; + struct hnae3_tc_info old_tc_info; + u8 tc = mqprio_qopt->qopt.num_tc; + int ret; + + /* if client unregistered, it's not allowed to change + * mqprio configuration, which may cause uninit ring + * fail. 
+ */ + if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state)) + return -EBUSY; + + kinfo = &vport->nic.kinfo; + if (kinfo->tc_info.dcb_ets_active) + return -EINVAL; + + ret = hclge_mqprio_qopt_check(hdev, mqprio_qopt); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to check mqprio qopt params, ret = %d\n", ret); + return ret; + } + + ret = hclge_notify_down_uinit(hdev); + if (ret) + return ret; + + memcpy(&old_tc_info, &kinfo->tc_info, sizeof(old_tc_info)); + hclge_sync_mqprio_qopt(&kinfo->tc_info, mqprio_qopt); + kinfo->tc_info.mqprio_active = tc > 0; + + ret = hclge_config_tc(hdev, &kinfo->tc_info); + if (ret) + goto err_out; + + return hclge_notify_init_up(hdev); + +err_out: + if (!tc) { + dev_warn(&hdev->pdev->dev, + "failed to destroy mqprio, will active after reset, ret = %d\n", + ret); + } else { + /* roll-back */ + memcpy(&kinfo->tc_info, &old_tc_info, sizeof(old_tc_info)); + if (hclge_config_tc(hdev, &kinfo->tc_info)) + dev_err(&hdev->pdev->dev, + "failed to roll back tc configuration\n"); + } + hclge_notify_init_up(hdev); + + return ret; +} + +static const struct hnae3_dcb_ops hns3_dcb_ops = { + .ieee_getets = hclge_ieee_getets, + .ieee_setets = hclge_ieee_setets, + .ieee_getpfc = hclge_ieee_getpfc, + .ieee_setpfc = hclge_ieee_setpfc, + .ieee_setapp = hclge_ieee_setapp, + .ieee_delapp = hclge_ieee_delapp, + .getdcbx = hclge_getdcbx, + .setdcbx = hclge_setdcbx, + .setup_tc = hclge_setup_tc, +}; + +void hclge_dcb_ops_set(struct hclge_dev *hdev) +{ + struct hclge_vport *vport = hdev->vport; + struct hnae3_knic_private_info *kinfo; + + /* Hdev does not support DCB or vport is + * not a pf, then dcb_ops is not set. + */ + if (!hnae3_dev_dcb_supported(hdev) || + vport->vport_id != 0) + return; + + kinfo = &vport->nic.kinfo; + kinfo->dcb_ops = &hns3_dcb_ops; + hdev->dcbx_cap = DCB_CAP_DCBX_VER_IEEE | DCB_CAP_DCBX_HOST; +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.h new file mode 100644 index 000000000..b04702e65 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +// Copyright (c) 2016-2017 Hisilicon Limited. + +#ifndef __HCLGE_DCB_H__ +#define __HCLGE_DCB_H__ + +#include "hclge_main.h" + +#ifdef CONFIG_HNS3_DCB +void hclge_dcb_ops_set(struct hclge_dev *hdev); +#else +static inline void hclge_dcb_ops_set(struct hclge_dev *hdev) {} +#endif + +#endif /* __HCLGE_DCB_H__ */ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c new file mode 100644 index 000000000..a1c59f4aa --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c @@ -0,0 +1,2587 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) 2018-2019 Hisilicon Limited. 
*/ + +#include + +#include "hclge_debugfs.h" +#include "hclge_err.h" +#include "hclge_main.h" +#include "hclge_tm.h" +#include "hnae3.h" + +static const char * const state_str[] = { "off", "on" }; +static const char * const hclge_mac_state_str[] = { + "TO_ADD", "TO_DEL", "ACTIVE" +}; + +static const char * const tc_map_mode_str[] = { "PRIO", "DSCP" }; + +static const struct hclge_dbg_reg_type_info hclge_dbg_reg_info[] = { + { .cmd = HNAE3_DBG_CMD_REG_BIOS_COMMON, + .dfx_msg = &hclge_dbg_bios_common_reg[0], + .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_bios_common_reg), + .offset = HCLGE_DBG_DFX_BIOS_OFFSET, + .cmd = HCLGE_OPC_DFX_BIOS_COMMON_REG } }, + { .cmd = HNAE3_DBG_CMD_REG_SSU, + .dfx_msg = &hclge_dbg_ssu_reg_0[0], + .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_0), + .offset = HCLGE_DBG_DFX_SSU_0_OFFSET, + .cmd = HCLGE_OPC_DFX_SSU_REG_0 } }, + { .cmd = HNAE3_DBG_CMD_REG_SSU, + .dfx_msg = &hclge_dbg_ssu_reg_1[0], + .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_1), + .offset = HCLGE_DBG_DFX_SSU_1_OFFSET, + .cmd = HCLGE_OPC_DFX_SSU_REG_1 } }, + { .cmd = HNAE3_DBG_CMD_REG_SSU, + .dfx_msg = &hclge_dbg_ssu_reg_2[0], + .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_2), + .offset = HCLGE_DBG_DFX_SSU_2_OFFSET, + .cmd = HCLGE_OPC_DFX_SSU_REG_2 } }, + { .cmd = HNAE3_DBG_CMD_REG_IGU_EGU, + .dfx_msg = &hclge_dbg_igu_egu_reg[0], + .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_igu_egu_reg), + .offset = HCLGE_DBG_DFX_IGU_OFFSET, + .cmd = HCLGE_OPC_DFX_IGU_EGU_REG } }, + { .cmd = HNAE3_DBG_CMD_REG_RPU, + .dfx_msg = &hclge_dbg_rpu_reg_0[0], + .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rpu_reg_0), + .offset = HCLGE_DBG_DFX_RPU_0_OFFSET, + .cmd = HCLGE_OPC_DFX_RPU_REG_0 } }, + { .cmd = HNAE3_DBG_CMD_REG_RPU, + .dfx_msg = &hclge_dbg_rpu_reg_1[0], + .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rpu_reg_1), + .offset = HCLGE_DBG_DFX_RPU_1_OFFSET, + .cmd = HCLGE_OPC_DFX_RPU_REG_1 } }, + { .cmd = HNAE3_DBG_CMD_REG_NCSI, + .dfx_msg = &hclge_dbg_ncsi_reg[0], + .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ncsi_reg), + .offset = HCLGE_DBG_DFX_NCSI_OFFSET, + .cmd = HCLGE_OPC_DFX_NCSI_REG } }, + { .cmd = HNAE3_DBG_CMD_REG_RTC, + .dfx_msg = &hclge_dbg_rtc_reg[0], + .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rtc_reg), + .offset = HCLGE_DBG_DFX_RTC_OFFSET, + .cmd = HCLGE_OPC_DFX_RTC_REG } }, + { .cmd = HNAE3_DBG_CMD_REG_PPP, + .dfx_msg = &hclge_dbg_ppp_reg[0], + .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ppp_reg), + .offset = HCLGE_DBG_DFX_PPP_OFFSET, + .cmd = HCLGE_OPC_DFX_PPP_REG } }, + { .cmd = HNAE3_DBG_CMD_REG_RCB, + .dfx_msg = &hclge_dbg_rcb_reg[0], + .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rcb_reg), + .offset = HCLGE_DBG_DFX_RCB_OFFSET, + .cmd = HCLGE_OPC_DFX_RCB_REG } }, + { .cmd = HNAE3_DBG_CMD_REG_TQP, + .dfx_msg = &hclge_dbg_tqp_reg[0], + .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_tqp_reg), + .offset = HCLGE_DBG_DFX_TQP_OFFSET, + .cmd = HCLGE_OPC_DFX_TQP_REG } }, +}; + +/* make sure: len(name) + interval >= maxlen(item data) + 2, + * for example, name = "pkt_num"(len: 7), the prototype of item data is u32, + * and print as "%u"(maxlen: 10), so the interval should be at least 5. 
+ */ +static void hclge_dbg_fill_content(char *content, u16 len, + const struct hclge_dbg_item *items, + const char **result, u16 size) +{ +#define HCLGE_DBG_LINE_END_LEN 2 + char *pos = content; + u16 item_len; + u16 i; + + if (!len) { + return; + } else if (len <= HCLGE_DBG_LINE_END_LEN) { + *pos++ = '\0'; + return; + } + + memset(content, ' ', len); + len -= HCLGE_DBG_LINE_END_LEN; + + for (i = 0; i < size; i++) { + item_len = strlen(items[i].name) + items[i].interval; + if (len < item_len) + break; + + if (result) { + if (item_len < strlen(result[i])) + break; + memcpy(pos, result[i], strlen(result[i])); + } else { + memcpy(pos, items[i].name, strlen(items[i].name)); + } + pos += item_len; + len -= item_len; + } + *pos++ = '\n'; + *pos++ = '\0'; +} + +static char *hclge_dbg_get_func_id_str(char *buf, u8 id) +{ + if (id) + sprintf(buf, "vf%u", id - 1U); + else + sprintf(buf, "pf"); + + return buf; +} + +static int hclge_dbg_get_dfx_bd_num(struct hclge_dev *hdev, int offset, + u32 *bd_num) +{ + struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT]; + int entries_per_desc; + int index; + int ret; + + ret = hclge_query_bd_num_cmd_send(hdev, desc); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get dfx bd_num, offset = %d, ret = %d\n", + offset, ret); + return ret; + } + + entries_per_desc = ARRAY_SIZE(desc[0].data); + index = offset % entries_per_desc; + + *bd_num = le32_to_cpu(desc[offset / entries_per_desc].data[index]); + if (!(*bd_num)) { + dev_err(&hdev->pdev->dev, "The value of dfx bd_num is 0!\n"); + return -EINVAL; + } + + return 0; +} + +static int hclge_dbg_cmd_send(struct hclge_dev *hdev, + struct hclge_desc *desc_src, + int index, int bd_num, + enum hclge_opcode_type cmd) +{ + struct hclge_desc *desc = desc_src; + int ret, i; + + hclge_cmd_setup_basic_desc(desc, cmd, true); + desc->data[0] = cpu_to_le32(index); + + for (i = 1; i < bd_num; i++) { + desc->flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); + desc++; + hclge_cmd_setup_basic_desc(desc, cmd, true); + } + + ret = hclge_cmd_send(&hdev->hw, desc_src, bd_num); + if (ret) + dev_err(&hdev->pdev->dev, + "cmd(0x%x) send fail, ret = %d\n", cmd, ret); + return ret; +} + +static int +hclge_dbg_dump_reg_tqp(struct hclge_dev *hdev, + const struct hclge_dbg_reg_type_info *reg_info, + char *buf, int len, int *pos) +{ + const struct hclge_dbg_dfx_message *dfx_message = reg_info->dfx_msg; + const struct hclge_dbg_reg_common_msg *reg_msg = ®_info->reg_msg; + struct hclge_desc *desc_src; + u32 index, entry, i, cnt; + int bd_num, min_num, ret; + struct hclge_desc *desc; + + ret = hclge_dbg_get_dfx_bd_num(hdev, reg_msg->offset, &bd_num); + if (ret) + return ret; + + desc_src = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL); + if (!desc_src) + return -ENOMEM; + + min_num = min_t(int, bd_num * HCLGE_DESC_DATA_LEN, reg_msg->msg_num); + + for (i = 0, cnt = 0; i < min_num; i++, dfx_message++) + *pos += scnprintf(buf + *pos, len - *pos, "item%u = %s\n", + cnt++, dfx_message->message); + + for (i = 0; i < cnt; i++) + *pos += scnprintf(buf + *pos, len - *pos, "item%u\t", i); + + *pos += scnprintf(buf + *pos, len - *pos, "\n"); + + for (index = 0; index < hdev->vport[0].alloc_tqps; index++) { + dfx_message = reg_info->dfx_msg; + desc = desc_src; + ret = hclge_dbg_cmd_send(hdev, desc, index, bd_num, + reg_msg->cmd); + if (ret) + break; + + for (i = 0; i < min_num; i++, dfx_message++) { + entry = i % HCLGE_DESC_DATA_LEN; + if (i > 0 && !entry) + desc++; + + *pos += scnprintf(buf + *pos, len - *pos, "%#x\t", + 
le32_to_cpu(desc->data[entry])); + } + *pos += scnprintf(buf + *pos, len - *pos, "\n"); + } + + kfree(desc_src); + return ret; +} + +static int +hclge_dbg_dump_reg_common(struct hclge_dev *hdev, + const struct hclge_dbg_reg_type_info *reg_info, + char *buf, int len, int *pos) +{ + const struct hclge_dbg_reg_common_msg *reg_msg = ®_info->reg_msg; + const struct hclge_dbg_dfx_message *dfx_message = reg_info->dfx_msg; + struct hclge_desc *desc_src; + int bd_num, min_num, ret; + struct hclge_desc *desc; + u32 entry, i; + + ret = hclge_dbg_get_dfx_bd_num(hdev, reg_msg->offset, &bd_num); + if (ret) + return ret; + + desc_src = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL); + if (!desc_src) + return -ENOMEM; + + desc = desc_src; + + ret = hclge_dbg_cmd_send(hdev, desc, 0, bd_num, reg_msg->cmd); + if (ret) { + kfree(desc); + return ret; + } + + min_num = min_t(int, bd_num * HCLGE_DESC_DATA_LEN, reg_msg->msg_num); + + for (i = 0; i < min_num; i++, dfx_message++) { + entry = i % HCLGE_DESC_DATA_LEN; + if (i > 0 && !entry) + desc++; + if (!dfx_message->flag) + continue; + + *pos += scnprintf(buf + *pos, len - *pos, "%s: %#x\n", + dfx_message->message, + le32_to_cpu(desc->data[entry])); + } + + kfree(desc_src); + return 0; +} + +static const struct hclge_dbg_status_dfx_info hclge_dbg_mac_en_status[] = { + {HCLGE_MAC_TX_EN_B, "mac_trans_en"}, + {HCLGE_MAC_RX_EN_B, "mac_rcv_en"}, + {HCLGE_MAC_PAD_TX_B, "pad_trans_en"}, + {HCLGE_MAC_PAD_RX_B, "pad_rcv_en"}, + {HCLGE_MAC_1588_TX_B, "1588_trans_en"}, + {HCLGE_MAC_1588_RX_B, "1588_rcv_en"}, + {HCLGE_MAC_APP_LP_B, "mac_app_loop_en"}, + {HCLGE_MAC_LINE_LP_B, "mac_line_loop_en"}, + {HCLGE_MAC_FCS_TX_B, "mac_fcs_tx_en"}, + {HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, "mac_rx_oversize_truncate_en"}, + {HCLGE_MAC_RX_FCS_STRIP_B, "mac_rx_fcs_strip_en"}, + {HCLGE_MAC_RX_FCS_B, "mac_rx_fcs_en"}, + {HCLGE_MAC_TX_UNDER_MIN_ERR_B, "mac_tx_under_min_err_en"}, + {HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, "mac_tx_oversize_truncate_en"} +}; + +static int hclge_dbg_dump_mac_enable_status(struct hclge_dev *hdev, char *buf, + int len, int *pos) +{ + struct hclge_config_mac_mode_cmd *req; + struct hclge_desc desc; + u32 loop_en, i, offset; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to dump mac enable status, ret = %d\n", ret); + return ret; + } + + req = (struct hclge_config_mac_mode_cmd *)desc.data; + loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en); + + for (i = 0; i < ARRAY_SIZE(hclge_dbg_mac_en_status); i++) { + offset = hclge_dbg_mac_en_status[i].offset; + *pos += scnprintf(buf + *pos, len - *pos, "%s: %#x\n", + hclge_dbg_mac_en_status[i].message, + hnae3_get_bit(loop_en, offset)); + } + + return 0; +} + +static int hclge_dbg_dump_mac_frame_size(struct hclge_dev *hdev, char *buf, + int len, int *pos) +{ + struct hclge_config_max_frm_size_cmd *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, true); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to dump mac frame size, ret = %d\n", ret); + return ret; + } + + req = (struct hclge_config_max_frm_size_cmd *)desc.data; + + *pos += scnprintf(buf + *pos, len - *pos, "max_frame_size: %u\n", + le16_to_cpu(req->max_frm_size)); + *pos += scnprintf(buf + *pos, len - *pos, "min_frame_size: %u\n", + req->min_frm_size); + + return 0; +} + +static int hclge_dbg_dump_mac_speed_duplex(struct hclge_dev 
*hdev, char *buf, + int len, int *pos) +{ +#define HCLGE_MAC_SPEED_SHIFT 0 +#define HCLGE_MAC_SPEED_MASK GENMASK(5, 0) +#define HCLGE_MAC_DUPLEX_SHIFT 7 + + struct hclge_config_mac_speed_dup_cmd *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, true); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to dump mac speed duplex, ret = %d\n", ret); + return ret; + } + + req = (struct hclge_config_mac_speed_dup_cmd *)desc.data; + + *pos += scnprintf(buf + *pos, len - *pos, "speed: %#lx\n", + hnae3_get_field(req->speed_dup, HCLGE_MAC_SPEED_MASK, + HCLGE_MAC_SPEED_SHIFT)); + *pos += scnprintf(buf + *pos, len - *pos, "duplex: %#x\n", + hnae3_get_bit(req->speed_dup, + HCLGE_MAC_DUPLEX_SHIFT)); + return 0; +} + +static int hclge_dbg_dump_mac(struct hclge_dev *hdev, char *buf, int len) +{ + int pos = 0; + int ret; + + ret = hclge_dbg_dump_mac_enable_status(hdev, buf, len, &pos); + if (ret) + return ret; + + ret = hclge_dbg_dump_mac_frame_size(hdev, buf, len, &pos); + if (ret) + return ret; + + return hclge_dbg_dump_mac_speed_duplex(hdev, buf, len, &pos); +} + +static int hclge_dbg_dump_dcb_qset(struct hclge_dev *hdev, char *buf, int len, + int *pos) +{ + struct hclge_dbg_bitmap_cmd req; + struct hclge_desc desc; + u16 qset_id, qset_num; + int ret; + + ret = hclge_tm_get_qset_num(hdev, &qset_num); + if (ret) + return ret; + + *pos += scnprintf(buf + *pos, len - *pos, + "qset_id roce_qset_mask nic_qset_mask qset_shaping_pass qset_bp_status\n"); + for (qset_id = 0; qset_id < qset_num; qset_id++) { + ret = hclge_dbg_cmd_send(hdev, &desc, qset_id, 1, + HCLGE_OPC_QSET_DFX_STS); + if (ret) + return ret; + + req.bitmap = (u8)le32_to_cpu(desc.data[1]); + + *pos += scnprintf(buf + *pos, len - *pos, + "%04u %#x %#x %#x %#x\n", + qset_id, req.bit0, req.bit1, req.bit2, + req.bit3); + } + + return 0; +} + +static int hclge_dbg_dump_dcb_pri(struct hclge_dev *hdev, char *buf, int len, + int *pos) +{ + struct hclge_dbg_bitmap_cmd req; + struct hclge_desc desc; + u8 pri_id, pri_num; + int ret; + + ret = hclge_tm_get_pri_num(hdev, &pri_num); + if (ret) + return ret; + + *pos += scnprintf(buf + *pos, len - *pos, + "pri_id pri_mask pri_cshaping_pass pri_pshaping_pass\n"); + for (pri_id = 0; pri_id < pri_num; pri_id++) { + ret = hclge_dbg_cmd_send(hdev, &desc, pri_id, 1, + HCLGE_OPC_PRI_DFX_STS); + if (ret) + return ret; + + req.bitmap = (u8)le32_to_cpu(desc.data[1]); + + *pos += scnprintf(buf + *pos, len - *pos, + "%03u %#x %#x %#x\n", + pri_id, req.bit0, req.bit1, req.bit2); + } + + return 0; +} + +static int hclge_dbg_dump_dcb_pg(struct hclge_dev *hdev, char *buf, int len, + int *pos) +{ + struct hclge_dbg_bitmap_cmd req; + struct hclge_desc desc; + u8 pg_id; + int ret; + + *pos += scnprintf(buf + *pos, len - *pos, + "pg_id pg_mask pg_cshaping_pass pg_pshaping_pass\n"); + for (pg_id = 0; pg_id < hdev->tm_info.num_pg; pg_id++) { + ret = hclge_dbg_cmd_send(hdev, &desc, pg_id, 1, + HCLGE_OPC_PG_DFX_STS); + if (ret) + return ret; + + req.bitmap = (u8)le32_to_cpu(desc.data[1]); + + *pos += scnprintf(buf + *pos, len - *pos, + "%03u %#x %#x %#x\n", + pg_id, req.bit0, req.bit1, req.bit2); + } + + return 0; +} + +static int hclge_dbg_dump_dcb_queue(struct hclge_dev *hdev, char *buf, int len, + int *pos) +{ + struct hclge_desc desc; + u16 nq_id; + int ret; + + *pos += scnprintf(buf + *pos, len - *pos, + "nq_id sch_nic_queue_cnt sch_roce_queue_cnt\n"); + for (nq_id = 0; nq_id < hdev->num_tqps; nq_id++) { + ret = 
hclge_dbg_cmd_send(hdev, &desc, nq_id, 1, + HCLGE_OPC_SCH_NQ_CNT); + if (ret) + return ret; + + *pos += scnprintf(buf + *pos, len - *pos, "%04u %#x", + nq_id, le32_to_cpu(desc.data[1])); + + ret = hclge_dbg_cmd_send(hdev, &desc, nq_id, 1, + HCLGE_OPC_SCH_RQ_CNT); + if (ret) + return ret; + + *pos += scnprintf(buf + *pos, len - *pos, + " %#x\n", + le32_to_cpu(desc.data[1])); + } + + return 0; +} + +static int hclge_dbg_dump_dcb_port(struct hclge_dev *hdev, char *buf, int len, + int *pos) +{ + struct hclge_dbg_bitmap_cmd req; + struct hclge_desc desc; + u8 port_id = 0; + int ret; + + ret = hclge_dbg_cmd_send(hdev, &desc, port_id, 1, + HCLGE_OPC_PORT_DFX_STS); + if (ret) + return ret; + + req.bitmap = (u8)le32_to_cpu(desc.data[1]); + + *pos += scnprintf(buf + *pos, len - *pos, "port_mask: %#x\n", + req.bit0); + *pos += scnprintf(buf + *pos, len - *pos, "port_shaping_pass: %#x\n", + req.bit1); + + return 0; +} + +static int hclge_dbg_dump_dcb_tm(struct hclge_dev *hdev, char *buf, int len, + int *pos) +{ + struct hclge_desc desc[2]; + u8 port_id = 0; + int ret; + + ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1, + HCLGE_OPC_TM_INTERNAL_CNT); + if (ret) + return ret; + + *pos += scnprintf(buf + *pos, len - *pos, "SCH_NIC_NUM: %#x\n", + le32_to_cpu(desc[0].data[1])); + *pos += scnprintf(buf + *pos, len - *pos, "SCH_ROCE_NUM: %#x\n", + le32_to_cpu(desc[0].data[2])); + + ret = hclge_dbg_cmd_send(hdev, desc, port_id, 2, + HCLGE_OPC_TM_INTERNAL_STS); + if (ret) + return ret; + + *pos += scnprintf(buf + *pos, len - *pos, "pri_bp: %#x\n", + le32_to_cpu(desc[0].data[1])); + *pos += scnprintf(buf + *pos, len - *pos, "fifo_dfx_info: %#x\n", + le32_to_cpu(desc[0].data[2])); + *pos += scnprintf(buf + *pos, len - *pos, + "sch_roce_fifo_afull_gap: %#x\n", + le32_to_cpu(desc[0].data[3])); + *pos += scnprintf(buf + *pos, len - *pos, + "tx_private_waterline: %#x\n", + le32_to_cpu(desc[0].data[4])); + *pos += scnprintf(buf + *pos, len - *pos, "tm_bypass_en: %#x\n", + le32_to_cpu(desc[0].data[5])); + *pos += scnprintf(buf + *pos, len - *pos, "SSU_TM_BYPASS_EN: %#x\n", + le32_to_cpu(desc[1].data[0])); + *pos += scnprintf(buf + *pos, len - *pos, "SSU_RESERVE_CFG: %#x\n", + le32_to_cpu(desc[1].data[1])); + + if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) + return 0; + + ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1, + HCLGE_OPC_TM_INTERNAL_STS_1); + if (ret) + return ret; + + *pos += scnprintf(buf + *pos, len - *pos, "TC_MAP_SEL: %#x\n", + le32_to_cpu(desc[0].data[1])); + *pos += scnprintf(buf + *pos, len - *pos, "IGU_PFC_PRI_EN: %#x\n", + le32_to_cpu(desc[0].data[2])); + *pos += scnprintf(buf + *pos, len - *pos, "MAC_PFC_PRI_EN: %#x\n", + le32_to_cpu(desc[0].data[3])); + *pos += scnprintf(buf + *pos, len - *pos, "IGU_PRI_MAP_TC_CFG: %#x\n", + le32_to_cpu(desc[0].data[4])); + *pos += scnprintf(buf + *pos, len - *pos, + "IGU_TX_PRI_MAP_TC_CFG: %#x\n", + le32_to_cpu(desc[0].data[5])); + + return 0; +} + +static int hclge_dbg_dump_dcb(struct hclge_dev *hdev, char *buf, int len) +{ + int pos = 0; + int ret; + + ret = hclge_dbg_dump_dcb_qset(hdev, buf, len, &pos); + if (ret) + return ret; + + ret = hclge_dbg_dump_dcb_pri(hdev, buf, len, &pos); + if (ret) + return ret; + + ret = hclge_dbg_dump_dcb_pg(hdev, buf, len, &pos); + if (ret) + return ret; + + ret = hclge_dbg_dump_dcb_queue(hdev, buf, len, &pos); + if (ret) + return ret; + + ret = hclge_dbg_dump_dcb_port(hdev, buf, len, &pos); + if (ret) + return ret; + + return hclge_dbg_dump_dcb_tm(hdev, buf, len, &pos); +} + +static int hclge_dbg_dump_reg_cmd(struct 
hclge_dev *hdev, + enum hnae3_dbg_cmd cmd, char *buf, int len) +{ + const struct hclge_dbg_reg_type_info *reg_info; + int pos = 0, ret = 0; + int i; + + for (i = 0; i < ARRAY_SIZE(hclge_dbg_reg_info); i++) { + reg_info = &hclge_dbg_reg_info[i]; + if (cmd == reg_info->cmd) { + if (cmd == HNAE3_DBG_CMD_REG_TQP) + return hclge_dbg_dump_reg_tqp(hdev, reg_info, + buf, len, &pos); + + ret = hclge_dbg_dump_reg_common(hdev, reg_info, buf, + len, &pos); + if (ret) + break; + } + } + + return ret; +} + +static int hclge_dbg_dump_tc(struct hclge_dev *hdev, char *buf, int len) +{ + struct hclge_ets_tc_weight_cmd *ets_weight; + struct hclge_desc desc; + char *sch_mode_str; + int pos = 0; + int ret; + u8 i; + + if (!hnae3_dev_dcb_supported(hdev)) { + dev_err(&hdev->pdev->dev, + "Only DCB-supported dev supports tc\n"); + return -EOPNOTSUPP; + } + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, "failed to get tc weight, ret = %d\n", + ret); + return ret; + } + + ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data; + + pos += scnprintf(buf + pos, len - pos, "enabled tc number: %u\n", + hdev->tm_info.num_tc); + pos += scnprintf(buf + pos, len - pos, "weight_offset: %u\n", + ets_weight->weight_offset); + + pos += scnprintf(buf + pos, len - pos, "TC MODE WEIGHT\n"); + for (i = 0; i < HNAE3_MAX_TC; i++) { + sch_mode_str = ets_weight->tc_weight[i] ? "dwrr" : "sp"; + pos += scnprintf(buf + pos, len - pos, "%u %4s %3u\n", + i, sch_mode_str, ets_weight->tc_weight[i]); + } + + return 0; +} + +static const struct hclge_dbg_item tm_pg_items[] = { + { "ID", 2 }, + { "PRI_MAP", 2 }, + { "MODE", 2 }, + { "DWRR", 2 }, + { "C_IR_B", 2 }, + { "C_IR_U", 2 }, + { "C_IR_S", 2 }, + { "C_BS_B", 2 }, + { "C_BS_S", 2 }, + { "C_FLAG", 2 }, + { "C_RATE(Mbps)", 2 }, + { "P_IR_B", 2 }, + { "P_IR_U", 2 }, + { "P_IR_S", 2 }, + { "P_BS_B", 2 }, + { "P_BS_S", 2 }, + { "P_FLAG", 2 }, + { "P_RATE(Mbps)", 0 } +}; + +static void hclge_dbg_fill_shaper_content(struct hclge_tm_shaper_para *para, + char **result, u8 *index) +{ + sprintf(result[(*index)++], "%3u", para->ir_b); + sprintf(result[(*index)++], "%3u", para->ir_u); + sprintf(result[(*index)++], "%3u", para->ir_s); + sprintf(result[(*index)++], "%3u", para->bs_b); + sprintf(result[(*index)++], "%3u", para->bs_s); + sprintf(result[(*index)++], "%3u", para->flag); + sprintf(result[(*index)++], "%6u", para->rate); +} + +static int __hclge_dbg_dump_tm_pg(struct hclge_dev *hdev, char *data_str, + char *buf, int len) +{ + struct hclge_tm_shaper_para c_shaper_para, p_shaper_para; + char *result[ARRAY_SIZE(tm_pg_items)], *sch_mode_str; + u8 pg_id, sch_mode, weight, pri_bit_map, i, j; + char content[HCLGE_DBG_TM_INFO_LEN]; + int pos = 0; + int ret; + + for (i = 0; i < ARRAY_SIZE(tm_pg_items); i++) { + result[i] = data_str; + data_str += HCLGE_DBG_DATA_STR_LEN; + } + + hclge_dbg_fill_content(content, sizeof(content), tm_pg_items, + NULL, ARRAY_SIZE(tm_pg_items)); + pos += scnprintf(buf + pos, len - pos, "%s", content); + + for (pg_id = 0; pg_id < hdev->tm_info.num_pg; pg_id++) { + ret = hclge_tm_get_pg_to_pri_map(hdev, pg_id, &pri_bit_map); + if (ret) + return ret; + + ret = hclge_tm_get_pg_sch_mode(hdev, pg_id, &sch_mode); + if (ret) + return ret; + + ret = hclge_tm_get_pg_weight(hdev, pg_id, &weight); + if (ret) + return ret; + + ret = hclge_tm_get_pg_shaper(hdev, pg_id, + HCLGE_OPC_TM_PG_C_SHAPPING, + &c_shaper_para); + if (ret) + return ret; + + ret = hclge_tm_get_pg_shaper(hdev, 
pg_id, + HCLGE_OPC_TM_PG_P_SHAPPING, + &p_shaper_para); + if (ret) + return ret; + + sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" : + "sp"; + + j = 0; + sprintf(result[j++], "%02u", pg_id); + sprintf(result[j++], "0x%02x", pri_bit_map); + sprintf(result[j++], "%4s", sch_mode_str); + sprintf(result[j++], "%3u", weight); + hclge_dbg_fill_shaper_content(&c_shaper_para, result, &j); + hclge_dbg_fill_shaper_content(&p_shaper_para, result, &j); + + hclge_dbg_fill_content(content, sizeof(content), tm_pg_items, + (const char **)result, + ARRAY_SIZE(tm_pg_items)); + pos += scnprintf(buf + pos, len - pos, "%s", content); + } + + return 0; +} + +static int hclge_dbg_dump_tm_pg(struct hclge_dev *hdev, char *buf, int len) +{ + char *data_str; + int ret; + + data_str = kcalloc(ARRAY_SIZE(tm_pg_items), + HCLGE_DBG_DATA_STR_LEN, GFP_KERNEL); + if (!data_str) + return -ENOMEM; + + ret = __hclge_dbg_dump_tm_pg(hdev, data_str, buf, len); + + kfree(data_str); + + return ret; +} + +static int hclge_dbg_dump_tm_port(struct hclge_dev *hdev, char *buf, int len) +{ + struct hclge_tm_shaper_para shaper_para; + int pos = 0; + int ret; + + ret = hclge_tm_get_port_shaper(hdev, &shaper_para); + if (ret) + return ret; + + pos += scnprintf(buf + pos, len - pos, + "IR_B IR_U IR_S BS_B BS_S FLAG RATE(Mbps)\n"); + pos += scnprintf(buf + pos, len - pos, + "%3u %3u %3u %3u %3u %1u %6u\n", + shaper_para.ir_b, shaper_para.ir_u, shaper_para.ir_s, + shaper_para.bs_b, shaper_para.bs_s, shaper_para.flag, + shaper_para.rate); + + return 0; +} + +static int hclge_dbg_dump_tm_bp_qset_map(struct hclge_dev *hdev, u8 tc_id, + char *buf, int len) +{ + u32 qset_mapping[HCLGE_BP_EXT_GRP_NUM]; + struct hclge_bp_to_qs_map_cmd *map; + struct hclge_desc desc; + int pos = 0; + u8 group_id; + u8 grp_num; + u16 i = 0; + int ret; + + grp_num = hdev->num_tqps <= HCLGE_TQP_MAX_SIZE_DEV_V2 ? 
+ HCLGE_BP_GRP_NUM : HCLGE_BP_EXT_GRP_NUM; + map = (struct hclge_bp_to_qs_map_cmd *)desc.data; + for (group_id = 0; group_id < grp_num; group_id++) { + hclge_cmd_setup_basic_desc(&desc, + HCLGE_OPC_TM_BP_TO_QSET_MAPPING, + true); + map->tc_id = tc_id; + map->qs_group_id = group_id; + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get bp to qset map, ret = %d\n", + ret); + return ret; + } + + qset_mapping[group_id] = le32_to_cpu(map->qs_bit_map); + } + + pos += scnprintf(buf + pos, len - pos, "INDEX | TM BP QSET MAPPING:\n"); + for (group_id = 0; group_id < grp_num / 8; group_id++) { + pos += scnprintf(buf + pos, len - pos, + "%04d | %08x:%08x:%08x:%08x:%08x:%08x:%08x:%08x\n", + group_id * 256, qset_mapping[i + 7], + qset_mapping[i + 6], qset_mapping[i + 5], + qset_mapping[i + 4], qset_mapping[i + 3], + qset_mapping[i + 2], qset_mapping[i + 1], + qset_mapping[i]); + i += 8; + } + + return pos; +} + +static int hclge_dbg_dump_tm_map(struct hclge_dev *hdev, char *buf, int len) +{ + u16 queue_id; + u16 qset_id; + u8 link_vld; + int pos = 0; + u8 pri_id; + u8 tc_id; + int ret; + + for (queue_id = 0; queue_id < hdev->num_tqps; queue_id++) { + ret = hclge_tm_get_q_to_qs_map(hdev, queue_id, &qset_id); + if (ret) + return ret; + + ret = hclge_tm_get_qset_map_pri(hdev, qset_id, &pri_id, + &link_vld); + if (ret) + return ret; + + ret = hclge_tm_get_q_to_tc(hdev, queue_id, &tc_id); + if (ret) + return ret; + + pos += scnprintf(buf + pos, len - pos, + "QUEUE_ID QSET_ID PRI_ID TC_ID\n"); + pos += scnprintf(buf + pos, len - pos, + "%04u %4u %3u %2u\n", + queue_id, qset_id, pri_id, tc_id); + + if (!hnae3_dev_dcb_supported(hdev)) + continue; + + ret = hclge_dbg_dump_tm_bp_qset_map(hdev, tc_id, buf + pos, + len - pos); + if (ret < 0) + return ret; + pos += ret; + + pos += scnprintf(buf + pos, len - pos, "\n"); + } + + return 0; +} + +static int hclge_dbg_dump_tm_nodes(struct hclge_dev *hdev, char *buf, int len) +{ + struct hclge_tm_nodes_cmd *nodes; + struct hclge_desc desc; + int pos = 0; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NODES, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to dump tm nodes, ret = %d\n", ret); + return ret; + } + + nodes = (struct hclge_tm_nodes_cmd *)desc.data; + + pos += scnprintf(buf + pos, len - pos, " BASE_ID MAX_NUM\n"); + pos += scnprintf(buf + pos, len - pos, "PG %4u %4u\n", + nodes->pg_base_id, nodes->pg_num); + pos += scnprintf(buf + pos, len - pos, "PRI %4u %4u\n", + nodes->pri_base_id, nodes->pri_num); + pos += scnprintf(buf + pos, len - pos, "QSET %4u %4u\n", + le16_to_cpu(nodes->qset_base_id), + le16_to_cpu(nodes->qset_num)); + pos += scnprintf(buf + pos, len - pos, "QUEUE %4u %4u\n", + le16_to_cpu(nodes->queue_base_id), + le16_to_cpu(nodes->queue_num)); + + return 0; +} + +static const struct hclge_dbg_item tm_pri_items[] = { + { "ID", 4 }, + { "MODE", 2 }, + { "DWRR", 2 }, + { "C_IR_B", 2 }, + { "C_IR_U", 2 }, + { "C_IR_S", 2 }, + { "C_BS_B", 2 }, + { "C_BS_S", 2 }, + { "C_FLAG", 2 }, + { "C_RATE(Mbps)", 2 }, + { "P_IR_B", 2 }, + { "P_IR_U", 2 }, + { "P_IR_S", 2 }, + { "P_BS_B", 2 }, + { "P_BS_S", 2 }, + { "P_FLAG", 2 }, + { "P_RATE(Mbps)", 0 } +}; + +static int hclge_dbg_dump_tm_pri(struct hclge_dev *hdev, char *buf, int len) +{ + char data_str[ARRAY_SIZE(tm_pri_items)][HCLGE_DBG_DATA_STR_LEN]; + struct hclge_tm_shaper_para c_shaper_para, p_shaper_para; + char *result[ARRAY_SIZE(tm_pri_items)], *sch_mode_str; + char 
content[HCLGE_DBG_TM_INFO_LEN]; + u8 pri_num, sch_mode, weight, i, j; + int pos, ret; + + ret = hclge_tm_get_pri_num(hdev, &pri_num); + if (ret) + return ret; + + for (i = 0; i < ARRAY_SIZE(tm_pri_items); i++) + result[i] = &data_str[i][0]; + + hclge_dbg_fill_content(content, sizeof(content), tm_pri_items, + NULL, ARRAY_SIZE(tm_pri_items)); + pos = scnprintf(buf, len, "%s", content); + + for (i = 0; i < pri_num; i++) { + ret = hclge_tm_get_pri_sch_mode(hdev, i, &sch_mode); + if (ret) + return ret; + + ret = hclge_tm_get_pri_weight(hdev, i, &weight); + if (ret) + return ret; + + ret = hclge_tm_get_pri_shaper(hdev, i, + HCLGE_OPC_TM_PRI_C_SHAPPING, + &c_shaper_para); + if (ret) + return ret; + + ret = hclge_tm_get_pri_shaper(hdev, i, + HCLGE_OPC_TM_PRI_P_SHAPPING, + &p_shaper_para); + if (ret) + return ret; + + sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" : + "sp"; + + j = 0; + sprintf(result[j++], "%04u", i); + sprintf(result[j++], "%4s", sch_mode_str); + sprintf(result[j++], "%3u", weight); + hclge_dbg_fill_shaper_content(&c_shaper_para, result, &j); + hclge_dbg_fill_shaper_content(&p_shaper_para, result, &j); + hclge_dbg_fill_content(content, sizeof(content), tm_pri_items, + (const char **)result, + ARRAY_SIZE(tm_pri_items)); + pos += scnprintf(buf + pos, len - pos, "%s", content); + } + + return 0; +} + +static const struct hclge_dbg_item tm_qset_items[] = { + { "ID", 4 }, + { "MAP_PRI", 2 }, + { "LINK_VLD", 2 }, + { "MODE", 2 }, + { "DWRR", 2 }, + { "IR_B", 2 }, + { "IR_U", 2 }, + { "IR_S", 2 }, + { "BS_B", 2 }, + { "BS_S", 2 }, + { "FLAG", 2 }, + { "RATE(Mbps)", 0 } +}; + +static int hclge_dbg_dump_tm_qset(struct hclge_dev *hdev, char *buf, int len) +{ + char data_str[ARRAY_SIZE(tm_qset_items)][HCLGE_DBG_DATA_STR_LEN]; + char *result[ARRAY_SIZE(tm_qset_items)], *sch_mode_str; + u8 priority, link_vld, sch_mode, weight; + struct hclge_tm_shaper_para shaper_para; + char content[HCLGE_DBG_TM_INFO_LEN]; + u16 qset_num, i; + int ret, pos; + u8 j; + + ret = hclge_tm_get_qset_num(hdev, &qset_num); + if (ret) + return ret; + + for (i = 0; i < ARRAY_SIZE(tm_qset_items); i++) + result[i] = &data_str[i][0]; + + hclge_dbg_fill_content(content, sizeof(content), tm_qset_items, + NULL, ARRAY_SIZE(tm_qset_items)); + pos = scnprintf(buf, len, "%s", content); + + for (i = 0; i < qset_num; i++) { + ret = hclge_tm_get_qset_map_pri(hdev, i, &priority, &link_vld); + if (ret) + return ret; + + ret = hclge_tm_get_qset_sch_mode(hdev, i, &sch_mode); + if (ret) + return ret; + + ret = hclge_tm_get_qset_weight(hdev, i, &weight); + if (ret) + return ret; + + ret = hclge_tm_get_qset_shaper(hdev, i, &shaper_para); + if (ret) + return ret; + + sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? 
"dwrr" : + "sp"; + + j = 0; + sprintf(result[j++], "%04u", i); + sprintf(result[j++], "%4u", priority); + sprintf(result[j++], "%4u", link_vld); + sprintf(result[j++], "%4s", sch_mode_str); + sprintf(result[j++], "%3u", weight); + hclge_dbg_fill_shaper_content(&shaper_para, result, &j); + + hclge_dbg_fill_content(content, sizeof(content), tm_qset_items, + (const char **)result, + ARRAY_SIZE(tm_qset_items)); + pos += scnprintf(buf + pos, len - pos, "%s", content); + } + + return 0; +} + +static int hclge_dbg_dump_qos_pause_cfg(struct hclge_dev *hdev, char *buf, + int len) +{ + struct hclge_cfg_pause_param_cmd *pause_param; + struct hclge_desc desc; + int pos = 0; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to dump qos pause, ret = %d\n", ret); + return ret; + } + + pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data; + + pos += scnprintf(buf + pos, len - pos, "pause_trans_gap: 0x%x\n", + pause_param->pause_trans_gap); + pos += scnprintf(buf + pos, len - pos, "pause_trans_time: 0x%x\n", + le16_to_cpu(pause_param->pause_trans_time)); + return 0; +} + +#define HCLGE_DBG_TC_MASK 0x0F + +static int hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev, char *buf, + int len) +{ +#define HCLGE_DBG_TC_BIT_WIDTH 4 + + struct hclge_qos_pri_map_cmd *pri_map; + struct hclge_desc desc; + int pos = 0; + u8 *pri_tc; + u8 tc, i; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to dump qos pri map, ret = %d\n", ret); + return ret; + } + + pri_map = (struct hclge_qos_pri_map_cmd *)desc.data; + + pos += scnprintf(buf + pos, len - pos, "vlan_to_pri: 0x%x\n", + pri_map->vlan_pri); + pos += scnprintf(buf + pos, len - pos, "PRI TC\n"); + + pri_tc = (u8 *)pri_map; + for (i = 0; i < HNAE3_MAX_TC; i++) { + tc = pri_tc[i >> 1] >> ((i & 1) * HCLGE_DBG_TC_BIT_WIDTH); + tc &= HCLGE_DBG_TC_MASK; + pos += scnprintf(buf + pos, len - pos, "%u %u\n", i, tc); + } + + return 0; +} + +static int hclge_dbg_dump_qos_dscp_map(struct hclge_dev *hdev, char *buf, + int len) +{ + struct hnae3_knic_private_info *kinfo = &hdev->vport[0].nic.kinfo; + struct hclge_desc desc[HCLGE_DSCP_MAP_TC_BD_NUM]; + u8 *req0 = (u8 *)desc[0].data; + u8 *req1 = (u8 *)desc[1].data; + u8 dscp_tc[HNAE3_MAX_DSCP]; + int pos, ret; + u8 i, j; + + pos = scnprintf(buf, len, "tc map mode: %s\n", + tc_map_mode_str[kinfo->tc_map_mode]); + + if (kinfo->tc_map_mode != HNAE3_TC_MAP_MODE_DSCP) + return 0; + + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QOS_MAP, true); + desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_QOS_MAP, true); + ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_DSCP_MAP_TC_BD_NUM); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to dump qos dscp map, ret = %d\n", ret); + return ret; + } + + pos += scnprintf(buf + pos, len - pos, "\nDSCP PRIO TC\n"); + + /* The low 32 dscp setting use bd0, high 32 dscp setting use bd1 */ + for (i = 0; i < HNAE3_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM; i++) { + j = i + HNAE3_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM; + /* Each dscp setting has 4 bits, so each byte saves two dscp + * setting + */ + dscp_tc[i] = req0[i >> 1] >> HCLGE_DSCP_TC_SHIFT(i); + dscp_tc[j] = req1[i >> 1] >> HCLGE_DSCP_TC_SHIFT(i); + dscp_tc[i] &= HCLGE_DBG_TC_MASK; + dscp_tc[j] &= HCLGE_DBG_TC_MASK; + } + + for (i = 0; i < 
HNAE3_MAX_DSCP; i++) { + if (kinfo->dscp_prio[i] == HNAE3_PRIO_ID_INVALID) + continue; + + pos += scnprintf(buf + pos, len - pos, " %2u %u %u\n", + i, kinfo->dscp_prio[i], dscp_tc[i]); + } + + return 0; +} + +static int hclge_dbg_dump_tx_buf_cfg(struct hclge_dev *hdev, char *buf, int len) +{ + struct hclge_tx_buff_alloc_cmd *tx_buf_cmd; + struct hclge_desc desc; + int pos = 0; + int i, ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to dump tx buf, ret = %d\n", ret); + return ret; + } + + tx_buf_cmd = (struct hclge_tx_buff_alloc_cmd *)desc.data; + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) + pos += scnprintf(buf + pos, len - pos, + "tx_packet_buf_tc_%d: 0x%x\n", i, + le16_to_cpu(tx_buf_cmd->tx_pkt_buff[i])); + + return pos; +} + +static int hclge_dbg_dump_rx_priv_buf_cfg(struct hclge_dev *hdev, char *buf, + int len) +{ + struct hclge_rx_priv_buff_cmd *rx_buf_cmd; + struct hclge_desc desc; + int pos = 0; + int i, ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to dump rx priv buf, ret = %d\n", ret); + return ret; + } + + pos += scnprintf(buf + pos, len - pos, "\n"); + + rx_buf_cmd = (struct hclge_rx_priv_buff_cmd *)desc.data; + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) + pos += scnprintf(buf + pos, len - pos, + "rx_packet_buf_tc_%d: 0x%x\n", i, + le16_to_cpu(rx_buf_cmd->buf_num[i])); + + pos += scnprintf(buf + pos, len - pos, "rx_share_buf: 0x%x\n", + le16_to_cpu(rx_buf_cmd->shared_buf)); + + return pos; +} + +static int hclge_dbg_dump_rx_common_wl_cfg(struct hclge_dev *hdev, char *buf, + int len) +{ + struct hclge_rx_com_wl *rx_com_wl; + struct hclge_desc desc; + int pos = 0; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to dump rx common wl, ret = %d\n", ret); + return ret; + } + + rx_com_wl = (struct hclge_rx_com_wl *)desc.data; + pos += scnprintf(buf + pos, len - pos, "\n"); + pos += scnprintf(buf + pos, len - pos, + "rx_com_wl: high: 0x%x, low: 0x%x\n", + le16_to_cpu(rx_com_wl->com_wl.high), + le16_to_cpu(rx_com_wl->com_wl.low)); + + return pos; +} + +static int hclge_dbg_dump_rx_global_pkt_cnt(struct hclge_dev *hdev, char *buf, + int len) +{ + struct hclge_rx_com_wl *rx_packet_cnt; + struct hclge_desc desc; + int pos = 0; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_GBL_PKT_CNT, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to dump rx global pkt cnt, ret = %d\n", ret); + return ret; + } + + rx_packet_cnt = (struct hclge_rx_com_wl *)desc.data; + pos += scnprintf(buf + pos, len - pos, + "rx_global_packet_cnt: high: 0x%x, low: 0x%x\n", + le16_to_cpu(rx_packet_cnt->com_wl.high), + le16_to_cpu(rx_packet_cnt->com_wl.low)); + + return pos; +} + +static int hclge_dbg_dump_rx_priv_wl_buf_cfg(struct hclge_dev *hdev, char *buf, + int len) +{ + struct hclge_rx_priv_wl_buf *rx_priv_wl; + struct hclge_desc desc[2]; + int pos = 0; + int i, ret; + + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_RX_PRIV_WL_ALLOC, true); + desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_RX_PRIV_WL_ALLOC, true); + ret = hclge_cmd_send(&hdev->hw, desc, 2); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to dump rx priv 
wl buf, ret = %d\n", ret); + return ret; + } + + rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[0].data; + for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++) + pos += scnprintf(buf + pos, len - pos, + "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", i, + le16_to_cpu(rx_priv_wl->tc_wl[i].high), + le16_to_cpu(rx_priv_wl->tc_wl[i].low)); + + rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[1].data; + for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++) + pos += scnprintf(buf + pos, len - pos, + "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", + i + HCLGE_TC_NUM_ONE_DESC, + le16_to_cpu(rx_priv_wl->tc_wl[i].high), + le16_to_cpu(rx_priv_wl->tc_wl[i].low)); + + return pos; +} + +static int hclge_dbg_dump_rx_common_threshold_cfg(struct hclge_dev *hdev, + char *buf, int len) +{ + struct hclge_rx_com_thrd *rx_com_thrd; + struct hclge_desc desc[2]; + int pos = 0; + int i, ret; + + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_RX_COM_THRD_ALLOC, true); + desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_RX_COM_THRD_ALLOC, true); + ret = hclge_cmd_send(&hdev->hw, desc, 2); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to dump rx common threshold, ret = %d\n", ret); + return ret; + } + + pos += scnprintf(buf + pos, len - pos, "\n"); + rx_com_thrd = (struct hclge_rx_com_thrd *)desc[0].data; + for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++) + pos += scnprintf(buf + pos, len - pos, + "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", i, + le16_to_cpu(rx_com_thrd->com_thrd[i].high), + le16_to_cpu(rx_com_thrd->com_thrd[i].low)); + + rx_com_thrd = (struct hclge_rx_com_thrd *)desc[1].data; + for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++) + pos += scnprintf(buf + pos, len - pos, + "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", + i + HCLGE_TC_NUM_ONE_DESC, + le16_to_cpu(rx_com_thrd->com_thrd[i].high), + le16_to_cpu(rx_com_thrd->com_thrd[i].low)); + + return pos; +} + +static int hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev, char *buf, + int len) +{ + int pos = 0; + int ret; + + ret = hclge_dbg_dump_tx_buf_cfg(hdev, buf + pos, len - pos); + if (ret < 0) + return ret; + pos += ret; + + ret = hclge_dbg_dump_rx_priv_buf_cfg(hdev, buf + pos, len - pos); + if (ret < 0) + return ret; + pos += ret; + + ret = hclge_dbg_dump_rx_common_wl_cfg(hdev, buf + pos, len - pos); + if (ret < 0) + return ret; + pos += ret; + + ret = hclge_dbg_dump_rx_global_pkt_cnt(hdev, buf + pos, len - pos); + if (ret < 0) + return ret; + pos += ret; + + pos += scnprintf(buf + pos, len - pos, "\n"); + if (!hnae3_dev_dcb_supported(hdev)) + return 0; + + ret = hclge_dbg_dump_rx_priv_wl_buf_cfg(hdev, buf + pos, len - pos); + if (ret < 0) + return ret; + pos += ret; + + ret = hclge_dbg_dump_rx_common_threshold_cfg(hdev, buf + pos, + len - pos); + if (ret < 0) + return ret; + + return 0; +} + +static int hclge_dbg_dump_mng_table(struct hclge_dev *hdev, char *buf, int len) +{ + struct hclge_mac_ethertype_idx_rd_cmd *req0; + struct hclge_desc desc; + u32 msg_egress_port; + int pos = 0; + int ret, i; + + pos += scnprintf(buf + pos, len - pos, + "entry mac_addr mask ether "); + pos += scnprintf(buf + pos, len - pos, + "mask vlan mask i_map i_dir e_type "); + pos += scnprintf(buf + pos, len - pos, "pf_id vf_id q_id drop\n"); + + for (i = 0; i < HCLGE_DBG_MNG_TBL_MAX; i++) { + hclge_cmd_setup_basic_desc(&desc, HCLGE_MAC_ETHERTYPE_IDX_RD, + true); + req0 = (struct hclge_mac_ethertype_idx_rd_cmd *)&desc.data; + req0->index = cpu_to_le16(i); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + 
"failed to dump manage table, ret = %d\n", ret); + return ret; + } + + if (!req0->resp_code) + continue; + + pos += scnprintf(buf + pos, len - pos, "%02u %pM ", + le16_to_cpu(req0->index), req0->mac_addr); + + pos += scnprintf(buf + pos, len - pos, + "%x %04x %x %04x ", + !!(req0->flags & HCLGE_DBG_MNG_MAC_MASK_B), + le16_to_cpu(req0->ethter_type), + !!(req0->flags & HCLGE_DBG_MNG_ETHER_MASK_B), + le16_to_cpu(req0->vlan_tag) & + HCLGE_DBG_MNG_VLAN_TAG); + + pos += scnprintf(buf + pos, len - pos, + "%x %02x %02x ", + !!(req0->flags & HCLGE_DBG_MNG_VLAN_MASK_B), + req0->i_port_bitmap, req0->i_port_direction); + + msg_egress_port = le16_to_cpu(req0->egress_port); + pos += scnprintf(buf + pos, len - pos, + "%x %x %02x %04x %x\n", + !!(msg_egress_port & HCLGE_DBG_MNG_E_TYPE_B), + msg_egress_port & HCLGE_DBG_MNG_PF_ID, + (msg_egress_port >> 3) & HCLGE_DBG_MNG_VF_ID, + le16_to_cpu(req0->egress_queue), + !!(msg_egress_port & HCLGE_DBG_MNG_DROP_B)); + } + + return 0; +} + +#define HCLGE_DBG_TCAM_BUF_SIZE 256 + +static int hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, bool sel_x, + char *tcam_buf, + struct hclge_dbg_tcam_msg tcam_msg) +{ + struct hclge_fd_tcam_config_1_cmd *req1; + struct hclge_fd_tcam_config_2_cmd *req2; + struct hclge_fd_tcam_config_3_cmd *req3; + struct hclge_desc desc[3]; + int pos = 0; + int ret, i; + __le32 *req; + + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, true); + desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, true); + desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, true); + + req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data; + req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data; + req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data; + + req1->stage = tcam_msg.stage; + req1->xy_sel = sel_x ? 1 : 0; + req1->index = cpu_to_le32(tcam_msg.loc); + + ret = hclge_cmd_send(&hdev->hw, desc, 3); + if (ret) + return ret; + + pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos, + "read result tcam key %s(%u):\n", sel_x ? 
"x" : "y", + tcam_msg.loc); + + /* tcam_data0 ~ tcam_data1 */ + req = (__le32 *)req1->tcam_data; + for (i = 0; i < 2; i++) + pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos, + "%08x\n", le32_to_cpu(*req++)); + + /* tcam_data2 ~ tcam_data7 */ + req = (__le32 *)req2->tcam_data; + for (i = 0; i < 6; i++) + pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos, + "%08x\n", le32_to_cpu(*req++)); + + /* tcam_data8 ~ tcam_data12 */ + req = (__le32 *)req3->tcam_data; + for (i = 0; i < 5; i++) + pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos, + "%08x\n", le32_to_cpu(*req++)); + + return ret; +} + +static int hclge_dbg_get_rules_location(struct hclge_dev *hdev, u16 *rule_locs) +{ + struct hclge_fd_rule *rule; + struct hlist_node *node; + int cnt = 0; + + spin_lock_bh(&hdev->fd_rule_lock); + hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { + rule_locs[cnt] = rule->location; + cnt++; + } + spin_unlock_bh(&hdev->fd_rule_lock); + + if (cnt != hdev->hclge_fd_rule_num || cnt == 0) + return -EINVAL; + + return cnt; +} + +static int hclge_dbg_dump_fd_tcam(struct hclge_dev *hdev, char *buf, int len) +{ + u32 rule_num = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]; + struct hclge_dbg_tcam_msg tcam_msg; + int i, ret, rule_cnt; + u16 *rule_locs; + char *tcam_buf; + int pos = 0; + + if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) { + dev_err(&hdev->pdev->dev, + "Only FD-supported dev supports dump fd tcam\n"); + return -EOPNOTSUPP; + } + + if (!hdev->hclge_fd_rule_num || !rule_num) + return 0; + + rule_locs = kcalloc(rule_num, sizeof(u16), GFP_KERNEL); + if (!rule_locs) + return -ENOMEM; + + tcam_buf = kzalloc(HCLGE_DBG_TCAM_BUF_SIZE, GFP_KERNEL); + if (!tcam_buf) { + kfree(rule_locs); + return -ENOMEM; + } + + rule_cnt = hclge_dbg_get_rules_location(hdev, rule_locs); + if (rule_cnt < 0) { + ret = rule_cnt; + dev_err(&hdev->pdev->dev, + "failed to get rule number, ret = %d\n", ret); + goto out; + } + + ret = 0; + for (i = 0; i < rule_cnt; i++) { + tcam_msg.stage = HCLGE_FD_STAGE_1; + tcam_msg.loc = rule_locs[i]; + + ret = hclge_dbg_fd_tcam_read(hdev, true, tcam_buf, tcam_msg); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get fd tcam key x, ret = %d\n", ret); + goto out; + } + + pos += scnprintf(buf + pos, len - pos, "%s", tcam_buf); + + ret = hclge_dbg_fd_tcam_read(hdev, false, tcam_buf, tcam_msg); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get fd tcam key y, ret = %d\n", ret); + goto out; + } + + pos += scnprintf(buf + pos, len - pos, "%s", tcam_buf); + } + +out: + kfree(tcam_buf); + kfree(rule_locs); + return ret; +} + +static int hclge_dbg_dump_fd_counter(struct hclge_dev *hdev, char *buf, int len) +{ + u8 func_num = pci_num_vf(hdev->pdev) + 1; /* pf and enabled vf num */ + struct hclge_fd_ad_cnt_read_cmd *req; + char str_id[HCLGE_DBG_ID_LEN]; + struct hclge_desc desc; + int pos = 0; + int ret; + u64 cnt; + u8 i; + + if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) + return -EOPNOTSUPP; + + pos += scnprintf(buf + pos, len - pos, + "func_id\thit_times\n"); + + for (i = 0; i < func_num; i++) { + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_CNT_OP, true); + req = (struct hclge_fd_ad_cnt_read_cmd *)desc.data; + req->index = cpu_to_le16(i); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, "failed to get fd counter, ret = %d\n", + ret); + return ret; + } + cnt = le64_to_cpu(req->cnt); + hclge_dbg_get_func_id_str(str_id, i); + pos += scnprintf(buf + pos, len - pos, + "%s\t%llu\n", str_id, cnt); + } + + 
return 0; +} + +static const struct hclge_dbg_status_dfx_info hclge_dbg_rst_info[] = { + {HCLGE_MISC_VECTOR_REG_BASE, "vector0 interrupt enable status"}, + {HCLGE_MISC_RESET_STS_REG, "reset interrupt source"}, + {HCLGE_MISC_VECTOR_INT_STS, "reset interrupt status"}, + {HCLGE_RAS_PF_OTHER_INT_STS_REG, "RAS interrupt status"}, + {HCLGE_GLOBAL_RESET_REG, "hardware reset status"}, + {HCLGE_NIC_CSQ_DEPTH_REG, "handshake status"}, + {HCLGE_FUN_RST_ING, "function reset status"} +}; + +int hclge_dbg_dump_rst_info(struct hclge_dev *hdev, char *buf, int len) +{ + u32 i, offset; + int pos = 0; + + pos += scnprintf(buf + pos, len - pos, "PF reset count: %u\n", + hdev->rst_stats.pf_rst_cnt); + pos += scnprintf(buf + pos, len - pos, "FLR reset count: %u\n", + hdev->rst_stats.flr_rst_cnt); + pos += scnprintf(buf + pos, len - pos, "GLOBAL reset count: %u\n", + hdev->rst_stats.global_rst_cnt); + pos += scnprintf(buf + pos, len - pos, "IMP reset count: %u\n", + hdev->rst_stats.imp_rst_cnt); + pos += scnprintf(buf + pos, len - pos, "reset done count: %u\n", + hdev->rst_stats.reset_done_cnt); + pos += scnprintf(buf + pos, len - pos, "HW reset done count: %u\n", + hdev->rst_stats.hw_reset_done_cnt); + pos += scnprintf(buf + pos, len - pos, "reset count: %u\n", + hdev->rst_stats.reset_cnt); + pos += scnprintf(buf + pos, len - pos, "reset fail count: %u\n", + hdev->rst_stats.reset_fail_cnt); + + for (i = 0; i < ARRAY_SIZE(hclge_dbg_rst_info); i++) { + offset = hclge_dbg_rst_info[i].offset; + pos += scnprintf(buf + pos, len - pos, "%s: 0x%x\n", + hclge_dbg_rst_info[i].message, + hclge_read_dev(&hdev->hw, offset)); + } + + pos += scnprintf(buf + pos, len - pos, "hdev state: 0x%lx\n", + hdev->state); + + return 0; +} + +static int hclge_dbg_dump_serv_info(struct hclge_dev *hdev, char *buf, int len) +{ + unsigned long rem_nsec; + int pos = 0; + u64 lc; + + lc = local_clock(); + rem_nsec = do_div(lc, HCLGE_BILLION_NANO_SECONDS); + + pos += scnprintf(buf + pos, len - pos, "local_clock: [%5lu.%06lu]\n", + (unsigned long)lc, rem_nsec / 1000); + pos += scnprintf(buf + pos, len - pos, "delta: %u(ms)\n", + jiffies_to_msecs(jiffies - hdev->last_serv_processed)); + pos += scnprintf(buf + pos, len - pos, + "last_service_task_processed: %lu(jiffies)\n", + hdev->last_serv_processed); + pos += scnprintf(buf + pos, len - pos, "last_service_task_cnt: %lu\n", + hdev->serv_processed_cnt); + + return 0; +} + +static int hclge_dbg_dump_interrupt(struct hclge_dev *hdev, char *buf, int len) +{ + int pos = 0; + + pos += scnprintf(buf + pos, len - pos, "num_nic_msi: %u\n", + hdev->num_nic_msi); + pos += scnprintf(buf + pos, len - pos, "num_roce_msi: %u\n", + hdev->num_roce_msi); + pos += scnprintf(buf + pos, len - pos, "num_msi_used: %u\n", + hdev->num_msi_used); + pos += scnprintf(buf + pos, len - pos, "num_msi_left: %u\n", + hdev->num_msi_left); + + return 0; +} + +static void hclge_dbg_imp_info_data_print(struct hclge_desc *desc_src, + char *buf, int len, u32 bd_num) +{ +#define HCLGE_DBG_IMP_INFO_PRINT_OFFSET 0x2 + + struct hclge_desc *desc_index = desc_src; + u32 offset = 0; + int pos = 0; + u32 i, j; + + pos += scnprintf(buf + pos, len - pos, "offset | data\n"); + + for (i = 0; i < bd_num; i++) { + j = 0; + while (j < HCLGE_DESC_DATA_LEN - 1) { + pos += scnprintf(buf + pos, len - pos, "0x%04x | ", + offset); + pos += scnprintf(buf + pos, len - pos, "0x%08x ", + le32_to_cpu(desc_index->data[j++])); + pos += scnprintf(buf + pos, len - pos, "0x%08x\n", + le32_to_cpu(desc_index->data[j++])); + offset += sizeof(u32) * 
HCLGE_DBG_IMP_INFO_PRINT_OFFSET; + } + desc_index++; + } +} + +static int +hclge_dbg_get_imp_stats_info(struct hclge_dev *hdev, char *buf, int len) +{ + struct hclge_get_imp_bd_cmd *req; + struct hclge_desc *desc_src; + struct hclge_desc desc; + u32 bd_num; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_IMP_STATS_BD, true); + + req = (struct hclge_get_imp_bd_cmd *)desc.data; + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get imp statistics bd number, ret = %d\n", + ret); + return ret; + } + + bd_num = le32_to_cpu(req->bd_num); + if (!bd_num) { + dev_err(&hdev->pdev->dev, "imp statistics bd number is 0!\n"); + return -EINVAL; + } + + desc_src = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL); + if (!desc_src) + return -ENOMEM; + + ret = hclge_dbg_cmd_send(hdev, desc_src, 0, bd_num, + HCLGE_OPC_IMP_STATS_INFO); + if (ret) { + kfree(desc_src); + dev_err(&hdev->pdev->dev, + "failed to get imp statistics, ret = %d\n", ret); + return ret; + } + + hclge_dbg_imp_info_data_print(desc_src, buf, len, bd_num); + + kfree(desc_src); + + return 0; +} + +#define HCLGE_CMD_NCL_CONFIG_BD_NUM 5 +#define HCLGE_MAX_NCL_CONFIG_LENGTH 16384 + +static void hclge_ncl_config_data_print(struct hclge_desc *desc, int *index, + char *buf, int len, int *pos) +{ +#define HCLGE_CMD_DATA_NUM 6 + + int offset = HCLGE_MAX_NCL_CONFIG_LENGTH - *index; + int i, j; + + for (i = 0; i < HCLGE_CMD_NCL_CONFIG_BD_NUM; i++) { + for (j = 0; j < HCLGE_CMD_DATA_NUM; j++) { + if (i == 0 && j == 0) + continue; + + *pos += scnprintf(buf + *pos, len - *pos, + "0x%04x | 0x%08x\n", offset, + le32_to_cpu(desc[i].data[j])); + + offset += sizeof(u32); + *index -= sizeof(u32); + + if (*index <= 0) + return; + } + } +} + +static int +hclge_dbg_dump_ncl_config(struct hclge_dev *hdev, char *buf, int len) +{ +#define HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD (20 + 24 * 4) + + struct hclge_desc desc[HCLGE_CMD_NCL_CONFIG_BD_NUM]; + int bd_num = HCLGE_CMD_NCL_CONFIG_BD_NUM; + int index = HCLGE_MAX_NCL_CONFIG_LENGTH; + int pos = 0; + u32 data0; + int ret; + + pos += scnprintf(buf + pos, len - pos, "offset | data\n"); + + while (index > 0) { + data0 = HCLGE_MAX_NCL_CONFIG_LENGTH - index; + if (index >= HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD) + data0 |= HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD << 16; + else + data0 |= (u32)index << 16; + ret = hclge_dbg_cmd_send(hdev, desc, data0, bd_num, + HCLGE_OPC_QUERY_NCL_CONFIG); + if (ret) + return ret; + + hclge_ncl_config_data_print(desc, &index, buf, len, &pos); + } + + return 0; +} + +static int hclge_dbg_dump_loopback(struct hclge_dev *hdev, char *buf, int len) +{ + struct phy_device *phydev = hdev->hw.mac.phydev; + struct hclge_config_mac_mode_cmd *req_app; + struct hclge_common_lb_cmd *req_common; + struct hclge_desc desc; + u8 loopback_en; + int pos = 0; + int ret; + + req_app = (struct hclge_config_mac_mode_cmd *)desc.data; + req_common = (struct hclge_common_lb_cmd *)desc.data; + + pos += scnprintf(buf + pos, len - pos, "mac id: %u\n", + hdev->hw.mac.mac_id); + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to dump app loopback status, ret = %d\n", ret); + return ret; + } + + loopback_en = hnae3_get_bit(le32_to_cpu(req_app->txrx_pad_fcs_loop_en), + HCLGE_MAC_APP_LP_B); + pos += scnprintf(buf + pos, len - pos, "app loopback: %s\n", + state_str[loopback_en]); + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, true); + ret 
= hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to dump common loopback status, ret = %d\n", + ret); + return ret; + } + + loopback_en = req_common->enable & HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B; + pos += scnprintf(buf + pos, len - pos, "serdes serial loopback: %s\n", + state_str[loopback_en]); + + loopback_en = req_common->enable & + HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B ? 1 : 0; + pos += scnprintf(buf + pos, len - pos, "serdes parallel loopback: %s\n", + state_str[loopback_en]); + + if (phydev) { + loopback_en = phydev->loopback_enabled; + pos += scnprintf(buf + pos, len - pos, "phy loopback: %s\n", + state_str[loopback_en]); + } else if (hnae3_dev_phy_imp_supported(hdev)) { + loopback_en = req_common->enable & + HCLGE_CMD_GE_PHY_INNER_LOOP_B; + pos += scnprintf(buf + pos, len - pos, "phy loopback: %s\n", + state_str[loopback_en]); + } + + return 0; +} + +/* hclge_dbg_dump_mac_tnl_status: print message about mac tnl interrupt + * @hdev: pointer to struct hclge_dev + */ +static int +hclge_dbg_dump_mac_tnl_status(struct hclge_dev *hdev, char *buf, int len) +{ + struct hclge_mac_tnl_stats stats; + unsigned long rem_nsec; + int pos = 0; + + pos += scnprintf(buf + pos, len - pos, + "Recently generated mac tnl interruption:\n"); + + while (kfifo_get(&hdev->mac_tnl_log, &stats)) { + rem_nsec = do_div(stats.time, HCLGE_BILLION_NANO_SECONDS); + + pos += scnprintf(buf + pos, len - pos, + "[%07lu.%03lu] status = 0x%x\n", + (unsigned long)stats.time, rem_nsec / 1000, + stats.status); + } + + return 0; +} + + +static const struct hclge_dbg_item mac_list_items[] = { + { "FUNC_ID", 2 }, + { "MAC_ADDR", 12 }, + { "STATE", 2 }, +}; + +static void hclge_dbg_dump_mac_list(struct hclge_dev *hdev, char *buf, int len, + bool is_unicast) +{ + char data_str[ARRAY_SIZE(mac_list_items)][HCLGE_DBG_DATA_STR_LEN]; + char content[HCLGE_DBG_INFO_LEN], str_id[HCLGE_DBG_ID_LEN]; + char *result[ARRAY_SIZE(mac_list_items)]; + struct hclge_mac_node *mac_node, *tmp; + struct hclge_vport *vport; + struct list_head *list; + u32 func_id; + int pos = 0; + int i; + + for (i = 0; i < ARRAY_SIZE(mac_list_items); i++) + result[i] = &data_str[i][0]; + + pos += scnprintf(buf + pos, len - pos, "%s MAC_LIST:\n", + is_unicast ? "UC" : "MC"); + hclge_dbg_fill_content(content, sizeof(content), mac_list_items, + NULL, ARRAY_SIZE(mac_list_items)); + pos += scnprintf(buf + pos, len - pos, "%s", content); + + for (func_id = 0; func_id < hdev->num_alloc_vport; func_id++) { + vport = &hdev->vport[func_id]; + list = is_unicast ? 
&vport->uc_mac_list : &vport->mc_mac_list; + spin_lock_bh(&vport->mac_list_lock); + list_for_each_entry_safe(mac_node, tmp, list, node) { + i = 0; + result[i++] = hclge_dbg_get_func_id_str(str_id, + func_id); + sprintf(result[i++], "%pM", mac_node->mac_addr); + sprintf(result[i++], "%5s", + hclge_mac_state_str[mac_node->state]); + hclge_dbg_fill_content(content, sizeof(content), + mac_list_items, + (const char **)result, + ARRAY_SIZE(mac_list_items)); + pos += scnprintf(buf + pos, len - pos, "%s", content); + } + spin_unlock_bh(&vport->mac_list_lock); + } +} + +static int hclge_dbg_dump_umv_info(struct hclge_dev *hdev, char *buf, int len) +{ + u8 func_num = pci_num_vf(hdev->pdev) + 1; + struct hclge_vport *vport; + int pos = 0; + u8 i; + + pos += scnprintf(buf, len, "num_alloc_vport : %u\n", + hdev->num_alloc_vport); + pos += scnprintf(buf + pos, len - pos, "max_umv_size : %u\n", + hdev->max_umv_size); + pos += scnprintf(buf + pos, len - pos, "wanted_umv_size : %u\n", + hdev->wanted_umv_size); + pos += scnprintf(buf + pos, len - pos, "priv_umv_size : %u\n", + hdev->priv_umv_size); + + mutex_lock(&hdev->vport_lock); + pos += scnprintf(buf + pos, len - pos, "share_umv_size : %u\n", + hdev->share_umv_size); + for (i = 0; i < func_num; i++) { + vport = &hdev->vport[i]; + pos += scnprintf(buf + pos, len - pos, + "vport(%u) used_umv_num : %u\n", + i, vport->used_umv_num); + } + mutex_unlock(&hdev->vport_lock); + + pos += scnprintf(buf + pos, len - pos, "used_mc_mac_num : %u\n", + hdev->used_mc_mac_num); + + return 0; +} + +static int hclge_get_vlan_rx_offload_cfg(struct hclge_dev *hdev, u8 vf_id, + struct hclge_dbg_vlan_cfg *vlan_cfg) +{ + struct hclge_vport_vtag_rx_cfg_cmd *req; + struct hclge_desc desc; + u16 bmap_index; + u8 rx_cfg; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, true); + + req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data; + req->vf_offset = vf_id / HCLGE_VF_NUM_PER_CMD; + bmap_index = vf_id % HCLGE_VF_NUM_PER_CMD / HCLGE_VF_NUM_PER_BYTE; + req->vf_bitmap[bmap_index] = 1U << (vf_id % HCLGE_VF_NUM_PER_BYTE); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get vport%u rxvlan cfg, ret = %d\n", + vf_id, ret); + return ret; + } + + rx_cfg = req->vport_vlan_cfg; + vlan_cfg->strip_tag1 = hnae3_get_bit(rx_cfg, HCLGE_REM_TAG1_EN_B); + vlan_cfg->strip_tag2 = hnae3_get_bit(rx_cfg, HCLGE_REM_TAG2_EN_B); + vlan_cfg->drop_tag1 = hnae3_get_bit(rx_cfg, HCLGE_DISCARD_TAG1_EN_B); + vlan_cfg->drop_tag2 = hnae3_get_bit(rx_cfg, HCLGE_DISCARD_TAG2_EN_B); + vlan_cfg->pri_only1 = hnae3_get_bit(rx_cfg, HCLGE_SHOW_TAG1_EN_B); + vlan_cfg->pri_only2 = hnae3_get_bit(rx_cfg, HCLGE_SHOW_TAG2_EN_B); + + return 0; +} + +static int hclge_get_vlan_tx_offload_cfg(struct hclge_dev *hdev, u8 vf_id, + struct hclge_dbg_vlan_cfg *vlan_cfg) +{ + struct hclge_vport_vtag_tx_cfg_cmd *req; + struct hclge_desc desc; + u16 bmap_index; + u8 tx_cfg; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, true); + req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data; + req->vf_offset = vf_id / HCLGE_VF_NUM_PER_CMD; + bmap_index = vf_id % HCLGE_VF_NUM_PER_CMD / HCLGE_VF_NUM_PER_BYTE; + req->vf_bitmap[bmap_index] = 1U << (vf_id % HCLGE_VF_NUM_PER_BYTE); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get vport%u txvlan cfg, ret = %d\n", + vf_id, ret); + return ret; + } + + tx_cfg = req->vport_vlan_cfg; + vlan_cfg->pvid = le16_to_cpu(req->def_vlan_tag1); + + 
vlan_cfg->accept_tag1 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_TAG1_B); + vlan_cfg->accept_tag2 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_TAG2_B); + vlan_cfg->accept_untag1 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_UNTAG1_B); + vlan_cfg->accept_untag2 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_UNTAG2_B); + vlan_cfg->insert_tag1 = hnae3_get_bit(tx_cfg, HCLGE_PORT_INS_TAG1_EN_B); + vlan_cfg->insert_tag2 = hnae3_get_bit(tx_cfg, HCLGE_PORT_INS_TAG2_EN_B); + vlan_cfg->shift_tag = hnae3_get_bit(tx_cfg, HCLGE_TAG_SHIFT_MODE_EN_B); + + return 0; +} + +static int hclge_get_vlan_filter_config_cmd(struct hclge_dev *hdev, + u8 vlan_type, u8 vf_id, + struct hclge_desc *desc) +{ + struct hclge_vlan_filter_ctrl_cmd *req; + int ret; + + hclge_cmd_setup_basic_desc(desc, HCLGE_OPC_VLAN_FILTER_CTRL, true); + req = (struct hclge_vlan_filter_ctrl_cmd *)desc->data; + req->vlan_type = vlan_type; + req->vf_id = vf_id; + + ret = hclge_cmd_send(&hdev->hw, desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "failed to get vport%u vlan filter config, ret = %d.\n", + vf_id, ret); + + return ret; +} + +static int hclge_get_vlan_filter_state(struct hclge_dev *hdev, u8 vlan_type, + u8 vf_id, u8 *vlan_fe) +{ + struct hclge_vlan_filter_ctrl_cmd *req; + struct hclge_desc desc; + int ret; + + ret = hclge_get_vlan_filter_config_cmd(hdev, vlan_type, vf_id, &desc); + if (ret) + return ret; + + req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data; + *vlan_fe = req->vlan_fe; + + return 0; +} + +static int hclge_get_port_vlan_filter_bypass_state(struct hclge_dev *hdev, + u8 vf_id, u8 *bypass_en) +{ + struct hclge_port_vlan_filter_bypass_cmd *req; + struct hclge_desc desc; + int ret; + + if (!test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, hdev->ae_dev->caps)) + return 0; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PORT_VLAN_BYPASS, true); + req = (struct hclge_port_vlan_filter_bypass_cmd *)desc.data; + req->vf_id = vf_id; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get vport%u port vlan filter bypass state, ret = %d.\n", + vf_id, ret); + return ret; + } + + *bypass_en = hnae3_get_bit(req->bypass_state, HCLGE_INGRESS_BYPASS_B); + + return 0; +} + +static const struct hclge_dbg_item vlan_filter_items[] = { + { "FUNC_ID", 2 }, + { "I_VF_VLAN_FILTER", 2 }, + { "E_VF_VLAN_FILTER", 2 }, + { "PORT_VLAN_FILTER_BYPASS", 0 } +}; + +static const struct hclge_dbg_item vlan_offload_items[] = { + { "FUNC_ID", 2 }, + { "PVID", 4 }, + { "ACCEPT_TAG1", 2 }, + { "ACCEPT_TAG2", 2 }, + { "ACCEPT_UNTAG1", 2 }, + { "ACCEPT_UNTAG2", 2 }, + { "INSERT_TAG1", 2 }, + { "INSERT_TAG2", 2 }, + { "SHIFT_TAG", 2 }, + { "STRIP_TAG1", 2 }, + { "STRIP_TAG2", 2 }, + { "DROP_TAG1", 2 }, + { "DROP_TAG2", 2 }, + { "PRI_ONLY_TAG1", 2 }, + { "PRI_ONLY_TAG2", 0 } +}; + +static int hclge_dbg_dump_vlan_filter_config(struct hclge_dev *hdev, char *buf, + int len, int *pos) +{ + char content[HCLGE_DBG_VLAN_FLTR_INFO_LEN], str_id[HCLGE_DBG_ID_LEN]; + const char *result[ARRAY_SIZE(vlan_filter_items)]; + u8 i, j, vlan_fe, bypass, ingress, egress; + u8 func_num = pci_num_vf(hdev->pdev) + 1; /* pf and enabled vf num */ + int ret; + + ret = hclge_get_vlan_filter_state(hdev, HCLGE_FILTER_TYPE_PORT, 0, + &vlan_fe); + if (ret) + return ret; + ingress = vlan_fe & HCLGE_FILTER_FE_NIC_INGRESS_B; + egress = vlan_fe & HCLGE_FILTER_FE_NIC_EGRESS_B ? 
1 : 0; + + *pos += scnprintf(buf, len, "I_PORT_VLAN_FILTER: %s\n", + state_str[ingress]); + *pos += scnprintf(buf + *pos, len - *pos, "E_PORT_VLAN_FILTER: %s\n", + state_str[egress]); + + hclge_dbg_fill_content(content, sizeof(content), vlan_filter_items, + NULL, ARRAY_SIZE(vlan_filter_items)); + *pos += scnprintf(buf + *pos, len - *pos, "%s", content); + + for (i = 0; i < func_num; i++) { + ret = hclge_get_vlan_filter_state(hdev, HCLGE_FILTER_TYPE_VF, i, + &vlan_fe); + if (ret) + return ret; + + ingress = vlan_fe & HCLGE_FILTER_FE_NIC_INGRESS_B; + egress = vlan_fe & HCLGE_FILTER_FE_NIC_EGRESS_B ? 1 : 0; + ret = hclge_get_port_vlan_filter_bypass_state(hdev, i, &bypass); + if (ret) + return ret; + j = 0; + result[j++] = hclge_dbg_get_func_id_str(str_id, i); + result[j++] = state_str[ingress]; + result[j++] = state_str[egress]; + result[j++] = + test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, + hdev->ae_dev->caps) ? state_str[bypass] : "NA"; + hclge_dbg_fill_content(content, sizeof(content), + vlan_filter_items, result, + ARRAY_SIZE(vlan_filter_items)); + *pos += scnprintf(buf + *pos, len - *pos, "%s", content); + } + *pos += scnprintf(buf + *pos, len - *pos, "\n"); + + return 0; +} + +static int hclge_dbg_dump_vlan_offload_config(struct hclge_dev *hdev, char *buf, + int len, int *pos) +{ + char str_id[HCLGE_DBG_ID_LEN], str_pvid[HCLGE_DBG_ID_LEN]; + const char *result[ARRAY_SIZE(vlan_offload_items)]; + char content[HCLGE_DBG_VLAN_OFFLOAD_INFO_LEN]; + u8 func_num = pci_num_vf(hdev->pdev) + 1; /* pf and enabled vf num */ + struct hclge_dbg_vlan_cfg vlan_cfg; + int ret; + u8 i, j; + + hclge_dbg_fill_content(content, sizeof(content), vlan_offload_items, + NULL, ARRAY_SIZE(vlan_offload_items)); + *pos += scnprintf(buf + *pos, len - *pos, "%s", content); + + for (i = 0; i < func_num; i++) { + ret = hclge_get_vlan_tx_offload_cfg(hdev, i, &vlan_cfg); + if (ret) + return ret; + + ret = hclge_get_vlan_rx_offload_cfg(hdev, i, &vlan_cfg); + if (ret) + return ret; + + sprintf(str_pvid, "%u", vlan_cfg.pvid); + j = 0; + result[j++] = hclge_dbg_get_func_id_str(str_id, i); + result[j++] = str_pvid; + result[j++] = state_str[vlan_cfg.accept_tag1]; + result[j++] = state_str[vlan_cfg.accept_tag2]; + result[j++] = state_str[vlan_cfg.accept_untag1]; + result[j++] = state_str[vlan_cfg.accept_untag2]; + result[j++] = state_str[vlan_cfg.insert_tag1]; + result[j++] = state_str[vlan_cfg.insert_tag2]; + result[j++] = state_str[vlan_cfg.shift_tag]; + result[j++] = state_str[vlan_cfg.strip_tag1]; + result[j++] = state_str[vlan_cfg.strip_tag2]; + result[j++] = state_str[vlan_cfg.drop_tag1]; + result[j++] = state_str[vlan_cfg.drop_tag2]; + result[j++] = state_str[vlan_cfg.pri_only1]; + result[j++] = state_str[vlan_cfg.pri_only2]; + + hclge_dbg_fill_content(content, sizeof(content), + vlan_offload_items, result, + ARRAY_SIZE(vlan_offload_items)); + *pos += scnprintf(buf + *pos, len - *pos, "%s", content); + } + + return 0; +} + +static int hclge_dbg_dump_vlan_config(struct hclge_dev *hdev, char *buf, + int len) +{ + int pos = 0; + int ret; + + ret = hclge_dbg_dump_vlan_filter_config(hdev, buf, len, &pos); + if (ret) + return ret; + + return hclge_dbg_dump_vlan_offload_config(hdev, buf, len, &pos); +} + +static int hclge_dbg_dump_ptp_info(struct hclge_dev *hdev, char *buf, int len) +{ + struct hclge_ptp *ptp = hdev->ptp; + u32 sw_cfg = ptp->ptp_cfg; + unsigned int tx_start; + unsigned int last_rx; + int pos = 0; + u32 hw_cfg; + int ret; + + pos += scnprintf(buf + pos, len - pos, "phc %s's debug info:\n", + ptp->info.name); + 
pos += scnprintf(buf + pos, len - pos, "ptp enable: %s\n", + test_bit(HCLGE_PTP_FLAG_EN, &ptp->flags) ? + "yes" : "no"); + pos += scnprintf(buf + pos, len - pos, "ptp tx enable: %s\n", + test_bit(HCLGE_PTP_FLAG_TX_EN, &ptp->flags) ? + "yes" : "no"); + pos += scnprintf(buf + pos, len - pos, "ptp rx enable: %s\n", + test_bit(HCLGE_PTP_FLAG_RX_EN, &ptp->flags) ? + "yes" : "no"); + + last_rx = jiffies_to_msecs(ptp->last_rx); + pos += scnprintf(buf + pos, len - pos, "last rx time: %lu.%lu\n", + last_rx / MSEC_PER_SEC, last_rx % MSEC_PER_SEC); + pos += scnprintf(buf + pos, len - pos, "rx count: %lu\n", ptp->rx_cnt); + + tx_start = jiffies_to_msecs(ptp->tx_start); + pos += scnprintf(buf + pos, len - pos, "last tx start time: %lu.%lu\n", + tx_start / MSEC_PER_SEC, tx_start % MSEC_PER_SEC); + pos += scnprintf(buf + pos, len - pos, "tx count: %lu\n", ptp->tx_cnt); + pos += scnprintf(buf + pos, len - pos, "tx skipped count: %lu\n", + ptp->tx_skipped); + pos += scnprintf(buf + pos, len - pos, "tx timeout count: %lu\n", + ptp->tx_timeout); + pos += scnprintf(buf + pos, len - pos, "last tx seqid: %u\n", + ptp->last_tx_seqid); + + ret = hclge_ptp_cfg_qry(hdev, &hw_cfg); + if (ret) + return ret; + + pos += scnprintf(buf + pos, len - pos, "sw_cfg: %#x, hw_cfg: %#x\n", + sw_cfg, hw_cfg); + + pos += scnprintf(buf + pos, len - pos, "tx type: %d, rx filter: %d\n", + ptp->ts_cfg.tx_type, ptp->ts_cfg.rx_filter); + + return 0; +} + +static int hclge_dbg_dump_mac_uc(struct hclge_dev *hdev, char *buf, int len) +{ + hclge_dbg_dump_mac_list(hdev, buf, len, true); + + return 0; +} + +static int hclge_dbg_dump_mac_mc(struct hclge_dev *hdev, char *buf, int len) +{ + hclge_dbg_dump_mac_list(hdev, buf, len, false); + + return 0; +} + +static const struct hclge_dbg_func hclge_dbg_cmd_func[] = { + { + .cmd = HNAE3_DBG_CMD_TM_NODES, + .dbg_dump = hclge_dbg_dump_tm_nodes, + }, + { + .cmd = HNAE3_DBG_CMD_TM_PRI, + .dbg_dump = hclge_dbg_dump_tm_pri, + }, + { + .cmd = HNAE3_DBG_CMD_TM_QSET, + .dbg_dump = hclge_dbg_dump_tm_qset, + }, + { + .cmd = HNAE3_DBG_CMD_TM_MAP, + .dbg_dump = hclge_dbg_dump_tm_map, + }, + { + .cmd = HNAE3_DBG_CMD_TM_PG, + .dbg_dump = hclge_dbg_dump_tm_pg, + }, + { + .cmd = HNAE3_DBG_CMD_TM_PORT, + .dbg_dump = hclge_dbg_dump_tm_port, + }, + { + .cmd = HNAE3_DBG_CMD_TC_SCH_INFO, + .dbg_dump = hclge_dbg_dump_tc, + }, + { + .cmd = HNAE3_DBG_CMD_QOS_PAUSE_CFG, + .dbg_dump = hclge_dbg_dump_qos_pause_cfg, + }, + { + .cmd = HNAE3_DBG_CMD_QOS_PRI_MAP, + .dbg_dump = hclge_dbg_dump_qos_pri_map, + }, + { + .cmd = HNAE3_DBG_CMD_QOS_DSCP_MAP, + .dbg_dump = hclge_dbg_dump_qos_dscp_map, + }, + { + .cmd = HNAE3_DBG_CMD_QOS_BUF_CFG, + .dbg_dump = hclge_dbg_dump_qos_buf_cfg, + }, + { + .cmd = HNAE3_DBG_CMD_MAC_UC, + .dbg_dump = hclge_dbg_dump_mac_uc, + }, + { + .cmd = HNAE3_DBG_CMD_MAC_MC, + .dbg_dump = hclge_dbg_dump_mac_mc, + }, + { + .cmd = HNAE3_DBG_CMD_MNG_TBL, + .dbg_dump = hclge_dbg_dump_mng_table, + }, + { + .cmd = HNAE3_DBG_CMD_LOOPBACK, + .dbg_dump = hclge_dbg_dump_loopback, + }, + { + .cmd = HNAE3_DBG_CMD_PTP_INFO, + .dbg_dump = hclge_dbg_dump_ptp_info, + }, + { + .cmd = HNAE3_DBG_CMD_INTERRUPT_INFO, + .dbg_dump = hclge_dbg_dump_interrupt, + }, + { + .cmd = HNAE3_DBG_CMD_RESET_INFO, + .dbg_dump = hclge_dbg_dump_rst_info, + }, + { + .cmd = HNAE3_DBG_CMD_IMP_INFO, + .dbg_dump = hclge_dbg_get_imp_stats_info, + }, + { + .cmd = HNAE3_DBG_CMD_NCL_CONFIG, + .dbg_dump = hclge_dbg_dump_ncl_config, + }, + { + .cmd = HNAE3_DBG_CMD_REG_BIOS_COMMON, + .dbg_dump_reg = hclge_dbg_dump_reg_cmd, + }, + { + .cmd = 
HNAE3_DBG_CMD_REG_SSU, + .dbg_dump_reg = hclge_dbg_dump_reg_cmd, + }, + { + .cmd = HNAE3_DBG_CMD_REG_IGU_EGU, + .dbg_dump_reg = hclge_dbg_dump_reg_cmd, + }, + { + .cmd = HNAE3_DBG_CMD_REG_RPU, + .dbg_dump_reg = hclge_dbg_dump_reg_cmd, + }, + { + .cmd = HNAE3_DBG_CMD_REG_NCSI, + .dbg_dump_reg = hclge_dbg_dump_reg_cmd, + }, + { + .cmd = HNAE3_DBG_CMD_REG_RTC, + .dbg_dump_reg = hclge_dbg_dump_reg_cmd, + }, + { + .cmd = HNAE3_DBG_CMD_REG_PPP, + .dbg_dump_reg = hclge_dbg_dump_reg_cmd, + }, + { + .cmd = HNAE3_DBG_CMD_REG_RCB, + .dbg_dump_reg = hclge_dbg_dump_reg_cmd, + }, + { + .cmd = HNAE3_DBG_CMD_REG_TQP, + .dbg_dump_reg = hclge_dbg_dump_reg_cmd, + }, + { + .cmd = HNAE3_DBG_CMD_REG_MAC, + .dbg_dump = hclge_dbg_dump_mac, + }, + { + .cmd = HNAE3_DBG_CMD_REG_DCB, + .dbg_dump = hclge_dbg_dump_dcb, + }, + { + .cmd = HNAE3_DBG_CMD_FD_TCAM, + .dbg_dump = hclge_dbg_dump_fd_tcam, + }, + { + .cmd = HNAE3_DBG_CMD_MAC_TNL_STATUS, + .dbg_dump = hclge_dbg_dump_mac_tnl_status, + }, + { + .cmd = HNAE3_DBG_CMD_SERV_INFO, + .dbg_dump = hclge_dbg_dump_serv_info, + }, + { + .cmd = HNAE3_DBG_CMD_VLAN_CONFIG, + .dbg_dump = hclge_dbg_dump_vlan_config, + }, + { + .cmd = HNAE3_DBG_CMD_FD_COUNTER, + .dbg_dump = hclge_dbg_dump_fd_counter, + }, + { + .cmd = HNAE3_DBG_CMD_UMV_INFO, + .dbg_dump = hclge_dbg_dump_umv_info, + }, +}; + +int hclge_dbg_read_cmd(struct hnae3_handle *handle, enum hnae3_dbg_cmd cmd, + char *buf, int len) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + const struct hclge_dbg_func *cmd_func; + struct hclge_dev *hdev = vport->back; + u32 i; + + for (i = 0; i < ARRAY_SIZE(hclge_dbg_cmd_func); i++) { + if (cmd == hclge_dbg_cmd_func[i].cmd) { + cmd_func = &hclge_dbg_cmd_func[i]; + if (cmd_func->dbg_dump) + return cmd_func->dbg_dump(hdev, buf, len); + else + return cmd_func->dbg_dump_reg(hdev, cmd, buf, + len); + } + } + + dev_err(&hdev->pdev->dev, "invalid command(%d)\n", cmd); + return -EINVAL; +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h new file mode 100644 index 000000000..724052928 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h @@ -0,0 +1,774 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2018-2019 Hisilicon Limited. 
*/ + +#ifndef __HCLGE_DEBUGFS_H +#define __HCLGE_DEBUGFS_H + +#include +#include "hclge_cmd.h" + +#define HCLGE_DBG_MNG_TBL_MAX 64 + +#define HCLGE_DBG_MNG_VLAN_MASK_B BIT(0) +#define HCLGE_DBG_MNG_MAC_MASK_B BIT(1) +#define HCLGE_DBG_MNG_ETHER_MASK_B BIT(2) +#define HCLGE_DBG_MNG_E_TYPE_B BIT(11) +#define HCLGE_DBG_MNG_DROP_B BIT(13) +#define HCLGE_DBG_MNG_VLAN_TAG 0x0FFF +#define HCLGE_DBG_MNG_PF_ID 0x0007 +#define HCLGE_DBG_MNG_VF_ID 0x00FF + +/* Get DFX BD number offset */ +#define HCLGE_DBG_DFX_BIOS_OFFSET 1 +#define HCLGE_DBG_DFX_SSU_0_OFFSET 2 +#define HCLGE_DBG_DFX_SSU_1_OFFSET 3 +#define HCLGE_DBG_DFX_IGU_OFFSET 4 +#define HCLGE_DBG_DFX_RPU_0_OFFSET 5 + +#define HCLGE_DBG_DFX_RPU_1_OFFSET 6 +#define HCLGE_DBG_DFX_NCSI_OFFSET 7 +#define HCLGE_DBG_DFX_RTC_OFFSET 8 +#define HCLGE_DBG_DFX_PPP_OFFSET 9 +#define HCLGE_DBG_DFX_RCB_OFFSET 10 +#define HCLGE_DBG_DFX_TQP_OFFSET 11 + +#define HCLGE_DBG_DFX_SSU_2_OFFSET 12 + +struct hclge_qos_pri_map_cmd { + u8 pri0_tc : 4, + pri1_tc : 4; + u8 pri2_tc : 4, + pri3_tc : 4; + u8 pri4_tc : 4, + pri5_tc : 4; + u8 pri6_tc : 4, + pri7_tc : 4; + u8 vlan_pri : 4, + rev : 4; +}; + +struct hclge_dbg_bitmap_cmd { + union { + u8 bitmap; + struct { + u8 bit0 : 1, + bit1 : 1, + bit2 : 1, + bit3 : 1, + bit4 : 1, + bit5 : 1, + bit6 : 1, + bit7 : 1; + }; + }; +}; + +struct hclge_dbg_reg_common_msg { + int msg_num; + int offset; + enum hclge_opcode_type cmd; +}; + +struct hclge_dbg_tcam_msg { + u8 stage; + u32 loc; +}; + +#define HCLGE_DBG_MAX_DFX_MSG_LEN 60 +struct hclge_dbg_dfx_message { + int flag; + char message[HCLGE_DBG_MAX_DFX_MSG_LEN]; +}; + +#define HCLGE_DBG_MAC_REG_TYPE_LEN 32 +struct hclge_dbg_reg_type_info { + enum hnae3_dbg_cmd cmd; + const struct hclge_dbg_dfx_message *dfx_msg; + struct hclge_dbg_reg_common_msg reg_msg; +}; + +struct hclge_dbg_func { + enum hnae3_dbg_cmd cmd; + int (*dbg_dump)(struct hclge_dev *hdev, char *buf, int len); + int (*dbg_dump_reg)(struct hclge_dev *hdev, enum hnae3_dbg_cmd cmd, + char *buf, int len); +}; + +struct hclge_dbg_status_dfx_info { + u32 offset; + char message[HCLGE_DBG_MAX_DFX_MSG_LEN]; +}; + +static const struct hclge_dbg_dfx_message hclge_dbg_bios_common_reg[] = { + {false, "Reserved"}, + {true, "BP_CPU_STATE"}, + {true, "DFX_MSIX_INFO_NIC_0"}, + {true, "DFX_MSIX_INFO_NIC_1"}, + {true, "DFX_MSIX_INFO_NIC_2"}, + {true, "DFX_MSIX_INFO_NIC_3"}, + + {true, "DFX_MSIX_INFO_ROC_0"}, + {true, "DFX_MSIX_INFO_ROC_1"}, + {true, "DFX_MSIX_INFO_ROC_2"}, + {true, "DFX_MSIX_INFO_ROC_3"}, + {false, "Reserved"}, + {false, "Reserved"}, +}; + +static const struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_0[] = { + {false, "Reserved"}, + {true, "SSU_ETS_PORT_STATUS"}, + {true, "SSU_ETS_TCG_STATUS"}, + {false, "Reserved"}, + {false, "Reserved"}, + {true, "SSU_BP_STATUS_0"}, + + {true, "SSU_BP_STATUS_1"}, + {true, "SSU_BP_STATUS_2"}, + {true, "SSU_BP_STATUS_3"}, + {true, "SSU_BP_STATUS_4"}, + {true, "SSU_BP_STATUS_5"}, + {true, "SSU_MAC_TX_PFC_IND"}, + + {true, "MAC_SSU_RX_PFC_IND"}, + {true, "BTMP_AGEING_ST_B0"}, + {true, "BTMP_AGEING_ST_B1"}, + {true, "BTMP_AGEING_ST_B2"}, + {false, "Reserved"}, + {false, "Reserved"}, + + {true, "FULL_DROP_NUM"}, + {true, "PART_DROP_NUM"}, + {true, "PPP_KEY_DROP_NUM"}, + {true, "PPP_RLT_DROP_NUM"}, + {true, "LO_PRI_UNICAST_RLT_DROP_NUM"}, + {true, "HI_PRI_MULTICAST_RLT_DROP_NUM"}, + + {true, "LO_PRI_MULTICAST_RLT_DROP_NUM"}, + {true, "NCSI_PACKET_CURR_BUFFER_CNT"}, + {true, "BTMP_AGEING_RLS_CNT_BANK0"}, + {true, "BTMP_AGEING_RLS_CNT_BANK1"}, + {true, "BTMP_AGEING_RLS_CNT_BANK2"}, + {true, 
"SSU_MB_RD_RLT_DROP_CNT"}, + + {true, "SSU_PPP_MAC_KEY_NUM_L"}, + {true, "SSU_PPP_MAC_KEY_NUM_H"}, + {true, "SSU_PPP_HOST_KEY_NUM_L"}, + {true, "SSU_PPP_HOST_KEY_NUM_H"}, + {true, "PPP_SSU_MAC_RLT_NUM_L"}, + {true, "PPP_SSU_MAC_RLT_NUM_H"}, + + {true, "PPP_SSU_HOST_RLT_NUM_L"}, + {true, "PPP_SSU_HOST_RLT_NUM_H"}, + {true, "NCSI_RX_PACKET_IN_CNT_L"}, + {true, "NCSI_RX_PACKET_IN_CNT_H"}, + {true, "NCSI_TX_PACKET_OUT_CNT_L"}, + {true, "NCSI_TX_PACKET_OUT_CNT_H"}, + + {true, "SSU_KEY_DROP_NUM"}, + {true, "MB_UNCOPY_NUM"}, + {true, "RX_OQ_DROP_PKT_CNT"}, + {true, "TX_OQ_DROP_PKT_CNT"}, + {true, "BANK_UNBALANCE_DROP_CNT"}, + {true, "BANK_UNBALANCE_RX_DROP_CNT"}, + + {true, "NIC_L2_ERR_DROP_PKT_CNT"}, + {true, "ROC_L2_ERR_DROP_PKT_CNT"}, + {true, "NIC_L2_ERR_DROP_PKT_CNT_RX"}, + {true, "ROC_L2_ERR_DROP_PKT_CNT_RX"}, + {true, "RX_OQ_GLB_DROP_PKT_CNT"}, + {false, "Reserved"}, + + {true, "LO_PRI_UNICAST_CUR_CNT"}, + {true, "HI_PRI_MULTICAST_CUR_CNT"}, + {true, "LO_PRI_MULTICAST_CUR_CNT"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, +}; + +static const struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_1[] = { + {true, "prt_id"}, + {true, "PACKET_TC_CURR_BUFFER_CNT_0"}, + {true, "PACKET_TC_CURR_BUFFER_CNT_1"}, + {true, "PACKET_TC_CURR_BUFFER_CNT_2"}, + {true, "PACKET_TC_CURR_BUFFER_CNT_3"}, + {true, "PACKET_TC_CURR_BUFFER_CNT_4"}, + + {true, "PACKET_TC_CURR_BUFFER_CNT_5"}, + {true, "PACKET_TC_CURR_BUFFER_CNT_6"}, + {true, "PACKET_TC_CURR_BUFFER_CNT_7"}, + {true, "PACKET_CURR_BUFFER_CNT"}, + {false, "Reserved"}, + {false, "Reserved"}, + + {true, "RX_PACKET_IN_CNT_L"}, + {true, "RX_PACKET_IN_CNT_H"}, + {true, "RX_PACKET_OUT_CNT_L"}, + {true, "RX_PACKET_OUT_CNT_H"}, + {true, "TX_PACKET_IN_CNT_L"}, + {true, "TX_PACKET_IN_CNT_H"}, + + {true, "TX_PACKET_OUT_CNT_L"}, + {true, "TX_PACKET_OUT_CNT_H"}, + {true, "ROC_RX_PACKET_IN_CNT_L"}, + {true, "ROC_RX_PACKET_IN_CNT_H"}, + {true, "ROC_TX_PACKET_OUT_CNT_L"}, + {true, "ROC_TX_PACKET_OUT_CNT_H"}, + + {true, "RX_PACKET_TC_IN_CNT_0_L"}, + {true, "RX_PACKET_TC_IN_CNT_0_H"}, + {true, "RX_PACKET_TC_IN_CNT_1_L"}, + {true, "RX_PACKET_TC_IN_CNT_1_H"}, + {true, "RX_PACKET_TC_IN_CNT_2_L"}, + {true, "RX_PACKET_TC_IN_CNT_2_H"}, + + {true, "RX_PACKET_TC_IN_CNT_3_L"}, + {true, "RX_PACKET_TC_IN_CNT_3_H"}, + {true, "RX_PACKET_TC_IN_CNT_4_L"}, + {true, "RX_PACKET_TC_IN_CNT_4_H"}, + {true, "RX_PACKET_TC_IN_CNT_5_L"}, + {true, "RX_PACKET_TC_IN_CNT_5_H"}, + + {true, "RX_PACKET_TC_IN_CNT_6_L"}, + {true, "RX_PACKET_TC_IN_CNT_6_H"}, + {true, "RX_PACKET_TC_IN_CNT_7_L"}, + {true, "RX_PACKET_TC_IN_CNT_7_H"}, + {true, "RX_PACKET_TC_OUT_CNT_0_L"}, + {true, "RX_PACKET_TC_OUT_CNT_0_H"}, + + {true, "RX_PACKET_TC_OUT_CNT_1_L"}, + {true, "RX_PACKET_TC_OUT_CNT_1_H"}, + {true, "RX_PACKET_TC_OUT_CNT_2_L"}, + {true, "RX_PACKET_TC_OUT_CNT_2_H"}, + {true, "RX_PACKET_TC_OUT_CNT_3_L"}, + {true, "RX_PACKET_TC_OUT_CNT_3_H"}, + + {true, "RX_PACKET_TC_OUT_CNT_4_L"}, + {true, "RX_PACKET_TC_OUT_CNT_4_H"}, + {true, "RX_PACKET_TC_OUT_CNT_5_L"}, + {true, "RX_PACKET_TC_OUT_CNT_5_H"}, + {true, "RX_PACKET_TC_OUT_CNT_6_L"}, + {true, "RX_PACKET_TC_OUT_CNT_6_H"}, + + {true, "RX_PACKET_TC_OUT_CNT_7_L"}, + {true, "RX_PACKET_TC_OUT_CNT_7_H"}, + {true, "TX_PACKET_TC_IN_CNT_0_L"}, + {true, "TX_PACKET_TC_IN_CNT_0_H"}, + {true, "TX_PACKET_TC_IN_CNT_1_L"}, + {true, "TX_PACKET_TC_IN_CNT_1_H"}, + + {true, "TX_PACKET_TC_IN_CNT_2_L"}, + {true, "TX_PACKET_TC_IN_CNT_2_H"}, + {true, "TX_PACKET_TC_IN_CNT_3_L"}, + {true, "TX_PACKET_TC_IN_CNT_3_H"}, + {true, "TX_PACKET_TC_IN_CNT_4_L"}, + {true, 
"TX_PACKET_TC_IN_CNT_4_H"}, + + {true, "TX_PACKET_TC_IN_CNT_5_L"}, + {true, "TX_PACKET_TC_IN_CNT_5_H"}, + {true, "TX_PACKET_TC_IN_CNT_6_L"}, + {true, "TX_PACKET_TC_IN_CNT_6_H"}, + {true, "TX_PACKET_TC_IN_CNT_7_L"}, + {true, "TX_PACKET_TC_IN_CNT_7_H"}, + + {true, "TX_PACKET_TC_OUT_CNT_0_L"}, + {true, "TX_PACKET_TC_OUT_CNT_0_H"}, + {true, "TX_PACKET_TC_OUT_CNT_1_L"}, + {true, "TX_PACKET_TC_OUT_CNT_1_H"}, + {true, "TX_PACKET_TC_OUT_CNT_2_L"}, + {true, "TX_PACKET_TC_OUT_CNT_2_H"}, + + {true, "TX_PACKET_TC_OUT_CNT_3_L"}, + {true, "TX_PACKET_TC_OUT_CNT_3_H"}, + {true, "TX_PACKET_TC_OUT_CNT_4_L"}, + {true, "TX_PACKET_TC_OUT_CNT_4_H"}, + {true, "TX_PACKET_TC_OUT_CNT_5_L"}, + {true, "TX_PACKET_TC_OUT_CNT_5_H"}, + + {true, "TX_PACKET_TC_OUT_CNT_6_L"}, + {true, "TX_PACKET_TC_OUT_CNT_6_H"}, + {true, "TX_PACKET_TC_OUT_CNT_7_L"}, + {true, "TX_PACKET_TC_OUT_CNT_7_H"}, + {false, "Reserved"}, + {false, "Reserved"}, +}; + +static const struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_2[] = { + {true, "OQ_INDEX"}, + {true, "QUEUE_CNT"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, +}; + +static const struct hclge_dbg_dfx_message hclge_dbg_igu_egu_reg[] = { + {true, "prt_id"}, + {true, "IGU_RX_ERR_PKT"}, + {true, "IGU_RX_NO_SOF_PKT"}, + {true, "EGU_TX_1588_SHORT_PKT"}, + {true, "EGU_TX_1588_PKT"}, + {true, "EGU_TX_ERR_PKT"}, + + {true, "IGU_RX_OUT_L2_PKT"}, + {true, "IGU_RX_OUT_L3_PKT"}, + {true, "IGU_RX_OUT_L4_PKT"}, + {true, "IGU_RX_IN_L2_PKT"}, + {true, "IGU_RX_IN_L3_PKT"}, + {true, "IGU_RX_IN_L4_PKT"}, + + {true, "IGU_RX_EL3E_PKT"}, + {true, "IGU_RX_EL4E_PKT"}, + {true, "IGU_RX_L3E_PKT"}, + {true, "IGU_RX_L4E_PKT"}, + {true, "IGU_RX_ROCEE_PKT"}, + {true, "IGU_RX_OUT_UDP0_PKT"}, + + {true, "IGU_RX_IN_UDP0_PKT"}, + {true, "IGU_MC_CAR_DROP_PKT_L"}, + {true, "IGU_MC_CAR_DROP_PKT_H"}, + {true, "IGU_BC_CAR_DROP_PKT_L"}, + {true, "IGU_BC_CAR_DROP_PKT_H"}, + {false, "Reserved"}, + + {true, "IGU_RX_OVERSIZE_PKT_L"}, + {true, "IGU_RX_OVERSIZE_PKT_H"}, + {true, "IGU_RX_UNDERSIZE_PKT_L"}, + {true, "IGU_RX_UNDERSIZE_PKT_H"}, + {true, "IGU_RX_OUT_ALL_PKT_L"}, + {true, "IGU_RX_OUT_ALL_PKT_H"}, + + {true, "IGU_TX_OUT_ALL_PKT_L"}, + {true, "IGU_TX_OUT_ALL_PKT_H"}, + {true, "IGU_RX_UNI_PKT_L"}, + {true, "IGU_RX_UNI_PKT_H"}, + {true, "IGU_RX_MULTI_PKT_L"}, + {true, "IGU_RX_MULTI_PKT_H"}, + + {true, "IGU_RX_BROAD_PKT_L"}, + {true, "IGU_RX_BROAD_PKT_H"}, + {true, "EGU_TX_OUT_ALL_PKT_L"}, + {true, "EGU_TX_OUT_ALL_PKT_H"}, + {true, "EGU_TX_UNI_PKT_L"}, + {true, "EGU_TX_UNI_PKT_H"}, + + {true, "EGU_TX_MULTI_PKT_L"}, + {true, "EGU_TX_MULTI_PKT_H"}, + {true, "EGU_TX_BROAD_PKT_L"}, + {true, "EGU_TX_BROAD_PKT_H"}, + {true, "IGU_TX_KEY_NUM_L"}, + {true, "IGU_TX_KEY_NUM_H"}, + + {true, "IGU_RX_NON_TUN_PKT_L"}, + {true, "IGU_RX_NON_TUN_PKT_H"}, + {true, "IGU_RX_TUN_PKT_L"}, + {true, "IGU_RX_TUN_PKT_H"}, + {false, "Reserved"}, + {false, "Reserved"}, +}; + +static const struct hclge_dbg_dfx_message hclge_dbg_rpu_reg_0[] = { + {true, "tc_queue_num"}, + {true, "FSM_DFX_ST0"}, + {true, "FSM_DFX_ST1"}, + {true, "RPU_RX_PKT_DROP_CNT"}, + {true, "BUF_WAIT_TIMEOUT"}, + {true, "BUF_WAIT_TIMEOUT_QID"}, +}; + +static const struct hclge_dbg_dfx_message hclge_dbg_rpu_reg_1[] = { + {false, "Reserved"}, + {true, "FIFO_DFX_ST0"}, + {true, "FIFO_DFX_ST1"}, + {true, "FIFO_DFX_ST2"}, + {true, "FIFO_DFX_ST3"}, + {true, "FIFO_DFX_ST4"}, + + {true, "FIFO_DFX_ST5"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, +}; + +static const struct 
hclge_dbg_dfx_message hclge_dbg_ncsi_reg[] = { + {false, "Reserved"}, + {true, "NCSI_EGU_TX_FIFO_STS"}, + {true, "NCSI_PAUSE_STATUS"}, + {true, "NCSI_RX_CTRL_DMAC_ERR_CNT"}, + {true, "NCSI_RX_CTRL_SMAC_ERR_CNT"}, + {true, "NCSI_RX_CTRL_CKS_ERR_CNT"}, + + {true, "NCSI_RX_CTRL_PKT_CNT"}, + {true, "NCSI_RX_PT_DMAC_ERR_CNT"}, + {true, "NCSI_RX_PT_SMAC_ERR_CNT"}, + {true, "NCSI_RX_PT_PKT_CNT"}, + {true, "NCSI_RX_FCS_ERR_CNT"}, + {true, "NCSI_TX_CTRL_DMAC_ERR_CNT"}, + + {true, "NCSI_TX_CTRL_SMAC_ERR_CNT"}, + {true, "NCSI_TX_CTRL_PKT_CNT"}, + {true, "NCSI_TX_PT_DMAC_ERR_CNT"}, + {true, "NCSI_TX_PT_SMAC_ERR_CNT"}, + {true, "NCSI_TX_PT_PKT_CNT"}, + {true, "NCSI_TX_PT_PKT_TRUNC_CNT"}, + + {true, "NCSI_TX_PT_PKT_ERR_CNT"}, + {true, "NCSI_TX_CTRL_PKT_ERR_CNT"}, + {true, "NCSI_RX_CTRL_PKT_TRUNC_CNT"}, + {true, "NCSI_RX_CTRL_PKT_CFLIT_CNT"}, + {false, "Reserved"}, + {false, "Reserved"}, + + {true, "NCSI_MAC_RX_OCTETS_OK"}, + {true, "NCSI_MAC_RX_OCTETS_BAD"}, + {true, "NCSI_MAC_RX_UC_PKTS"}, + {true, "NCSI_MAC_RX_MC_PKTS"}, + {true, "NCSI_MAC_RX_BC_PKTS"}, + {true, "NCSI_MAC_RX_PKTS_64OCTETS"}, + + {true, "NCSI_MAC_RX_PKTS_65TO127OCTETS"}, + {true, "NCSI_MAC_RX_PKTS_128TO255OCTETS"}, + {true, "NCSI_MAC_RX_PKTS_255TO511OCTETS"}, + {true, "NCSI_MAC_RX_PKTS_512TO1023OCTETS"}, + {true, "NCSI_MAC_RX_PKTS_1024TO1518OCTETS"}, + {true, "NCSI_MAC_RX_PKTS_1519TOMAXOCTETS"}, + + {true, "NCSI_MAC_RX_FCS_ERRORS"}, + {true, "NCSI_MAC_RX_LONG_ERRORS"}, + {true, "NCSI_MAC_RX_JABBER_ERRORS"}, + {true, "NCSI_MAC_RX_RUNT_ERR_CNT"}, + {true, "NCSI_MAC_RX_SHORT_ERR_CNT"}, + {true, "NCSI_MAC_RX_FILT_PKT_CNT"}, + + {true, "NCSI_MAC_RX_OCTETS_TOTAL_FILT"}, + {true, "NCSI_MAC_TX_OCTETS_OK"}, + {true, "NCSI_MAC_TX_OCTETS_BAD"}, + {true, "NCSI_MAC_TX_UC_PKTS"}, + {true, "NCSI_MAC_TX_MC_PKTS"}, + {true, "NCSI_MAC_TX_BC_PKTS"}, + + {true, "NCSI_MAC_TX_PKTS_64OCTETS"}, + {true, "NCSI_MAC_TX_PKTS_65TO127OCTETS"}, + {true, "NCSI_MAC_TX_PKTS_128TO255OCTETS"}, + {true, "NCSI_MAC_TX_PKTS_256TO511OCTETS"}, + {true, "NCSI_MAC_TX_PKTS_512TO1023OCTETS"}, + {true, "NCSI_MAC_TX_PKTS_1024TO1518OCTETS"}, + + {true, "NCSI_MAC_TX_PKTS_1519TOMAXOCTETS"}, + {true, "NCSI_MAC_TX_UNDERRUN"}, + {true, "NCSI_MAC_TX_CRC_ERROR"}, + {true, "NCSI_MAC_TX_PAUSE_FRAMES"}, + {true, "NCSI_MAC_RX_PAD_PKTS"}, + {true, "NCSI_MAC_RX_PAUSE_FRAMES"}, +}; + +static const struct hclge_dbg_dfx_message hclge_dbg_rtc_reg[] = { + {false, "Reserved"}, + {true, "LGE_IGU_AFIFO_DFX_0"}, + {true, "LGE_IGU_AFIFO_DFX_1"}, + {true, "LGE_IGU_AFIFO_DFX_2"}, + {true, "LGE_IGU_AFIFO_DFX_3"}, + {true, "LGE_IGU_AFIFO_DFX_4"}, + + {true, "LGE_IGU_AFIFO_DFX_5"}, + {true, "LGE_IGU_AFIFO_DFX_6"}, + {true, "LGE_IGU_AFIFO_DFX_7"}, + {true, "LGE_EGU_AFIFO_DFX_0"}, + {true, "LGE_EGU_AFIFO_DFX_1"}, + {true, "LGE_EGU_AFIFO_DFX_2"}, + + {true, "LGE_EGU_AFIFO_DFX_3"}, + {true, "LGE_EGU_AFIFO_DFX_4"}, + {true, "LGE_EGU_AFIFO_DFX_5"}, + {true, "LGE_EGU_AFIFO_DFX_6"}, + {true, "LGE_EGU_AFIFO_DFX_7"}, + {true, "CGE_IGU_AFIFO_DFX_0"}, + + {true, "CGE_IGU_AFIFO_DFX_1"}, + {true, "CGE_EGU_AFIFO_DFX_0"}, + {true, "CGE_EGU_AFIFO_DFX_1"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, +}; + +static const struct hclge_dbg_dfx_message hclge_dbg_ppp_reg[] = { + {false, "Reserved"}, + {true, "DROP_FROM_PRT_PKT_CNT"}, + {true, "DROP_FROM_HOST_PKT_CNT"}, + {true, "DROP_TX_VLAN_PROC_CNT"}, + {true, "DROP_MNG_CNT"}, + {true, "DROP_FD_CNT"}, + + {true, "DROP_NO_DST_CNT"}, + {true, "DROP_MC_MBID_FULL_CNT"}, + {true, "DROP_SC_FILTERED"}, + {true, "PPP_MC_DROP_PKT_CNT"}, + {true, "DROP_PT_CNT"}, + 
{true, "DROP_MAC_ANTI_SPOOF_CNT"}, + + {true, "DROP_IG_VFV_CNT"}, + {true, "DROP_IG_PRTV_CNT"}, + {true, "DROP_CNM_PFC_PAUSE_CNT"}, + {true, "DROP_TORUS_TC_CNT"}, + {true, "DROP_TORUS_LPBK_CNT"}, + {true, "PPP_HFS_STS"}, + + {true, "PPP_MC_RSLT_STS"}, + {true, "PPP_P3U_STS"}, + {true, "PPP_RSLT_DESCR_STS"}, + {true, "PPP_UMV_STS_0"}, + {true, "PPP_UMV_STS_1"}, + {true, "PPP_VFV_STS"}, + + {true, "PPP_GRO_KEY_CNT"}, + {true, "PPP_GRO_INFO_CNT"}, + {true, "PPP_GRO_DROP_CNT"}, + {true, "PPP_GRO_OUT_CNT"}, + {true, "PPP_GRO_KEY_MATCH_DATA_CNT"}, + {true, "PPP_GRO_KEY_MATCH_TCAM_CNT"}, + + {true, "PPP_GRO_INFO_MATCH_CNT"}, + {true, "PPP_GRO_FREE_ENTRY_CNT"}, + {true, "PPP_GRO_INNER_DFX_SIGNAL"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, + + {true, "GET_RX_PKT_CNT_L"}, + {true, "GET_RX_PKT_CNT_H"}, + {true, "GET_TX_PKT_CNT_L"}, + {true, "GET_TX_PKT_CNT_H"}, + {true, "SEND_UC_PRT2HOST_PKT_CNT_L"}, + {true, "SEND_UC_PRT2HOST_PKT_CNT_H"}, + + {true, "SEND_UC_PRT2PRT_PKT_CNT_L"}, + {true, "SEND_UC_PRT2PRT_PKT_CNT_H"}, + {true, "SEND_UC_HOST2HOST_PKT_CNT_L"}, + {true, "SEND_UC_HOST2HOST_PKT_CNT_H"}, + {true, "SEND_UC_HOST2PRT_PKT_CNT_L"}, + {true, "SEND_UC_HOST2PRT_PKT_CNT_H"}, + + {true, "SEND_MC_FROM_PRT_CNT_L"}, + {true, "SEND_MC_FROM_PRT_CNT_H"}, + {true, "SEND_MC_FROM_HOST_CNT_L"}, + {true, "SEND_MC_FROM_HOST_CNT_H"}, + {true, "SSU_MC_RD_CNT_L"}, + {true, "SSU_MC_RD_CNT_H"}, + + {true, "SSU_MC_DROP_CNT_L"}, + {true, "SSU_MC_DROP_CNT_H"}, + {true, "SSU_MC_RD_PKT_CNT_L"}, + {true, "SSU_MC_RD_PKT_CNT_H"}, + {true, "PPP_MC_2HOST_PKT_CNT_L"}, + {true, "PPP_MC_2HOST_PKT_CNT_H"}, + + {true, "PPP_MC_2PRT_PKT_CNT_L"}, + {true, "PPP_MC_2PRT_PKT_CNT_H"}, + {true, "NTSNOS_PKT_CNT_L"}, + {true, "NTSNOS_PKT_CNT_H"}, + {true, "NTUP_PKT_CNT_L"}, + {true, "NTUP_PKT_CNT_H"}, + + {true, "NTLCL_PKT_CNT_L"}, + {true, "NTLCL_PKT_CNT_H"}, + {true, "NTTGT_PKT_CNT_L"}, + {true, "NTTGT_PKT_CNT_H"}, + {true, "RTNS_PKT_CNT_L"}, + {true, "RTNS_PKT_CNT_H"}, + + {true, "RTLPBK_PKT_CNT_L"}, + {true, "RTLPBK_PKT_CNT_H"}, + {true, "NR_PKT_CNT_L"}, + {true, "NR_PKT_CNT_H"}, + {true, "RR_PKT_CNT_L"}, + {true, "RR_PKT_CNT_H"}, + + {true, "MNG_TBL_HIT_CNT_L"}, + {true, "MNG_TBL_HIT_CNT_H"}, + {true, "FD_TBL_HIT_CNT_L"}, + {true, "FD_TBL_HIT_CNT_H"}, + {true, "FD_LKUP_CNT_L"}, + {true, "FD_LKUP_CNT_H"}, + + {true, "BC_HIT_CNT_L"}, + {true, "BC_HIT_CNT_H"}, + {true, "UM_TBL_UC_HIT_CNT_L"}, + {true, "UM_TBL_UC_HIT_CNT_H"}, + {true, "UM_TBL_MC_HIT_CNT_L"}, + {true, "UM_TBL_MC_HIT_CNT_H"}, + + {true, "UM_TBL_VMDQ1_HIT_CNT_L"}, + {true, "UM_TBL_VMDQ1_HIT_CNT_H"}, + {true, "MTA_TBL_HIT_CNT_L"}, + {true, "MTA_TBL_HIT_CNT_H"}, + {true, "FWD_BONDING_HIT_CNT_L"}, + {true, "FWD_BONDING_HIT_CNT_H"}, + + {true, "PROMIS_TBL_HIT_CNT_L"}, + {true, "PROMIS_TBL_HIT_CNT_H"}, + {true, "GET_TUNL_PKT_CNT_L"}, + {true, "GET_TUNL_PKT_CNT_H"}, + {true, "GET_BMC_PKT_CNT_L"}, + {true, "GET_BMC_PKT_CNT_H"}, + + {true, "SEND_UC_PRT2BMC_PKT_CNT_L"}, + {true, "SEND_UC_PRT2BMC_PKT_CNT_H"}, + {true, "SEND_UC_HOST2BMC_PKT_CNT_L"}, + {true, "SEND_UC_HOST2BMC_PKT_CNT_H"}, + {true, "SEND_UC_BMC2HOST_PKT_CNT_L"}, + {true, "SEND_UC_BMC2HOST_PKT_CNT_H"}, + + {true, "SEND_UC_BMC2PRT_PKT_CNT_L"}, + {true, "SEND_UC_BMC2PRT_PKT_CNT_H"}, + {true, "PPP_MC_2BMC_PKT_CNT_L"}, + {true, "PPP_MC_2BMC_PKT_CNT_H"}, + {true, "VLAN_MIRR_CNT_L"}, + {true, "VLAN_MIRR_CNT_H"}, + + {true, "IG_MIRR_CNT_L"}, + {true, "IG_MIRR_CNT_H"}, + {true, "EG_MIRR_CNT_L"}, + {true, "EG_MIRR_CNT_H"}, + {true, "RX_DEFAULT_HOST_HIT_CNT_L"}, + {true, "RX_DEFAULT_HOST_HIT_CNT_H"}, + + 
{true, "LAN_PAIR_CNT_L"}, + {true, "LAN_PAIR_CNT_H"}, + {true, "UM_TBL_MC_HIT_PKT_CNT_L"}, + {true, "UM_TBL_MC_HIT_PKT_CNT_H"}, + {true, "MTA_TBL_HIT_PKT_CNT_L"}, + {true, "MTA_TBL_HIT_PKT_CNT_H"}, + + {true, "PROMIS_TBL_HIT_PKT_CNT_L"}, + {true, "PROMIS_TBL_HIT_PKT_CNT_H"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, +}; + +static const struct hclge_dbg_dfx_message hclge_dbg_rcb_reg[] = { + {false, "Reserved"}, + {true, "FSM_DFX_ST0"}, + {true, "FSM_DFX_ST1"}, + {true, "FSM_DFX_ST2"}, + {true, "FIFO_DFX_ST0"}, + {true, "FIFO_DFX_ST1"}, + + {true, "FIFO_DFX_ST2"}, + {true, "FIFO_DFX_ST3"}, + {true, "FIFO_DFX_ST4"}, + {true, "FIFO_DFX_ST5"}, + {true, "FIFO_DFX_ST6"}, + {true, "FIFO_DFX_ST7"}, + + {true, "FIFO_DFX_ST8"}, + {true, "FIFO_DFX_ST9"}, + {true, "FIFO_DFX_ST10"}, + {true, "FIFO_DFX_ST11"}, + {true, "Q_CREDIT_VLD_0"}, + {true, "Q_CREDIT_VLD_1"}, + + {true, "Q_CREDIT_VLD_2"}, + {true, "Q_CREDIT_VLD_3"}, + {true, "Q_CREDIT_VLD_4"}, + {true, "Q_CREDIT_VLD_5"}, + {true, "Q_CREDIT_VLD_6"}, + {true, "Q_CREDIT_VLD_7"}, + + {true, "Q_CREDIT_VLD_8"}, + {true, "Q_CREDIT_VLD_9"}, + {true, "Q_CREDIT_VLD_10"}, + {true, "Q_CREDIT_VLD_11"}, + {true, "Q_CREDIT_VLD_12"}, + {true, "Q_CREDIT_VLD_13"}, + + {true, "Q_CREDIT_VLD_14"}, + {true, "Q_CREDIT_VLD_15"}, + {true, "Q_CREDIT_VLD_16"}, + {true, "Q_CREDIT_VLD_17"}, + {true, "Q_CREDIT_VLD_18"}, + {true, "Q_CREDIT_VLD_19"}, + + {true, "Q_CREDIT_VLD_20"}, + {true, "Q_CREDIT_VLD_21"}, + {true, "Q_CREDIT_VLD_22"}, + {true, "Q_CREDIT_VLD_23"}, + {true, "Q_CREDIT_VLD_24"}, + {true, "Q_CREDIT_VLD_25"}, + + {true, "Q_CREDIT_VLD_26"}, + {true, "Q_CREDIT_VLD_27"}, + {true, "Q_CREDIT_VLD_28"}, + {true, "Q_CREDIT_VLD_29"}, + {true, "Q_CREDIT_VLD_30"}, + {true, "Q_CREDIT_VLD_31"}, + + {true, "GRO_BD_SERR_CNT"}, + {true, "GRO_CONTEXT_SERR_CNT"}, + {true, "RX_STASH_CFG_SERR_CNT"}, + {true, "AXI_RD_FBD_SERR_CNT"}, + {true, "GRO_BD_MERR_CNT"}, + {true, "GRO_CONTEXT_MERR_CNT"}, + + {true, "RX_STASH_CFG_MERR_CNT"}, + {true, "AXI_RD_FBD_MERR_CNT"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, +}; + +static const struct hclge_dbg_dfx_message hclge_dbg_tqp_reg[] = { + {true, "q_num"}, + {true, "RCB_CFG_RX_RING_TAIL"}, + {true, "RCB_CFG_RX_RING_HEAD"}, + {true, "RCB_CFG_RX_RING_FBDNUM"}, + {true, "RCB_CFG_RX_RING_OFFSET"}, + {true, "RCB_CFG_RX_RING_FBDOFFSET"}, + + {true, "RCB_CFG_RX_RING_PKTNUM_RECORD"}, + {true, "RCB_CFG_TX_RING_TAIL"}, + {true, "RCB_CFG_TX_RING_HEAD"}, + {true, "RCB_CFG_TX_RING_FBDNUM"}, + {true, "RCB_CFG_TX_RING_OFFSET"}, + {true, "RCB_CFG_TX_RING_EBDNUM"}, +}; + +#define HCLGE_DBG_INFO_LEN 256 +#define HCLGE_DBG_VLAN_FLTR_INFO_LEN 256 +#define HCLGE_DBG_VLAN_OFFLOAD_INFO_LEN 512 +#define HCLGE_DBG_ID_LEN 16 +#define HCLGE_DBG_ITEM_NAME_LEN 32 +#define HCLGE_DBG_DATA_STR_LEN 32 +#define HCLGE_DBG_TM_INFO_LEN 256 + +#define HCLGE_BILLION_NANO_SECONDS 1000000000 + +struct hclge_dbg_item { + char name[HCLGE_DBG_ITEM_NAME_LEN]; + u16 interval; /* blank numbers after the item */ +}; + +struct hclge_dbg_vlan_cfg { + u16 pvid; + u8 accept_tag1; + u8 accept_tag2; + u8 accept_untag1; + u8 accept_untag2; + u8 insert_tag1; + u8 insert_tag2; + u8 shift_tag; + u8 strip_tag1; + u8 strip_tag2; + u8 drop_tag1; + u8 drop_tag2; + u8 pri_only1; + u8 pri_only2; +}; + +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c new file mode 100644 index 000000000..4c441e6a5 --- /dev/null +++ 
b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c @@ -0,0 +1,134 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) 2021 Hisilicon Limited. */ + +#include + +#include "hclge_devlink.h" + +static int hclge_devlink_info_get(struct devlink *devlink, + struct devlink_info_req *req, + struct netlink_ext_ack *extack) +{ +#define HCLGE_DEVLINK_FW_STRING_LEN 32 + struct hclge_devlink_priv *priv = devlink_priv(devlink); + char version_str[HCLGE_DEVLINK_FW_STRING_LEN]; + struct hclge_dev *hdev = priv->hdev; + int ret; + + ret = devlink_info_driver_name_put(req, KBUILD_MODNAME); + if (ret) + return ret; + + snprintf(version_str, sizeof(version_str), "%lu.%lu.%lu.%lu", + hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE3_MASK, + HNAE3_FW_VERSION_BYTE3_SHIFT), + hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE2_MASK, + HNAE3_FW_VERSION_BYTE2_SHIFT), + hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE1_MASK, + HNAE3_FW_VERSION_BYTE1_SHIFT), + hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE0_MASK, + HNAE3_FW_VERSION_BYTE0_SHIFT)); + + return devlink_info_version_running_put(req, + DEVLINK_INFO_VERSION_GENERIC_FW, + version_str); +} + +static int hclge_devlink_reload_down(struct devlink *devlink, bool netns_change, + enum devlink_reload_action action, + enum devlink_reload_limit limit, + struct netlink_ext_ack *extack) +{ + struct hclge_devlink_priv *priv = devlink_priv(devlink); + struct hclge_dev *hdev = priv->hdev; + struct hnae3_handle *h = &hdev->vport->nic; + struct pci_dev *pdev = hdev->pdev; + int ret; + + if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) { + dev_err(&pdev->dev, "reset is handling\n"); + return -EBUSY; + } + + switch (action) { + case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: + rtnl_lock(); + ret = hdev->nic_client->ops->reset_notify(h, HNAE3_DOWN_CLIENT); + if (ret) { + rtnl_unlock(); + return ret; + } + + ret = hdev->nic_client->ops->reset_notify(h, + HNAE3_UNINIT_CLIENT); + rtnl_unlock(); + return ret; + default: + return -EOPNOTSUPP; + } +} + +static int hclge_devlink_reload_up(struct devlink *devlink, + enum devlink_reload_action action, + enum devlink_reload_limit limit, + u32 *actions_performed, + struct netlink_ext_ack *extack) +{ + struct hclge_devlink_priv *priv = devlink_priv(devlink); + struct hclge_dev *hdev = priv->hdev; + struct hnae3_handle *h = &hdev->vport->nic; + int ret; + + *actions_performed = BIT(action); + switch (action) { + case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: + rtnl_lock(); + ret = hdev->nic_client->ops->reset_notify(h, HNAE3_INIT_CLIENT); + if (ret) { + rtnl_unlock(); + return ret; + } + + ret = hdev->nic_client->ops->reset_notify(h, HNAE3_UP_CLIENT); + rtnl_unlock(); + return ret; + default: + return -EOPNOTSUPP; + } +} + +static const struct devlink_ops hclge_devlink_ops = { + .info_get = hclge_devlink_info_get, + .reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT), + .reload_down = hclge_devlink_reload_down, + .reload_up = hclge_devlink_reload_up, +}; + +int hclge_devlink_init(struct hclge_dev *hdev) +{ + struct pci_dev *pdev = hdev->pdev; + struct hclge_devlink_priv *priv; + struct devlink *devlink; + + devlink = devlink_alloc(&hclge_devlink_ops, + sizeof(struct hclge_devlink_priv), &pdev->dev); + if (!devlink) + return -ENOMEM; + + priv = devlink_priv(devlink); + priv->hdev = hdev; + hdev->devlink = devlink; + + devlink_set_features(devlink, DEVLINK_F_RELOAD); + devlink_register(devlink); + return 0; +} + +void hclge_devlink_uninit(struct hclge_dev *hdev) +{ + struct devlink *devlink = 
hdev->devlink; + + devlink_unregister(devlink); + + devlink_free(devlink); +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.h new file mode 100644 index 000000000..918be0450 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2021 Hisilicon Limited. */ + +#ifndef __HCLGE_DEVLINK_H +#define __HCLGE_DEVLINK_H + +#include "hclge_main.h" + +struct hclge_devlink_priv { + struct hclge_dev *hdev; +}; + +int hclge_devlink_init(struct hclge_dev *hdev); +void hclge_devlink_uninit(struct hclge_dev *hdev); +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c new file mode 100644 index 000000000..6efd768cc --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c @@ -0,0 +1,2940 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) 2016-2017 Hisilicon Limited. */ + +#include "hclge_err.h" + +static const struct hclge_hw_error hclge_imp_tcm_ecc_int[] = { + { + .int_msk = BIT(1), + .msg = "imp_itcm0_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(3), + .msg = "imp_itcm1_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(5), + .msg = "imp_itcm2_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(7), + .msg = "imp_itcm3_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(9), + .msg = "imp_dtcm0_mem0_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(11), + .msg = "imp_dtcm0_mem1_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(13), + .msg = "imp_dtcm1_mem0_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(15), + .msg = "imp_dtcm1_mem1_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(17), + .msg = "imp_itcm4_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + /* sentinel */ + } +}; + +static const struct hclge_hw_error hclge_cmdq_nic_mem_ecc_int[] = { + { + .int_msk = BIT(1), + .msg = "cmdq_nic_rx_depth_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(3), + .msg = "cmdq_nic_tx_depth_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(5), + .msg = "cmdq_nic_rx_tail_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(7), + .msg = "cmdq_nic_tx_tail_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(9), + .msg = "cmdq_nic_rx_head_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(11), + .msg = "cmdq_nic_tx_head_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(13), + .msg = "cmdq_nic_rx_addr_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(15), + .msg = "cmdq_nic_tx_addr_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(17), + .msg = "cmdq_rocee_rx_depth_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(19), + .msg = "cmdq_rocee_tx_depth_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(21), + .msg = "cmdq_rocee_rx_tail_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(23), + .msg = "cmdq_rocee_tx_tail_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(25), + .msg = "cmdq_rocee_rx_head_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(27), + .msg = 
"cmdq_rocee_tx_head_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(29), + .msg = "cmdq_rocee_rx_addr_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(31), + .msg = "cmdq_rocee_tx_addr_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + /* sentinel */ + } +}; + +static const struct hclge_hw_error hclge_tqp_int_ecc_int[] = { + { + .int_msk = BIT(6), + .msg = "tqp_int_cfg_even_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(7), + .msg = "tqp_int_cfg_odd_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(8), + .msg = "tqp_int_ctrl_even_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(9), + .msg = "tqp_int_ctrl_odd_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(10), + .msg = "tx_que_scan_int_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(11), + .msg = "rx_que_scan_int_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + /* sentinel */ + } +}; + +static const struct hclge_hw_error hclge_msix_sram_ecc_int[] = { + { + .int_msk = BIT(1), + .msg = "msix_nic_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(3), + .msg = "msix_rocee_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + /* sentinel */ + } +}; + +static const struct hclge_hw_error hclge_igu_int[] = { + { + .int_msk = BIT(0), + .msg = "igu_rx_buf0_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(2), + .msg = "igu_rx_buf1_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + /* sentinel */ + } +}; + +static const struct hclge_hw_error hclge_igu_egu_tnl_int[] = { + { + .int_msk = BIT(0), + .msg = "rx_buf_overflow", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(1), + .msg = "rx_stp_fifo_overflow", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(2), + .msg = "rx_stp_fifo_underflow", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(3), + .msg = "tx_buf_overflow", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(4), + .msg = "tx_buf_underrun", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(5), + .msg = "rx_stp_buf_overflow", + .reset_level = HNAE3_GLOBAL_RESET + }, { + /* sentinel */ + } +}; + +static const struct hclge_hw_error hclge_ncsi_err_int[] = { + { + .int_msk = BIT(1), + .msg = "ncsi_tx_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + /* sentinel */ + } +}; + +static const struct hclge_hw_error hclge_ppp_mpf_abnormal_int_st1[] = { + { + .int_msk = BIT(0), + .msg = "vf_vlan_ad_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(1), + .msg = "umv_mcast_group_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(2), + .msg = "umv_key_mem0_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(3), + .msg = "umv_key_mem1_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(4), + .msg = "umv_key_mem2_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(5), + .msg = "umv_key_mem3_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(6), + .msg = "umv_ad_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(7), + .msg = "rss_tc_mode_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(8), + .msg = "rss_idt_mem0_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(9), + .msg = "rss_idt_mem1_ecc_mbit_err", + .reset_level = 
HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(10), + .msg = "rss_idt_mem2_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(11), + .msg = "rss_idt_mem3_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(12), + .msg = "rss_idt_mem4_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(13), + .msg = "rss_idt_mem5_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(14), + .msg = "rss_idt_mem6_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(15), + .msg = "rss_idt_mem7_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(16), + .msg = "rss_idt_mem8_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(17), + .msg = "rss_idt_mem9_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(18), + .msg = "rss_idt_mem10_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(19), + .msg = "rss_idt_mem11_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(20), + .msg = "rss_idt_mem12_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(21), + .msg = "rss_idt_mem13_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(22), + .msg = "rss_idt_mem14_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(23), + .msg = "rss_idt_mem15_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(24), + .msg = "port_vlan_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(25), + .msg = "mcast_linear_table_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(26), + .msg = "mcast_result_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(27), + .msg = "flow_director_ad_mem0_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(28), + .msg = "flow_director_ad_mem1_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(29), + .msg = "rx_vlan_tag_memory_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(30), + .msg = "Tx_UP_mapping_config_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + /* sentinel */ + } +}; + +static const struct hclge_hw_error hclge_ppp_pf_abnormal_int[] = { + { + .int_msk = BIT(0), + .msg = "tx_vlan_tag_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(1), + .msg = "rss_list_tc_unassigned_queue_err", + .reset_level = HNAE3_NONE_RESET + }, { + /* sentinel */ + } +}; + +static const struct hclge_hw_error hclge_ppp_mpf_abnormal_int_st3[] = { + { + .int_msk = BIT(0), + .msg = "hfs_fifo_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(1), + .msg = "rslt_descr_fifo_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(2), + .msg = "tx_vlan_tag_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(3), + .msg = "FD_CN0_memory_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(4), + .msg = "FD_CN1_memory_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(5), + .msg = "GRO_AD_memory_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + /* sentinel */ + } +}; + +static const struct hclge_hw_error hclge_tm_sch_rint[] = { + { + .int_msk = BIT(1), + .msg = "tm_sch_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(2), + .msg = "tm_sch_port_shap_sub_fifo_wr_err", + .reset_level = 
HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(3), + .msg = "tm_sch_port_shap_sub_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(4), + .msg = "tm_sch_pg_pshap_sub_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(5), + .msg = "tm_sch_pg_pshap_sub_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(6), + .msg = "tm_sch_pg_cshap_sub_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(7), + .msg = "tm_sch_pg_cshap_sub_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(8), + .msg = "tm_sch_pri_pshap_sub_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(9), + .msg = "tm_sch_pri_pshap_sub_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(10), + .msg = "tm_sch_pri_cshap_sub_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(11), + .msg = "tm_sch_pri_cshap_sub_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(12), + .msg = "tm_sch_port_shap_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(13), + .msg = "tm_sch_port_shap_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(14), + .msg = "tm_sch_pg_pshap_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(15), + .msg = "tm_sch_pg_pshap_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(16), + .msg = "tm_sch_pg_cshap_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(17), + .msg = "tm_sch_pg_cshap_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(18), + .msg = "tm_sch_pri_pshap_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(19), + .msg = "tm_sch_pri_pshap_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(20), + .msg = "tm_sch_pri_cshap_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(21), + .msg = "tm_sch_pri_cshap_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(22), + .msg = "tm_sch_rq_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(23), + .msg = "tm_sch_rq_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(24), + .msg = "tm_sch_nq_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(25), + .msg = "tm_sch_nq_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(26), + .msg = "tm_sch_roce_up_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(27), + .msg = "tm_sch_roce_up_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(28), + .msg = "tm_sch_rcb_byte_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(29), + .msg = "tm_sch_rcb_byte_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(30), + .msg = "tm_sch_ssu_byte_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(31), + .msg = "tm_sch_ssu_byte_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + /* sentinel */ + } +}; + +static const struct hclge_hw_error hclge_qcn_fifo_rint[] = { + { + .int_msk = BIT(0), + .msg = "qcn_shap_gp0_sch_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(1), + .msg = "qcn_shap_gp0_sch_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(2), + .msg = "qcn_shap_gp1_sch_fifo_rd_err", + .reset_level = 
HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(3), + .msg = "qcn_shap_gp1_sch_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(4), + .msg = "qcn_shap_gp2_sch_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(5), + .msg = "qcn_shap_gp2_sch_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(6), + .msg = "qcn_shap_gp3_sch_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(7), + .msg = "qcn_shap_gp3_sch_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(8), + .msg = "qcn_shap_gp0_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(9), + .msg = "qcn_shap_gp0_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(10), + .msg = "qcn_shap_gp1_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(11), + .msg = "qcn_shap_gp1_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(12), + .msg = "qcn_shap_gp2_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(13), + .msg = "qcn_shap_gp2_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(14), + .msg = "qcn_shap_gp3_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(15), + .msg = "qcn_shap_gp3_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(16), + .msg = "qcn_byte_info_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(17), + .msg = "qcn_byte_info_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + /* sentinel */ + } +}; + +static const struct hclge_hw_error hclge_qcn_ecc_rint[] = { + { + .int_msk = BIT(1), + .msg = "qcn_byte_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(3), + .msg = "qcn_time_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(5), + .msg = "qcn_fb_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(7), + .msg = "qcn_link_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(9), + .msg = "qcn_rate_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(11), + .msg = "qcn_tmplt_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(13), + .msg = "qcn_shap_cfg_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(15), + .msg = "qcn_gp0_barrel_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(17), + .msg = "qcn_gp1_barrel_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(19), + .msg = "qcn_gp2_barrel_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(21), + .msg = "qcn_gp3_barral_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + /* sentinel */ + } +}; + +static const struct hclge_hw_error hclge_mac_afifo_tnl_int[] = { + { + .int_msk = BIT(0), + .msg = "egu_cge_afifo_ecc_1bit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(1), + .msg = "egu_cge_afifo_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(2), + .msg = "egu_lge_afifo_ecc_1bit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(3), + .msg = "egu_lge_afifo_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(4), + .msg = "cge_igu_afifo_ecc_1bit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(5), + .msg = "cge_igu_afifo_ecc_mbit_err", + .reset_level 
= HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(6), + .msg = "lge_igu_afifo_ecc_1bit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(7), + .msg = "lge_igu_afifo_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(8), + .msg = "cge_igu_afifo_overflow_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(9), + .msg = "lge_igu_afifo_overflow_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(10), + .msg = "egu_cge_afifo_underrun_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(11), + .msg = "egu_lge_afifo_underrun_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(12), + .msg = "egu_ge_afifo_underrun_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(13), + .msg = "ge_igu_afifo_overflow_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + /* sentinel */ + } +}; + +static const struct hclge_hw_error hclge_ppu_mpf_abnormal_int_st2[] = { + { + .int_msk = BIT(13), + .msg = "rpu_rx_pkt_bit32_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(14), + .msg = "rpu_rx_pkt_bit33_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(15), + .msg = "rpu_rx_pkt_bit34_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(16), + .msg = "rpu_rx_pkt_bit35_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(17), + .msg = "rcb_tx_ring_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(18), + .msg = "rcb_rx_ring_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(19), + .msg = "rcb_tx_fbd_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(20), + .msg = "rcb_rx_ebd_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(21), + .msg = "rcb_tso_info_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(22), + .msg = "rcb_tx_int_info_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(23), + .msg = "rcb_rx_int_info_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(24), + .msg = "tpu_tx_pkt_0_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(25), + .msg = "tpu_tx_pkt_1_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(26), + .msg = "rd_bus_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(27), + .msg = "wr_bus_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(28), + .msg = "reg_search_miss", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(29), + .msg = "rx_q_search_miss", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(30), + .msg = "ooo_ecc_err_detect", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(31), + .msg = "ooo_ecc_err_multpl", + .reset_level = HNAE3_GLOBAL_RESET + }, { + /* sentinel */ + } +}; + +static const struct hclge_hw_error hclge_ppu_mpf_abnormal_int_st3[] = { + { + .int_msk = BIT(4), + .msg = "gro_bd_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(5), + .msg = "gro_context_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(6), + .msg = "rx_stash_cfg_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(7), + .msg = "axi_rd_fbd_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + /* sentinel */ + } +}; + +static const struct hclge_hw_error hclge_ppu_pf_abnormal_int[] = { + { + .int_msk = BIT(0), + .msg = "over_8bd_no_fe", + .reset_level = HNAE3_FUNC_RESET + }, { + 
.int_msk = BIT(1), + .msg = "tso_mss_cmp_min_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(2), + .msg = "tso_mss_cmp_max_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(3), + .msg = "tx_rd_fbd_poison", + .reset_level = HNAE3_FUNC_RESET + }, { + .int_msk = BIT(4), + .msg = "rx_rd_ebd_poison", + .reset_level = HNAE3_FUNC_RESET + }, { + .int_msk = BIT(5), + .msg = "buf_wait_timeout", + .reset_level = HNAE3_NONE_RESET + }, { + /* sentinel */ + } +}; + +static const struct hclge_hw_error hclge_ssu_com_err_int[] = { + { + .int_msk = BIT(0), + .msg = "buf_sum_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(1), + .msg = "ppp_mb_num_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(2), + .msg = "ppp_mbid_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(3), + .msg = "ppp_rlt_mac_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(4), + .msg = "ppp_rlt_host_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(5), + .msg = "cks_edit_position_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(6), + .msg = "cks_edit_condition_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(7), + .msg = "vlan_edit_condition_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(8), + .msg = "vlan_num_ot_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(9), + .msg = "vlan_num_in_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + /* sentinel */ + } +}; + +#define HCLGE_SSU_MEM_ECC_ERR(x) \ +{ \ + .int_msk = BIT(x), \ + .msg = "ssu_mem" #x "_ecc_mbit_err", \ + .reset_level = HNAE3_GLOBAL_RESET \ +} + +static const struct hclge_hw_error hclge_ssu_mem_ecc_err_int[] = { + HCLGE_SSU_MEM_ECC_ERR(0), + HCLGE_SSU_MEM_ECC_ERR(1), + HCLGE_SSU_MEM_ECC_ERR(2), + HCLGE_SSU_MEM_ECC_ERR(3), + HCLGE_SSU_MEM_ECC_ERR(4), + HCLGE_SSU_MEM_ECC_ERR(5), + HCLGE_SSU_MEM_ECC_ERR(6), + HCLGE_SSU_MEM_ECC_ERR(7), + HCLGE_SSU_MEM_ECC_ERR(8), + HCLGE_SSU_MEM_ECC_ERR(9), + HCLGE_SSU_MEM_ECC_ERR(10), + HCLGE_SSU_MEM_ECC_ERR(11), + HCLGE_SSU_MEM_ECC_ERR(12), + HCLGE_SSU_MEM_ECC_ERR(13), + HCLGE_SSU_MEM_ECC_ERR(14), + HCLGE_SSU_MEM_ECC_ERR(15), + HCLGE_SSU_MEM_ECC_ERR(16), + HCLGE_SSU_MEM_ECC_ERR(17), + HCLGE_SSU_MEM_ECC_ERR(18), + HCLGE_SSU_MEM_ECC_ERR(19), + HCLGE_SSU_MEM_ECC_ERR(20), + HCLGE_SSU_MEM_ECC_ERR(21), + HCLGE_SSU_MEM_ECC_ERR(22), + HCLGE_SSU_MEM_ECC_ERR(23), + HCLGE_SSU_MEM_ECC_ERR(24), + HCLGE_SSU_MEM_ECC_ERR(25), + HCLGE_SSU_MEM_ECC_ERR(26), + HCLGE_SSU_MEM_ECC_ERR(27), + HCLGE_SSU_MEM_ECC_ERR(28), + HCLGE_SSU_MEM_ECC_ERR(29), + HCLGE_SSU_MEM_ECC_ERR(30), + HCLGE_SSU_MEM_ECC_ERR(31), + { /* sentinel */ } +}; + +static const struct hclge_hw_error hclge_ssu_port_based_err_int[] = { + { + .int_msk = BIT(0), + .msg = "roc_pkt_without_key_port", + .reset_level = HNAE3_FUNC_RESET + }, { + .int_msk = BIT(1), + .msg = "tpu_pkt_without_key_port", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(2), + .msg = "igu_pkt_without_key_port", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(3), + .msg = "roc_eof_mis_match_port", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(4), + .msg = "tpu_eof_mis_match_port", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(5), + .msg = "igu_eof_mis_match_port", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(6), + .msg = "roc_sof_mis_match_port", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(7), + .msg = "tpu_sof_mis_match_port", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk 
= BIT(8), + .msg = "igu_sof_mis_match_port", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(11), + .msg = "ets_rd_int_rx_port", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(12), + .msg = "ets_wr_int_rx_port", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(13), + .msg = "ets_rd_int_tx_port", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(14), + .msg = "ets_wr_int_tx_port", + .reset_level = HNAE3_GLOBAL_RESET + }, { + /* sentinel */ + } +}; + +static const struct hclge_hw_error hclge_ssu_fifo_overflow_int[] = { + { + .int_msk = BIT(0), + .msg = "ig_mac_inf_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(1), + .msg = "ig_host_inf_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(2), + .msg = "ig_roc_buf_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(3), + .msg = "ig_host_data_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(4), + .msg = "ig_host_key_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(5), + .msg = "tx_qcn_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(6), + .msg = "rx_qcn_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(7), + .msg = "tx_pf_rd_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(8), + .msg = "rx_pf_rd_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(9), + .msg = "qm_eof_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(10), + .msg = "mb_rlt_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(11), + .msg = "dup_uncopy_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(12), + .msg = "dup_cnt_rd_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(13), + .msg = "dup_cnt_drop_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(14), + .msg = "dup_cnt_wrb_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(15), + .msg = "host_cmd_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(16), + .msg = "mac_cmd_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(17), + .msg = "host_cmd_bitmap_empty_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(18), + .msg = "mac_cmd_bitmap_empty_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(19), + .msg = "dup_bitmap_empty_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(20), + .msg = "out_queue_bitmap_empty_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(21), + .msg = "bank2_bitmap_empty_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(22), + .msg = "bank1_bitmap_empty_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(23), + .msg = "bank0_bitmap_empty_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + /* sentinel */ + } +}; + +static const struct hclge_hw_error hclge_ssu_ets_tcg_int[] = { + { + .int_msk = BIT(0), + .msg = "ets_rd_int_rx_tcg", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(1), + .msg = "ets_wr_int_rx_tcg", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(2), + .msg = "ets_rd_int_tx_tcg", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(3), + .msg = "ets_wr_int_tx_tcg", + .reset_level = HNAE3_GLOBAL_RESET + }, { + /* sentinel */ + } +}; + +static const struct hclge_hw_error hclge_ssu_port_based_pf_int[] = { + { + .int_msk = BIT(0), + .msg = "roc_pkt_without_key_port", + .reset_level = HNAE3_FUNC_RESET + }, 
{ + .int_msk = BIT(9), + .msg = "low_water_line_err_port", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(10), + .msg = "hi_water_line_err_port", + .reset_level = HNAE3_GLOBAL_RESET + }, { + /* sentinel */ + } +}; + +static const struct hclge_hw_error hclge_rocee_qmm_ovf_err_int[] = { + { + .int_msk = 0, + .msg = "rocee qmm ovf: sgid invalid err" + }, { + .int_msk = 0x4, + .msg = "rocee qmm ovf: sgid ovf err" + }, { + .int_msk = 0x8, + .msg = "rocee qmm ovf: smac invalid err" + }, { + .int_msk = 0xC, + .msg = "rocee qmm ovf: smac ovf err" + }, { + .int_msk = 0x10, + .msg = "rocee qmm ovf: cqc invalid err" + }, { + .int_msk = 0x11, + .msg = "rocee qmm ovf: cqc ovf err" + }, { + .int_msk = 0x12, + .msg = "rocee qmm ovf: cqc hopnum err" + }, { + .int_msk = 0x13, + .msg = "rocee qmm ovf: cqc ba0 err" + }, { + .int_msk = 0x14, + .msg = "rocee qmm ovf: srqc invalid err" + }, { + .int_msk = 0x15, + .msg = "rocee qmm ovf: srqc ovf err" + }, { + .int_msk = 0x16, + .msg = "rocee qmm ovf: srqc hopnum err" + }, { + .int_msk = 0x17, + .msg = "rocee qmm ovf: srqc ba0 err" + }, { + .int_msk = 0x18, + .msg = "rocee qmm ovf: mpt invalid err" + }, { + .int_msk = 0x19, + .msg = "rocee qmm ovf: mpt ovf err" + }, { + .int_msk = 0x1A, + .msg = "rocee qmm ovf: mpt hopnum err" + }, { + .int_msk = 0x1B, + .msg = "rocee qmm ovf: mpt ba0 err" + }, { + .int_msk = 0x1C, + .msg = "rocee qmm ovf: qpc invalid err" + }, { + .int_msk = 0x1D, + .msg = "rocee qmm ovf: qpc ovf err" + }, { + .int_msk = 0x1E, + .msg = "rocee qmm ovf: qpc hopnum err" + }, { + .int_msk = 0x1F, + .msg = "rocee qmm ovf: qpc ba0 err" + }, { + /* sentinel */ + } +}; + +static const struct hclge_hw_module_id hclge_hw_module_id_st[] = { + { + .module_id = MODULE_NONE, + .msg = "MODULE_NONE" + }, { + .module_id = MODULE_BIOS_COMMON, + .msg = "MODULE_BIOS_COMMON" + }, { + .module_id = MODULE_GE, + .msg = "MODULE_GE" + }, { + .module_id = MODULE_IGU_EGU, + .msg = "MODULE_IGU_EGU" + }, { + .module_id = MODULE_LGE, + .msg = "MODULE_LGE" + }, { + .module_id = MODULE_NCSI, + .msg = "MODULE_NCSI" + }, { + .module_id = MODULE_PPP, + .msg = "MODULE_PPP" + }, { + .module_id = MODULE_QCN, + .msg = "MODULE_QCN" + }, { + .module_id = MODULE_RCB_RX, + .msg = "MODULE_RCB_RX" + }, { + .module_id = MODULE_RTC, + .msg = "MODULE_RTC" + }, { + .module_id = MODULE_SSU, + .msg = "MODULE_SSU" + }, { + .module_id = MODULE_TM, + .msg = "MODULE_TM" + }, { + .module_id = MODULE_RCB_TX, + .msg = "MODULE_RCB_TX" + }, { + .module_id = MODULE_TXDMA, + .msg = "MODULE_TXDMA" + }, { + .module_id = MODULE_MASTER, + .msg = "MODULE_MASTER" + }, { + .module_id = MODULE_HIMAC, + .msg = "MODULE_HIMAC" + }, { + .module_id = MODULE_ROCEE_TOP, + .msg = "MODULE_ROCEE_TOP" + }, { + .module_id = MODULE_ROCEE_TIMER, + .msg = "MODULE_ROCEE_TIMER" + }, { + .module_id = MODULE_ROCEE_MDB, + .msg = "MODULE_ROCEE_MDB" + }, { + .module_id = MODULE_ROCEE_TSP, + .msg = "MODULE_ROCEE_TSP" + }, { + .module_id = MODULE_ROCEE_TRP, + .msg = "MODULE_ROCEE_TRP" + }, { + .module_id = MODULE_ROCEE_SCC, + .msg = "MODULE_ROCEE_SCC" + }, { + .module_id = MODULE_ROCEE_CAEP, + .msg = "MODULE_ROCEE_CAEP" + }, { + .module_id = MODULE_ROCEE_GEN_AC, + .msg = "MODULE_ROCEE_GEN_AC" + }, { + .module_id = MODULE_ROCEE_QMM, + .msg = "MODULE_ROCEE_QMM" + }, { + .module_id = MODULE_ROCEE_LSAN, + .msg = "MODULE_ROCEE_LSAN" + } +}; + +static const struct hclge_hw_type_id hclge_hw_type_id_st[] = { + { + .type_id = NONE_ERROR, + .msg = "none_error" + }, { + .type_id = FIFO_ERROR, + .msg = "fifo_error" + }, { + .type_id = 
MEMORY_ERROR, + .msg = "memory_error" + }, { + .type_id = POISON_ERROR, + .msg = "poison_error" + }, { + .type_id = MSIX_ECC_ERROR, + .msg = "msix_ecc_error" + }, { + .type_id = TQP_INT_ECC_ERROR, + .msg = "tqp_int_ecc_error" + }, { + .type_id = PF_ABNORMAL_INT_ERROR, + .msg = "pf_abnormal_int_error" + }, { + .type_id = MPF_ABNORMAL_INT_ERROR, + .msg = "mpf_abnormal_int_error" + }, { + .type_id = COMMON_ERROR, + .msg = "common_error" + }, { + .type_id = PORT_ERROR, + .msg = "port_error" + }, { + .type_id = ETS_ERROR, + .msg = "ets_error" + }, { + .type_id = NCSI_ERROR, + .msg = "ncsi_error" + }, { + .type_id = GLB_ERROR, + .msg = "glb_error" + }, { + .type_id = LINK_ERROR, + .msg = "link_error" + }, { + .type_id = PTP_ERROR, + .msg = "ptp_error" + }, { + .type_id = ROCEE_NORMAL_ERR, + .msg = "rocee_normal_error" + }, { + .type_id = ROCEE_OVF_ERR, + .msg = "rocee_ovf_error" + }, { + .type_id = ROCEE_BUS_ERR, + .msg = "rocee_bus_error" + }, +}; + +static void hclge_log_error(struct device *dev, char *reg, + const struct hclge_hw_error *err, + u32 err_sts, unsigned long *reset_requests) +{ + while (err->msg) { + if (err->int_msk & err_sts) { + dev_err(dev, "%s %s found [error status=0x%x]\n", + reg, err->msg, err_sts); + if (err->reset_level && + err->reset_level != HNAE3_NONE_RESET) + set_bit(err->reset_level, reset_requests); + } + err++; + } +} + +/* hclge_cmd_query_error: read the error information + * @hdev: pointer to struct hclge_dev + * @desc: descriptor for describing the command + * @cmd: command opcode + * @flag: flag for extended command structure + * + * This function queries the error info from hw register/s using command + */ +static int hclge_cmd_query_error(struct hclge_dev *hdev, + struct hclge_desc *desc, u32 cmd, u16 flag) +{ + struct device *dev = &hdev->pdev->dev; + int desc_num = 1; + int ret; + + hclge_cmd_setup_basic_desc(&desc[0], cmd, true); + if (flag) { + desc[0].flag |= cpu_to_le16(flag); + hclge_cmd_setup_basic_desc(&desc[1], cmd, true); + desc_num = 2; + } + + ret = hclge_cmd_send(&hdev->hw, &desc[0], desc_num); + if (ret) + dev_err(dev, "query error cmd failed (%d)\n", ret); + + return ret; +} + +static int hclge_clear_mac_tnl_int(struct hclge_dev *hdev) +{ + struct hclge_desc desc; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CLEAR_MAC_TNL_INT, false); + desc.data[0] = cpu_to_le32(HCLGE_MAC_TNL_INT_CLR); + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int hclge_config_common_hw_err_int(struct hclge_dev *hdev, bool en) +{ + struct device *dev = &hdev->pdev->dev; + struct hclge_desc desc[2]; + int ret; + + /* configure common error interrupts */ + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_COMMON_ECC_INT_CFG, false); + desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[1], HCLGE_COMMON_ECC_INT_CFG, false); + + if (en) { + desc[0].data[0] = cpu_to_le32(HCLGE_IMP_TCM_ECC_ERR_INT_EN); + desc[0].data[2] = cpu_to_le32(HCLGE_CMDQ_NIC_ECC_ERR_INT_EN | + HCLGE_CMDQ_ROCEE_ECC_ERR_INT_EN); + desc[0].data[3] = cpu_to_le32(HCLGE_IMP_RD_POISON_ERR_INT_EN); + desc[0].data[4] = cpu_to_le32(HCLGE_TQP_ECC_ERR_INT_EN | + HCLGE_MSIX_SRAM_ECC_ERR_INT_EN); + desc[0].data[5] = cpu_to_le32(HCLGE_IMP_ITCM4_ECC_ERR_INT_EN); + } + + desc[1].data[0] = cpu_to_le32(HCLGE_IMP_TCM_ECC_ERR_INT_EN_MASK); + desc[1].data[2] = cpu_to_le32(HCLGE_CMDQ_NIC_ECC_ERR_INT_EN_MASK | + HCLGE_CMDQ_ROCEE_ECC_ERR_INT_EN_MASK); + desc[1].data[3] = cpu_to_le32(HCLGE_IMP_RD_POISON_ERR_INT_EN_MASK); + desc[1].data[4] = cpu_to_le32(HCLGE_TQP_ECC_ERR_INT_EN_MASK 
| + HCLGE_MSIX_SRAM_ECC_ERR_INT_EN_MASK); + desc[1].data[5] = cpu_to_le32(HCLGE_IMP_ITCM4_ECC_ERR_INT_EN_MASK); + + ret = hclge_cmd_send(&hdev->hw, &desc[0], 2); + if (ret) + dev_err(dev, + "fail(%d) to configure common err interrupts\n", ret); + + return ret; +} + +static int hclge_config_ncsi_hw_err_int(struct hclge_dev *hdev, bool en) +{ + struct device *dev = &hdev->pdev->dev; + struct hclge_desc desc; + int ret; + + if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) + return 0; + + /* configure NCSI error interrupts */ + hclge_cmd_setup_basic_desc(&desc, HCLGE_NCSI_INT_EN, false); + if (en) + desc.data[0] = cpu_to_le32(HCLGE_NCSI_ERR_INT_EN); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(dev, + "fail(%d) to configure NCSI error interrupts\n", ret); + + return ret; +} + +static int hclge_config_igu_egu_hw_err_int(struct hclge_dev *hdev, bool en) +{ + struct device *dev = &hdev->pdev->dev; + struct hclge_desc desc; + int ret; + + /* configure IGU,EGU error interrupts */ + hclge_cmd_setup_basic_desc(&desc, HCLGE_IGU_COMMON_INT_EN, false); + desc.data[0] = cpu_to_le32(HCLGE_IGU_ERR_INT_TYPE); + if (en) + desc.data[0] |= cpu_to_le32(HCLGE_IGU_ERR_INT_EN); + + desc.data[1] = cpu_to_le32(HCLGE_IGU_ERR_INT_EN_MASK); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(dev, + "fail(%d) to configure IGU common interrupts\n", ret); + return ret; + } + + hclge_cmd_setup_basic_desc(&desc, HCLGE_IGU_EGU_TNL_INT_EN, false); + if (en) + desc.data[0] = cpu_to_le32(HCLGE_IGU_TNL_ERR_INT_EN); + + desc.data[1] = cpu_to_le32(HCLGE_IGU_TNL_ERR_INT_EN_MASK); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(dev, + "fail(%d) to configure IGU-EGU TNL interrupts\n", ret); + return ret; + } + + ret = hclge_config_ncsi_hw_err_int(hdev, en); + + return ret; +} + +static int hclge_config_ppp_error_interrupt(struct hclge_dev *hdev, u32 cmd, + bool en) +{ + struct device *dev = &hdev->pdev->dev; + struct hclge_desc desc[2]; + int ret; + + /* configure PPP error interrupts */ + hclge_cmd_setup_basic_desc(&desc[0], cmd, false); + desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[1], cmd, false); + + if (cmd == HCLGE_PPP_CMD0_INT_CMD) { + if (en) { + desc[0].data[0] = + cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT0_EN); + desc[0].data[1] = + cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT1_EN); + desc[0].data[4] = cpu_to_le32(HCLGE_PPP_PF_ERR_INT_EN); + } + + desc[1].data[0] = + cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT0_EN_MASK); + desc[1].data[1] = + cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT1_EN_MASK); + if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) + desc[1].data[2] = + cpu_to_le32(HCLGE_PPP_PF_ERR_INT_EN_MASK); + } else if (cmd == HCLGE_PPP_CMD1_INT_CMD) { + if (en) { + desc[0].data[0] = + cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT2_EN); + desc[0].data[1] = + cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT3_EN); + } + + desc[1].data[0] = + cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT2_EN_MASK); + desc[1].data[1] = + cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT3_EN_MASK); + } + + ret = hclge_cmd_send(&hdev->hw, &desc[0], 2); + if (ret) + dev_err(dev, "fail(%d) to configure PPP error intr\n", ret); + + return ret; +} + +static int hclge_config_ppp_hw_err_int(struct hclge_dev *hdev, bool en) +{ + int ret; + + ret = hclge_config_ppp_error_interrupt(hdev, HCLGE_PPP_CMD0_INT_CMD, + en); + if (ret) + return ret; + + ret = hclge_config_ppp_error_interrupt(hdev, HCLGE_PPP_CMD1_INT_CMD, + en); + + return ret; +} + +static int hclge_config_tm_hw_err_int(struct 
hclge_dev *hdev, bool en) +{ + struct device *dev = &hdev->pdev->dev; + struct hclge_desc desc; + int ret; + + /* configure TM SCH hw errors */ + hclge_cmd_setup_basic_desc(&desc, HCLGE_TM_SCH_ECC_INT_EN, false); + if (en) + desc.data[0] = cpu_to_le32(HCLGE_TM_SCH_ECC_ERR_INT_EN); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(dev, "fail(%d) to configure TM SCH errors\n", ret); + return ret; + } + + /* configure TM QCN hw errors */ + hclge_cmd_setup_basic_desc(&desc, HCLGE_TM_QCN_MEM_INT_CFG, false); + desc.data[0] = cpu_to_le32(HCLGE_TM_QCN_ERR_INT_TYPE); + if (en) { + desc.data[0] |= cpu_to_le32(HCLGE_TM_QCN_FIFO_INT_EN); + desc.data[1] = cpu_to_le32(HCLGE_TM_QCN_MEM_ERR_INT_EN); + } + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(dev, + "fail(%d) to configure TM QCN mem errors\n", ret); + + return ret; +} + +static int hclge_config_mac_err_int(struct hclge_dev *hdev, bool en) +{ + struct device *dev = &hdev->pdev->dev; + struct hclge_desc desc; + int ret; + + /* configure MAC common error interrupts */ + hclge_cmd_setup_basic_desc(&desc, HCLGE_MAC_COMMON_INT_EN, false); + if (en) + desc.data[0] = cpu_to_le32(HCLGE_MAC_COMMON_ERR_INT_EN); + + desc.data[1] = cpu_to_le32(HCLGE_MAC_COMMON_ERR_INT_EN_MASK); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(dev, + "fail(%d) to configure MAC COMMON error intr\n", ret); + + return ret; +} + +int hclge_config_mac_tnl_int(struct hclge_dev *hdev, bool en) +{ + struct hclge_desc desc; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_TNL_INT_EN, false); + if (en) + desc.data[0] = cpu_to_le32(HCLGE_MAC_TNL_INT_EN); + else + desc.data[0] = 0; + + desc.data[1] = cpu_to_le32(HCLGE_MAC_TNL_INT_EN_MASK); + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int hclge_config_ppu_error_interrupts(struct hclge_dev *hdev, u32 cmd, + bool en) +{ + struct device *dev = &hdev->pdev->dev; + struct hclge_desc desc[2]; + int desc_num = 1; + int ret; + + /* configure PPU error interrupts */ + if (cmd == HCLGE_PPU_MPF_ECC_INT_CMD) { + hclge_cmd_setup_basic_desc(&desc[0], cmd, false); + desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[1], cmd, false); + if (en) { + desc[0].data[0] = + cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT0_EN); + desc[0].data[1] = + cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT1_EN); + desc[1].data[3] = + cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT3_EN); + desc[1].data[4] = + cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT2_EN); + } + + desc[1].data[0] = + cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT0_EN_MASK); + desc[1].data[1] = + cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT1_EN_MASK); + desc[1].data[2] = + cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT2_EN_MASK); + desc[1].data[3] |= + cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT3_EN_MASK); + desc_num = 2; + } else if (cmd == HCLGE_PPU_MPF_OTHER_INT_CMD) { + hclge_cmd_setup_basic_desc(&desc[0], cmd, false); + if (en) + desc[0].data[0] = + cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT2_EN2); + + desc[0].data[2] = + cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT2_EN2_MASK); + } else if (cmd == HCLGE_PPU_PF_OTHER_INT_CMD) { + hclge_cmd_setup_basic_desc(&desc[0], cmd, false); + if (en) + desc[0].data[0] = + cpu_to_le32(HCLGE_PPU_PF_ABNORMAL_INT_EN); + + desc[0].data[2] = + cpu_to_le32(HCLGE_PPU_PF_ABNORMAL_INT_EN_MASK); + } else { + dev_err(dev, "Invalid cmd to configure PPU error interrupts\n"); + return -EINVAL; + } + + ret = hclge_cmd_send(&hdev->hw, &desc[0], desc_num); + + return ret; +} + +static int hclge_config_ppu_hw_err_int(struct hclge_dev 
*hdev, bool en) +{ + struct device *dev = &hdev->pdev->dev; + int ret; + + ret = hclge_config_ppu_error_interrupts(hdev, HCLGE_PPU_MPF_ECC_INT_CMD, + en); + if (ret) { + dev_err(dev, "fail(%d) to configure PPU MPF ECC error intr\n", + ret); + return ret; + } + + ret = hclge_config_ppu_error_interrupts(hdev, + HCLGE_PPU_MPF_OTHER_INT_CMD, + en); + if (ret) { + dev_err(dev, "fail(%d) to configure PPU MPF other intr\n", ret); + return ret; + } + + ret = hclge_config_ppu_error_interrupts(hdev, + HCLGE_PPU_PF_OTHER_INT_CMD, en); + if (ret) + dev_err(dev, "fail(%d) to configure PPU PF error interrupts\n", + ret); + return ret; +} + +static int hclge_config_ssu_hw_err_int(struct hclge_dev *hdev, bool en) +{ + struct device *dev = &hdev->pdev->dev; + struct hclge_desc desc[2]; + int ret; + + /* configure SSU ecc error interrupts */ + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_SSU_ECC_INT_CMD, false); + desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[1], HCLGE_SSU_ECC_INT_CMD, false); + if (en) { + desc[0].data[0] = cpu_to_le32(HCLGE_SSU_1BIT_ECC_ERR_INT_EN); + desc[0].data[1] = + cpu_to_le32(HCLGE_SSU_MULTI_BIT_ECC_ERR_INT_EN); + desc[0].data[4] = cpu_to_le32(HCLGE_SSU_BIT32_ECC_ERR_INT_EN); + } + + desc[1].data[0] = cpu_to_le32(HCLGE_SSU_1BIT_ECC_ERR_INT_EN_MASK); + desc[1].data[1] = cpu_to_le32(HCLGE_SSU_MULTI_BIT_ECC_ERR_INT_EN_MASK); + desc[1].data[2] = cpu_to_le32(HCLGE_SSU_BIT32_ECC_ERR_INT_EN_MASK); + + ret = hclge_cmd_send(&hdev->hw, &desc[0], 2); + if (ret) { + dev_err(dev, + "fail(%d) to configure SSU ECC error interrupt\n", ret); + return ret; + } + + /* configure SSU common error interrupts */ + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_SSU_COMMON_INT_CMD, false); + desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[1], HCLGE_SSU_COMMON_INT_CMD, false); + + if (en) { + if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) + desc[0].data[0] = + cpu_to_le32(HCLGE_SSU_COMMON_INT_EN); + else + desc[0].data[0] = + cpu_to_le32(HCLGE_SSU_COMMON_INT_EN & ~BIT(5)); + desc[0].data[1] = cpu_to_le32(HCLGE_SSU_PORT_BASED_ERR_INT_EN); + desc[0].data[2] = + cpu_to_le32(HCLGE_SSU_FIFO_OVERFLOW_ERR_INT_EN); + } + + desc[1].data[0] = cpu_to_le32(HCLGE_SSU_COMMON_INT_EN_MASK | + HCLGE_SSU_PORT_BASED_ERR_INT_EN_MASK); + desc[1].data[1] = cpu_to_le32(HCLGE_SSU_FIFO_OVERFLOW_ERR_INT_EN_MASK); + + ret = hclge_cmd_send(&hdev->hw, &desc[0], 2); + if (ret) + dev_err(dev, + "fail(%d) to configure SSU COMMON error intr\n", ret); + + return ret; +} + +/* hclge_query_bd_num: query number of buffer descriptors + * @hdev: pointer to struct hclge_dev + * @is_ras: true for ras, false for msix + * @mpf_bd_num: number of main PF interrupt buffer descriptors + * @pf_bd_num: number of not main PF interrupt buffer descriptors + * + * This function querys number of mpf and pf buffer descriptors. 
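
The kernel-doc above describes hclge_query_bd_num(), which only reports how many buffer descriptors the firmware will use; the RAS and MSI-X handlers later in this file then size a scratch descriptor array from the larger of the two counts, as hclge_handle_all_ras_errors() does further down. A minimal sketch of that call pattern; the helper name hclge_err_alloc_desc() is invented for illustration and does not exist in the driver:

/* Sketch only: query the BD counts, then allocate one array big enough
 * for either the MPF or the PF query/clear command.  The caller owns
 * the returned memory and must kfree() it.
 */
static struct hclge_desc *hclge_err_alloc_desc(struct hclge_dev *hdev,
					       bool is_ras, u32 *mpf_bd_num,
					       u32 *pf_bd_num)
{
	u32 bd_num;

	if (hclge_query_bd_num(hdev, is_ras, mpf_bd_num, pf_bd_num))
		return NULL;

	bd_num = max_t(u32, *mpf_bd_num, *pf_bd_num);
	return kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
}
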
+ */ +static int hclge_query_bd_num(struct hclge_dev *hdev, bool is_ras, + u32 *mpf_bd_num, u32 *pf_bd_num) +{ + struct device *dev = &hdev->pdev->dev; + u32 mpf_min_bd_num, pf_min_bd_num; + enum hclge_opcode_type opcode; + struct hclge_desc desc_bd; + int ret; + + if (is_ras) { + opcode = HCLGE_QUERY_RAS_INT_STS_BD_NUM; + mpf_min_bd_num = HCLGE_MPF_RAS_INT_MIN_BD_NUM; + pf_min_bd_num = HCLGE_PF_RAS_INT_MIN_BD_NUM; + } else { + opcode = HCLGE_QUERY_MSIX_INT_STS_BD_NUM; + mpf_min_bd_num = HCLGE_MPF_MSIX_INT_MIN_BD_NUM; + pf_min_bd_num = HCLGE_PF_MSIX_INT_MIN_BD_NUM; + } + + hclge_cmd_setup_basic_desc(&desc_bd, opcode, true); + ret = hclge_cmd_send(&hdev->hw, &desc_bd, 1); + if (ret) { + dev_err(dev, "fail(%d) to query msix int status bd num\n", + ret); + return ret; + } + + *mpf_bd_num = le32_to_cpu(desc_bd.data[0]); + *pf_bd_num = le32_to_cpu(desc_bd.data[1]); + if (*mpf_bd_num < mpf_min_bd_num || *pf_bd_num < pf_min_bd_num) { + dev_err(dev, "Invalid bd num: mpf(%u), pf(%u)\n", + *mpf_bd_num, *pf_bd_num); + return -EINVAL; + } + + return 0; +} + +/* hclge_handle_mpf_ras_error: handle all main PF RAS errors + * @hdev: pointer to struct hclge_dev + * @desc: descriptor for describing the command + * @num: number of extended command structures + * + * This function handles all the main PF RAS errors in the + * hw register/s using command. + */ +static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev, + struct hclge_desc *desc, + int num) +{ + struct hnae3_ae_dev *ae_dev = hdev->ae_dev; + struct device *dev = &hdev->pdev->dev; + __le32 *desc_data; + u32 status; + int ret; + + /* query all main PF RAS errors */ + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_CLEAR_MPF_RAS_INT, + true); + ret = hclge_cmd_send(&hdev->hw, &desc[0], num); + if (ret) { + dev_err(dev, "query all mpf ras int cmd failed (%d)\n", ret); + return ret; + } + + /* log HNS common errors */ + status = le32_to_cpu(desc[0].data[0]); + if (status) + hclge_log_error(dev, "IMP_TCM_ECC_INT_STS", + &hclge_imp_tcm_ecc_int[0], status, + &ae_dev->hw_err_reset_req); + + status = le32_to_cpu(desc[0].data[1]); + if (status) + hclge_log_error(dev, "CMDQ_MEM_ECC_INT_STS", + &hclge_cmdq_nic_mem_ecc_int[0], status, + &ae_dev->hw_err_reset_req); + + if ((le32_to_cpu(desc[0].data[2])) & BIT(0)) + dev_warn(dev, "imp_rd_data_poison_err found\n"); + + status = le32_to_cpu(desc[0].data[3]); + if (status) + hclge_log_error(dev, "TQP_INT_ECC_INT_STS", + &hclge_tqp_int_ecc_int[0], status, + &ae_dev->hw_err_reset_req); + + status = le32_to_cpu(desc[0].data[4]); + if (status) + hclge_log_error(dev, "MSIX_ECC_INT_STS", + &hclge_msix_sram_ecc_int[0], status, + &ae_dev->hw_err_reset_req); + + /* log SSU(Storage Switch Unit) errors */ + desc_data = (__le32 *)&desc[2]; + status = le32_to_cpu(*(desc_data + 2)); + if (status) + hclge_log_error(dev, "SSU_ECC_MULTI_BIT_INT_0", + &hclge_ssu_mem_ecc_err_int[0], status, + &ae_dev->hw_err_reset_req); + + status = le32_to_cpu(*(desc_data + 3)) & BIT(0); + if (status) { + dev_err(dev, "SSU_ECC_MULTI_BIT_INT_1 ssu_mem32_ecc_mbit_err found [error status=0x%x]\n", + status); + set_bit(HNAE3_GLOBAL_RESET, &ae_dev->hw_err_reset_req); + } + + status = le32_to_cpu(*(desc_data + 4)) & HCLGE_SSU_COMMON_ERR_INT_MASK; + if (status) + hclge_log_error(dev, "SSU_COMMON_ERR_INT", + &hclge_ssu_com_err_int[0], status, + &ae_dev->hw_err_reset_req); + + /* log IGU(Ingress Unit) errors */ + desc_data = (__le32 *)&desc[3]; + status = le32_to_cpu(*desc_data) & HCLGE_IGU_INT_MASK; + if (status) + hclge_log_error(dev, "IGU_INT_STS", + 
&hclge_igu_int[0], status, + &ae_dev->hw_err_reset_req); + + /* log PPP(Programmable Packet Process) errors */ + desc_data = (__le32 *)&desc[4]; + status = le32_to_cpu(*(desc_data + 1)); + if (status) + hclge_log_error(dev, "PPP_MPF_ABNORMAL_INT_ST1", + &hclge_ppp_mpf_abnormal_int_st1[0], status, + &ae_dev->hw_err_reset_req); + + status = le32_to_cpu(*(desc_data + 3)) & HCLGE_PPP_MPF_INT_ST3_MASK; + if (status) + hclge_log_error(dev, "PPP_MPF_ABNORMAL_INT_ST3", + &hclge_ppp_mpf_abnormal_int_st3[0], status, + &ae_dev->hw_err_reset_req); + + /* log PPU(RCB) errors */ + desc_data = (__le32 *)&desc[5]; + status = le32_to_cpu(*(desc_data + 1)); + if (status) { + dev_err(dev, + "PPU_MPF_ABNORMAL_INT_ST1 rpu_rx_pkt_ecc_mbit_err found\n"); + set_bit(HNAE3_GLOBAL_RESET, &ae_dev->hw_err_reset_req); + } + + status = le32_to_cpu(*(desc_data + 2)); + if (status) + hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST2", + &hclge_ppu_mpf_abnormal_int_st2[0], status, + &ae_dev->hw_err_reset_req); + + status = le32_to_cpu(*(desc_data + 3)) & HCLGE_PPU_MPF_INT_ST3_MASK; + if (status) + hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST3", + &hclge_ppu_mpf_abnormal_int_st3[0], status, + &ae_dev->hw_err_reset_req); + + /* log TM(Traffic Manager) errors */ + desc_data = (__le32 *)&desc[6]; + status = le32_to_cpu(*desc_data); + if (status) + hclge_log_error(dev, "TM_SCH_RINT", + &hclge_tm_sch_rint[0], status, + &ae_dev->hw_err_reset_req); + + /* log QCN(Quantized Congestion Control) errors */ + desc_data = (__le32 *)&desc[7]; + status = le32_to_cpu(*desc_data) & HCLGE_QCN_FIFO_INT_MASK; + if (status) + hclge_log_error(dev, "QCN_FIFO_RINT", + &hclge_qcn_fifo_rint[0], status, + &ae_dev->hw_err_reset_req); + + status = le32_to_cpu(*(desc_data + 1)) & HCLGE_QCN_ECC_INT_MASK; + if (status) + hclge_log_error(dev, "QCN_ECC_RINT", + &hclge_qcn_ecc_rint[0], status, + &ae_dev->hw_err_reset_req); + + /* log NCSI errors */ + desc_data = (__le32 *)&desc[9]; + status = le32_to_cpu(*desc_data) & HCLGE_NCSI_ECC_INT_MASK; + if (status) + hclge_log_error(dev, "NCSI_ECC_INT_RPT", + &hclge_ncsi_err_int[0], status, + &ae_dev->hw_err_reset_req); + + /* clear all main PF RAS errors */ + hclge_comm_cmd_reuse_desc(&desc[0], false); + ret = hclge_cmd_send(&hdev->hw, &desc[0], num); + if (ret) + dev_err(dev, "clear all mpf ras int cmd failed (%d)\n", ret); + + return ret; +} + +/* hclge_handle_pf_ras_error: handle all PF RAS errors + * @hdev: pointer to struct hclge_dev + * @desc: descriptor for describing the command + * @num: number of extended command structures + * + * This function handles all the PF RAS errors in the + * hw registers using command. 
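
The decode blocks above and below all go through hclge_log_error(), which is defined earlier in this file and is not visible in this hunk. Given the struct hclge_hw_error layout declared in hclge_err.h (int_msk, msg, reset_level), its job is essentially a table walk; the sketch below is a reconstruction for illustration only and may differ from the exact upstream body:

/* Sketch only: report every set bit that has a table entry and fold the
 * entry's reset level into the caller's reset-request bitmap.
 */
static void hclge_log_error_sketch(struct device *dev, const char *reg,
				   const struct hclge_hw_error *err,
				   u32 err_sts, unsigned long *reset_requests)
{
	while (err->msg) {
		if (err->int_msk & err_sts) {
			dev_err(dev, "%s %s found [error status=0x%x]\n",
				reg, err->msg, err_sts);
			if (err->reset_level != HNAE3_NONE_RESET)
				set_bit(err->reset_level, reset_requests);
		}
		err++;
	}
}
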
+ */ +static int hclge_handle_pf_ras_error(struct hclge_dev *hdev, + struct hclge_desc *desc, + int num) +{ + struct hnae3_ae_dev *ae_dev = hdev->ae_dev; + struct device *dev = &hdev->pdev->dev; + __le32 *desc_data; + u32 status; + int ret; + + /* query all PF RAS errors */ + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_CLEAR_PF_RAS_INT, + true); + ret = hclge_cmd_send(&hdev->hw, &desc[0], num); + if (ret) { + dev_err(dev, "query all pf ras int cmd failed (%d)\n", ret); + return ret; + } + + /* log SSU(Storage Switch Unit) errors */ + status = le32_to_cpu(desc[0].data[0]); + if (status) + hclge_log_error(dev, "SSU_PORT_BASED_ERR_INT", + &hclge_ssu_port_based_err_int[0], status, + &ae_dev->hw_err_reset_req); + + status = le32_to_cpu(desc[0].data[1]); + if (status) + hclge_log_error(dev, "SSU_FIFO_OVERFLOW_INT", + &hclge_ssu_fifo_overflow_int[0], status, + &ae_dev->hw_err_reset_req); + + status = le32_to_cpu(desc[0].data[2]); + if (status) + hclge_log_error(dev, "SSU_ETS_TCG_INT", + &hclge_ssu_ets_tcg_int[0], status, + &ae_dev->hw_err_reset_req); + + /* log IGU(Ingress Unit) EGU(Egress Unit) TNL errors */ + desc_data = (__le32 *)&desc[1]; + status = le32_to_cpu(*desc_data) & HCLGE_IGU_EGU_TNL_INT_MASK; + if (status) + hclge_log_error(dev, "IGU_EGU_TNL_INT_STS", + &hclge_igu_egu_tnl_int[0], status, + &ae_dev->hw_err_reset_req); + + /* log PPU(RCB) errors */ + desc_data = (__le32 *)&desc[3]; + status = le32_to_cpu(*desc_data) & HCLGE_PPU_PF_INT_RAS_MASK; + if (status) { + hclge_log_error(dev, "PPU_PF_ABNORMAL_INT_ST0", + &hclge_ppu_pf_abnormal_int[0], status, + &ae_dev->hw_err_reset_req); + hclge_report_hw_error(hdev, HNAE3_PPU_POISON_ERROR); + } + + /* clear all PF RAS errors */ + hclge_comm_cmd_reuse_desc(&desc[0], false); + ret = hclge_cmd_send(&hdev->hw, &desc[0], num); + if (ret) + dev_err(dev, "clear all pf ras int cmd failed (%d)\n", ret); + + return ret; +} + +static int hclge_handle_all_ras_errors(struct hclge_dev *hdev) +{ + u32 mpf_bd_num, pf_bd_num, bd_num; + struct hclge_desc *desc; + int ret; + + /* query the number of registers in the RAS int status */ + ret = hclge_query_bd_num(hdev, true, &mpf_bd_num, &pf_bd_num); + if (ret) + return ret; + + bd_num = max_t(u32, mpf_bd_num, pf_bd_num); + desc = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL); + if (!desc) + return -ENOMEM; + + /* handle all main PF RAS errors */ + ret = hclge_handle_mpf_ras_error(hdev, desc, mpf_bd_num); + if (ret) { + kfree(desc); + return ret; + } + memset(desc, 0, bd_num * sizeof(struct hclge_desc)); + + /* handle all PF RAS errors */ + ret = hclge_handle_pf_ras_error(hdev, desc, pf_bd_num); + kfree(desc); + + return ret; +} + +static int hclge_log_rocee_axi_error(struct hclge_dev *hdev) +{ + struct device *dev = &hdev->pdev->dev; + struct hclge_desc desc[3]; + int ret; + + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_ROCEE_AXI_RAS_INFO_CMD, + true); + hclge_cmd_setup_basic_desc(&desc[1], HCLGE_QUERY_ROCEE_AXI_RAS_INFO_CMD, + true); + hclge_cmd_setup_basic_desc(&desc[2], HCLGE_QUERY_ROCEE_AXI_RAS_INFO_CMD, + true); + desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); + desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); + + ret = hclge_cmd_send(&hdev->hw, &desc[0], 3); + if (ret) { + dev_err(dev, "failed(%d) to query ROCEE AXI error sts\n", ret); + return ret; + } + + dev_err(dev, "AXI1: %08X %08X %08X %08X %08X %08X\n", + le32_to_cpu(desc[0].data[0]), le32_to_cpu(desc[0].data[1]), + le32_to_cpu(desc[0].data[2]), le32_to_cpu(desc[0].data[3]), + le32_to_cpu(desc[0].data[4]), 
le32_to_cpu(desc[0].data[5])); + dev_err(dev, "AXI2: %08X %08X %08X %08X %08X %08X\n", + le32_to_cpu(desc[1].data[0]), le32_to_cpu(desc[1].data[1]), + le32_to_cpu(desc[1].data[2]), le32_to_cpu(desc[1].data[3]), + le32_to_cpu(desc[1].data[4]), le32_to_cpu(desc[1].data[5])); + dev_err(dev, "AXI3: %08X %08X %08X %08X\n", + le32_to_cpu(desc[2].data[0]), le32_to_cpu(desc[2].data[1]), + le32_to_cpu(desc[2].data[2]), le32_to_cpu(desc[2].data[3])); + + return 0; +} + +static int hclge_log_rocee_ecc_error(struct hclge_dev *hdev) +{ + struct device *dev = &hdev->pdev->dev; + struct hclge_desc desc[2]; + int ret; + + ret = hclge_cmd_query_error(hdev, &desc[0], + HCLGE_QUERY_ROCEE_ECC_RAS_INFO_CMD, + HCLGE_COMM_CMD_FLAG_NEXT); + if (ret) { + dev_err(dev, "failed(%d) to query ROCEE ECC error sts\n", ret); + return ret; + } + + dev_err(dev, "ECC1: %08X %08X %08X %08X %08X %08X\n", + le32_to_cpu(desc[0].data[0]), le32_to_cpu(desc[0].data[1]), + le32_to_cpu(desc[0].data[2]), le32_to_cpu(desc[0].data[3]), + le32_to_cpu(desc[0].data[4]), le32_to_cpu(desc[0].data[5])); + dev_err(dev, "ECC2: %08X %08X %08X\n", le32_to_cpu(desc[1].data[0]), + le32_to_cpu(desc[1].data[1]), le32_to_cpu(desc[1].data[2])); + + return 0; +} + +static int hclge_log_rocee_ovf_error(struct hclge_dev *hdev) +{ + struct device *dev = &hdev->pdev->dev; + struct hclge_desc desc[2]; + int ret; + + /* read overflow error status */ + ret = hclge_cmd_query_error(hdev, &desc[0], HCLGE_ROCEE_PF_RAS_INT_CMD, + 0); + if (ret) { + dev_err(dev, "failed(%d) to query ROCEE OVF error sts\n", ret); + return ret; + } + + /* log overflow error */ + if (le32_to_cpu(desc[0].data[0]) & HCLGE_ROCEE_OVF_ERR_INT_MASK) { + const struct hclge_hw_error *err; + u32 err_sts; + + err = &hclge_rocee_qmm_ovf_err_int[0]; + err_sts = HCLGE_ROCEE_OVF_ERR_TYPE_MASK & + le32_to_cpu(desc[0].data[0]); + while (err->msg) { + if (err->int_msk == err_sts) { + dev_err(dev, "%s [error status=0x%x] found\n", + err->msg, + le32_to_cpu(desc[0].data[0])); + break; + } + err++; + } + } + + if (le32_to_cpu(desc[0].data[1]) & HCLGE_ROCEE_OVF_ERR_INT_MASK) { + dev_err(dev, "ROCEE TSP OVF [error status=0x%x] found\n", + le32_to_cpu(desc[0].data[1])); + } + + if (le32_to_cpu(desc[0].data[2]) & HCLGE_ROCEE_OVF_ERR_INT_MASK) { + dev_err(dev, "ROCEE SCC OVF [error status=0x%x] found\n", + le32_to_cpu(desc[0].data[2])); + } + + return 0; +} + +static enum hnae3_reset_type +hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev) +{ + enum hnae3_reset_type reset_type = HNAE3_NONE_RESET; + struct device *dev = &hdev->pdev->dev; + struct hclge_desc desc[2]; + unsigned int status; + int ret; + + /* read RAS error interrupt status */ + ret = hclge_cmd_query_error(hdev, &desc[0], + HCLGE_QUERY_CLEAR_ROCEE_RAS_INT, 0); + if (ret) { + dev_err(dev, "failed(%d) to query ROCEE RAS INT SRC\n", ret); + /* reset everything for now */ + return HNAE3_GLOBAL_RESET; + } + + status = le32_to_cpu(desc[0].data[0]); + if (status & HCLGE_ROCEE_AXI_ERR_INT_MASK) { + if (status & HCLGE_ROCEE_RERR_INT_MASK) + dev_err(dev, "ROCEE RAS AXI rresp error\n"); + + if (status & HCLGE_ROCEE_BERR_INT_MASK) + dev_err(dev, "ROCEE RAS AXI bresp error\n"); + + reset_type = HNAE3_FUNC_RESET; + + hclge_report_hw_error(hdev, HNAE3_ROCEE_AXI_RESP_ERROR); + + ret = hclge_log_rocee_axi_error(hdev); + if (ret) + return HNAE3_GLOBAL_RESET; + } + + if (status & HCLGE_ROCEE_ECC_INT_MASK) { + dev_err(dev, "ROCEE RAS 2bit ECC error\n"); + reset_type = HNAE3_GLOBAL_RESET; + + ret = hclge_log_rocee_ecc_error(hdev); + if (ret) + return 
HNAE3_GLOBAL_RESET; + } + + if (status & HCLGE_ROCEE_OVF_INT_MASK) { + ret = hclge_log_rocee_ovf_error(hdev); + if (ret) { + dev_err(dev, "failed(%d) to process ovf error\n", ret); + /* reset everything for now */ + return HNAE3_GLOBAL_RESET; + } + } + + /* clear error status */ + hclge_comm_cmd_reuse_desc(&desc[0], false); + ret = hclge_cmd_send(&hdev->hw, &desc[0], 1); + if (ret) { + dev_err(dev, "failed(%d) to clear ROCEE RAS error\n", ret); + /* reset everything for now */ + return HNAE3_GLOBAL_RESET; + } + + return reset_type; +} + +int hclge_config_rocee_ras_interrupt(struct hclge_dev *hdev, bool en) +{ + struct device *dev = &hdev->pdev->dev; + struct hclge_desc desc; + int ret; + + if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2 || + !hnae3_dev_roce_supported(hdev)) + return 0; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_CONFIG_ROCEE_RAS_INT_EN, false); + if (en) { + /* enable ROCEE hw error interrupts */ + desc.data[0] = cpu_to_le32(HCLGE_ROCEE_RAS_NFE_INT_EN); + desc.data[1] = cpu_to_le32(HCLGE_ROCEE_RAS_CE_INT_EN); + + hclge_log_and_clear_rocee_ras_error(hdev); + } + desc.data[2] = cpu_to_le32(HCLGE_ROCEE_RAS_NFE_INT_EN_MASK); + desc.data[3] = cpu_to_le32(HCLGE_ROCEE_RAS_CE_INT_EN_MASK); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(dev, "failed(%d) to config ROCEE RAS interrupt\n", ret); + + return ret; +} + +static void hclge_handle_rocee_ras_error(struct hnae3_ae_dev *ae_dev) +{ + struct hclge_dev *hdev = ae_dev->priv; + enum hnae3_reset_type reset_type; + + if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) + return; + + reset_type = hclge_log_and_clear_rocee_ras_error(hdev); + if (reset_type != HNAE3_NONE_RESET) + set_bit(reset_type, &ae_dev->hw_err_reset_req); +} + +static const struct hclge_hw_blk hw_blk[] = { + { + .msk = BIT(0), + .name = "IGU_EGU", + .config_err_int = hclge_config_igu_egu_hw_err_int, + }, { + .msk = BIT(1), + .name = "PPP", + .config_err_int = hclge_config_ppp_hw_err_int, + }, { + .msk = BIT(2), + .name = "SSU", + .config_err_int = hclge_config_ssu_hw_err_int, + }, { + .msk = BIT(3), + .name = "PPU", + .config_err_int = hclge_config_ppu_hw_err_int, + }, { + .msk = BIT(4), + .name = "TM", + .config_err_int = hclge_config_tm_hw_err_int, + }, { + .msk = BIT(5), + .name = "COMMON", + .config_err_int = hclge_config_common_hw_err_int, + }, { + .msk = BIT(8), + .name = "MAC", + .config_err_int = hclge_config_mac_err_int, + }, { + /* sentinel */ + } +}; + +static void hclge_config_all_msix_error(struct hclge_dev *hdev, bool enable) +{ + u32 reg_val; + + reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG); + + if (enable) + reg_val |= BIT(HCLGE_VECTOR0_ALL_MSIX_ERR_B); + else + reg_val &= ~BIT(HCLGE_VECTOR0_ALL_MSIX_ERR_B); + + hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val); +} + +int hclge_config_nic_hw_error(struct hclge_dev *hdev, bool state) +{ + const struct hclge_hw_blk *module = hw_blk; + int ret = 0; + + hclge_config_all_msix_error(hdev, state); + + while (module->name) { + if (module->config_err_int) { + ret = module->config_err_int(hdev, state); + if (ret) + return ret; + } + module++; + } + + return ret; +} + +pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev) +{ + struct hclge_dev *hdev = ae_dev->priv; + struct device *dev = &hdev->pdev->dev; + u32 status; + + if (!test_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state)) { + dev_err(dev, + "Can't recover - RAS error reported during dev init\n"); + return PCI_ERS_RESULT_NONE; + } + + status = hclge_read_dev(&hdev->hw, 
HCLGE_RAS_PF_OTHER_INT_STS_REG); + if (status & HCLGE_RAS_REG_NFE_MASK || + status & HCLGE_RAS_REG_ROCEE_ERR_MASK) + ae_dev->hw_err_reset_req = 0; + else + goto out; + + /* Handling Non-fatal HNS RAS errors */ + if (status & HCLGE_RAS_REG_NFE_MASK) { + dev_err(dev, + "HNS Non-Fatal RAS error(status=0x%x) identified\n", + status); + hclge_handle_all_ras_errors(hdev); + } + + /* Handling Non-fatal Rocee RAS errors */ + if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 && + status & HCLGE_RAS_REG_ROCEE_ERR_MASK) { + dev_err(dev, "ROCEE Non-Fatal RAS error identified\n"); + hclge_handle_rocee_ras_error(ae_dev); + } + + if (ae_dev->hw_err_reset_req) + return PCI_ERS_RESULT_NEED_RESET; + +out: + return PCI_ERS_RESULT_RECOVERED; +} + +static int hclge_clear_hw_msix_error(struct hclge_dev *hdev, + struct hclge_desc *desc, bool is_mpf, + u32 bd_num) +{ + if (is_mpf) + desc[0].opcode = + cpu_to_le16(HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT); + else + desc[0].opcode = cpu_to_le16(HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT); + + desc[0].flag = cpu_to_le16(HCLGE_COMM_CMD_FLAG_NO_INTR | + HCLGE_COMM_CMD_FLAG_IN); + + return hclge_cmd_send(&hdev->hw, &desc[0], bd_num); +} + +/* hclge_query_8bd_info: query information about over_8bd_nfe_err + * @hdev: pointer to struct hclge_dev + * @vf_id: Index of the virtual function with error + * @q_id: Physical index of the queue with error + * + * This function get specific index of queue and function which causes + * over_8bd_nfe_err by using command. If vf_id is 0, it means error is + * caused by PF instead of VF. + */ +static int hclge_query_over_8bd_err_info(struct hclge_dev *hdev, u16 *vf_id, + u16 *q_id) +{ + struct hclge_query_ppu_pf_other_int_dfx_cmd *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PPU_PF_OTHER_INT_DFX, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + return ret; + + req = (struct hclge_query_ppu_pf_other_int_dfx_cmd *)desc.data; + *vf_id = le16_to_cpu(req->over_8bd_no_fe_vf_id); + *q_id = le16_to_cpu(req->over_8bd_no_fe_qid); + + return 0; +} + +/* hclge_handle_over_8bd_err: handle MSI-X error named over_8bd_nfe_err + * @hdev: pointer to struct hclge_dev + * @reset_requests: reset level that we need to trigger later + * + * over_8bd_nfe_err is a special MSI-X because it may caused by a VF, in + * that case, we need to trigger VF reset. Otherwise, a PF reset is needed. + */ +static void hclge_handle_over_8bd_err(struct hclge_dev *hdev, + unsigned long *reset_requests) +{ + struct device *dev = &hdev->pdev->dev; + u16 vf_id; + u16 q_id; + int ret; + + ret = hclge_query_over_8bd_err_info(hdev, &vf_id, &q_id); + if (ret) { + dev_err(dev, "fail(%d) to query over_8bd_no_fe info\n", + ret); + return; + } + + dev_err(dev, "PPU_PF_ABNORMAL_INT_ST over_8bd_no_fe found, vport(%u), queue_id(%u)\n", + vf_id, q_id); + + if (vf_id) { + if (vf_id >= hdev->num_alloc_vport) { + dev_err(dev, "invalid vport(%u)\n", vf_id); + return; + } + + /* If we need to trigger other reset whose level is higher + * than HNAE3_VF_FUNC_RESET, no need to trigger a VF reset + * here. 
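
As the comment above says, a VF-originated over_8bd error is forwarded to the VF, while every other case escalates through the reset_requests bitmap. That bitmap is indexed by enum hnae3_reset_type and is later collapsed into a single reset level by the reset service task (hclge_get_reset_level() in hclge_main.c). A minimal caller-side sketch; the function name is invented for illustration:

/* Sketch only: the MSI-X handler reports resets by setting bits in a
 * caller-provided bitmap, which the caller then inspects or forwards.
 */
static void hclge_example_handle_msix(struct hclge_dev *hdev)
{
	unsigned long reset_requests = 0;

	if (hclge_handle_hw_msix_error(hdev, &reset_requests))
		return;

	if (test_bit(HNAE3_FUNC_RESET, &reset_requests))
		dev_info(&hdev->pdev->dev,
			 "MSI-X error handling requested a PF function reset\n");
}
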
+ */ + if (*reset_requests != 0) + return; + + ret = hclge_inform_reset_assert_to_vf(&hdev->vport[vf_id]); + if (ret) + dev_err(dev, "inform reset to vport(%u) failed %d!\n", + vf_id, ret); + } else { + set_bit(HNAE3_FUNC_RESET, reset_requests); + } +} + +/* hclge_handle_mpf_msix_error: handle all main PF MSI-X errors + * @hdev: pointer to struct hclge_dev + * @desc: descriptor for describing the command + * @mpf_bd_num: number of extended command structures + * @reset_requests: record of the reset level that we need + * + * This function handles all the main PF MSI-X errors in the hw register/s + * using command. + */ +static int hclge_handle_mpf_msix_error(struct hclge_dev *hdev, + struct hclge_desc *desc, + int mpf_bd_num, + unsigned long *reset_requests) +{ + struct device *dev = &hdev->pdev->dev; + __le32 *desc_data; + u32 status; + int ret; + /* query all main PF MSIx errors */ + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT, + true); + ret = hclge_cmd_send(&hdev->hw, &desc[0], mpf_bd_num); + if (ret) { + dev_err(dev, "query all mpf msix int cmd failed (%d)\n", ret); + return ret; + } + + /* log MAC errors */ + desc_data = (__le32 *)&desc[1]; + status = le32_to_cpu(*desc_data); + if (status) + hclge_log_error(dev, "MAC_AFIFO_TNL_INT_R", + &hclge_mac_afifo_tnl_int[0], status, + reset_requests); + + /* log PPU(RCB) MPF errors */ + desc_data = (__le32 *)&desc[5]; + status = le32_to_cpu(*(desc_data + 2)) & + HCLGE_PPU_MPF_INT_ST2_MSIX_MASK; + if (status) + dev_err(dev, "PPU_MPF_ABNORMAL_INT_ST2 rx_q_search_miss found [dfx status=0x%x\n]", + status); + + /* clear all main PF MSIx errors */ + ret = hclge_clear_hw_msix_error(hdev, desc, true, mpf_bd_num); + if (ret) + dev_err(dev, "clear all mpf msix int cmd failed (%d)\n", ret); + + return ret; +} + +/* hclge_handle_pf_msix_error: handle all PF MSI-X errors + * @hdev: pointer to struct hclge_dev + * @desc: descriptor for describing the command + * @mpf_bd_num: number of extended command structures + * @reset_requests: record of the reset level that we need + * + * This function handles all the PF MSI-X errors in the hw register/s using + * command. 
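
Both the RAS handlers earlier in this file and the MSI-X handlers here follow the same two-step sequence: read and decode the status descriptors, then send the same descriptors back as a write so the hardware clears what was just reported (hclge_comm_cmd_reuse_desc() on the RAS path, hclge_clear_hw_msix_error() on the MSI-X path). A condensed sketch of that sequence with the decode step elided; the wrapper name is invented for illustration:

/* Sketch only: query the PF RAS status, then reuse the same descriptors
 * as a write command to clear the reported sources.
 */
static int hclge_query_then_clear_sketch(struct hclge_dev *hdev,
					 struct hclge_desc *desc, int num)
{
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_CLEAR_PF_RAS_INT,
				   true);
	ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
	if (ret)
		return ret;

	/* ... decode desc[] with hclge_log_error() here ... */

	hclge_comm_cmd_reuse_desc(&desc[0], false);
	return hclge_cmd_send(&hdev->hw, &desc[0], num);
}
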
+ */ +static int hclge_handle_pf_msix_error(struct hclge_dev *hdev, + struct hclge_desc *desc, + int pf_bd_num, + unsigned long *reset_requests) +{ + struct device *dev = &hdev->pdev->dev; + __le32 *desc_data; + u32 status; + int ret; + + /* query all PF MSIx errors */ + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT, + true); + ret = hclge_cmd_send(&hdev->hw, &desc[0], pf_bd_num); + if (ret) { + dev_err(dev, "query all pf msix int cmd failed (%d)\n", ret); + return ret; + } + + /* log SSU PF errors */ + status = le32_to_cpu(desc[0].data[0]) & HCLGE_SSU_PORT_INT_MSIX_MASK; + if (status) + hclge_log_error(dev, "SSU_PORT_BASED_ERR_INT", + &hclge_ssu_port_based_pf_int[0], + status, reset_requests); + + /* read and log PPP PF errors */ + desc_data = (__le32 *)&desc[2]; + status = le32_to_cpu(*desc_data); + if (status) + hclge_log_error(dev, "PPP_PF_ABNORMAL_INT_ST0", + &hclge_ppp_pf_abnormal_int[0], + status, reset_requests); + + /* log PPU(RCB) PF errors */ + desc_data = (__le32 *)&desc[3]; + status = le32_to_cpu(*desc_data) & HCLGE_PPU_PF_INT_MSIX_MASK; + if (status) + hclge_log_error(dev, "PPU_PF_ABNORMAL_INT_ST", + &hclge_ppu_pf_abnormal_int[0], + status, reset_requests); + + status = le32_to_cpu(*desc_data) & HCLGE_PPU_PF_OVER_8BD_ERR_MASK; + if (status) + hclge_handle_over_8bd_err(hdev, reset_requests); + + /* clear all PF MSIx errors */ + ret = hclge_clear_hw_msix_error(hdev, desc, false, pf_bd_num); + if (ret) + dev_err(dev, "clear all pf msix int cmd failed (%d)\n", ret); + + return ret; +} + +static int hclge_handle_all_hw_msix_error(struct hclge_dev *hdev, + unsigned long *reset_requests) +{ + u32 mpf_bd_num, pf_bd_num, bd_num; + struct hclge_desc *desc; + int ret; + + /* query the number of bds for the MSIx int status */ + ret = hclge_query_bd_num(hdev, false, &mpf_bd_num, &pf_bd_num); + if (ret) + goto out; + + bd_num = max_t(u32, mpf_bd_num, pf_bd_num); + desc = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL); + if (!desc) + return -ENOMEM; + + ret = hclge_handle_mpf_msix_error(hdev, desc, mpf_bd_num, + reset_requests); + if (ret) + goto msi_error; + + memset(desc, 0, bd_num * sizeof(struct hclge_desc)); + ret = hclge_handle_pf_msix_error(hdev, desc, pf_bd_num, reset_requests); + if (ret) + goto msi_error; + + ret = hclge_handle_mac_tnl(hdev); + +msi_error: + kfree(desc); +out: + return ret; +} + +int hclge_handle_hw_msix_error(struct hclge_dev *hdev, + unsigned long *reset_requests) +{ + struct device *dev = &hdev->pdev->dev; + + if (!test_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state)) { + dev_err(dev, + "failed to handle msix error during dev init\n"); + return -EAGAIN; + } + + return hclge_handle_all_hw_msix_error(hdev, reset_requests); +} + +int hclge_handle_mac_tnl(struct hclge_dev *hdev) +{ + struct hclge_mac_tnl_stats mac_tnl_stats; + struct device *dev = &hdev->pdev->dev; + struct hclge_desc desc; + u32 status; + int ret; + + /* query and clear mac tnl interruptions */ + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_TNL_INT, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(dev, "failed to query mac tnl int, ret = %d.\n", ret); + return ret; + } + + status = le32_to_cpu(desc.data[0]); + if (status) { + /* When mac tnl interrupt occurs, we record current time and + * register status here in a fifo, then clear the status. So + * that if link status changes suddenly at some time, we can + * query them by debugfs. 
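
The comment above covers the producer side; the consumer is the debugfs dump, which drains hdev->mac_tnl_log one entry at a time. A sketch of such a consumer, using the time and status fields stored below; the function name is invented, and the real dump in hclge_debugfs.c may read the fifo differently:

/* Sketch only: drain the mac_tnl log fifo the way a debugfs dump would,
 * printing one timestamped status word per entry.
 */
static void hclge_dump_mac_tnl_sketch(struct hclge_dev *hdev)
{
	struct hclge_mac_tnl_stats stats;

	while (kfifo_get(&hdev->mac_tnl_log, &stats))
		dev_info(&hdev->pdev->dev, "[%llu] mac tnl status: 0x%x\n",
			 stats.time, stats.status);
}
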
+ */ + mac_tnl_stats.time = local_clock(); + mac_tnl_stats.status = status; + kfifo_put(&hdev->mac_tnl_log, mac_tnl_stats); + ret = hclge_clear_mac_tnl_int(hdev); + if (ret) + dev_err(dev, "failed to clear mac tnl int, ret = %d.\n", + ret); + } + + return ret; +} + +void hclge_handle_all_hns_hw_errors(struct hnae3_ae_dev *ae_dev) +{ + struct hclge_dev *hdev = ae_dev->priv; + struct device *dev = &hdev->pdev->dev; + u32 mpf_bd_num, pf_bd_num, bd_num; + struct hclge_desc *desc; + u32 status; + int ret; + + ae_dev->hw_err_reset_req = 0; + status = hclge_read_dev(&hdev->hw, HCLGE_RAS_PF_OTHER_INT_STS_REG); + + /* query the number of bds for the MSIx int status */ + ret = hclge_query_bd_num(hdev, false, &mpf_bd_num, &pf_bd_num); + if (ret) + return; + + bd_num = max_t(u32, mpf_bd_num, pf_bd_num); + desc = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL); + if (!desc) + return; + + /* Clear HNS hw errors reported through msix */ + memset(&desc[0].data[0], 0xFF, mpf_bd_num * sizeof(struct hclge_desc) - + HCLGE_DESC_NO_DATA_LEN); + ret = hclge_clear_hw_msix_error(hdev, desc, true, mpf_bd_num); + if (ret) { + dev_err(dev, "fail(%d) to clear mpf msix int during init\n", + ret); + goto msi_error; + } + + memset(&desc[0].data[0], 0xFF, pf_bd_num * sizeof(struct hclge_desc) - + HCLGE_DESC_NO_DATA_LEN); + ret = hclge_clear_hw_msix_error(hdev, desc, false, pf_bd_num); + if (ret) { + dev_err(dev, "fail(%d) to clear pf msix int during init\n", + ret); + goto msi_error; + } + + /* Handle Non-fatal HNS RAS errors */ + if (status & HCLGE_RAS_REG_NFE_MASK) { + dev_err(dev, "HNS hw error(RAS) identified during init\n"); + hclge_handle_all_ras_errors(hdev); + } + +msi_error: + kfree(desc); +} + +bool hclge_find_error_source(struct hclge_dev *hdev) +{ + u32 msix_src_flag, hw_err_src_flag; + + msix_src_flag = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) & + HCLGE_VECTOR0_REG_MSIX_MASK; + + hw_err_src_flag = hclge_read_dev(&hdev->hw, + HCLGE_RAS_PF_OTHER_INT_STS_REG) & + HCLGE_RAS_REG_ERR_MASK; + + return msix_src_flag || hw_err_src_flag; +} + +void hclge_handle_occurred_error(struct hclge_dev *hdev) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); + + if (hclge_find_error_source(hdev)) + hclge_handle_error_info_log(ae_dev); +} + +static void +hclge_handle_error_type_reg_log(struct device *dev, + struct hclge_mod_err_info *mod_info, + struct hclge_type_reg_err_info *type_reg_info) +{ +#define HCLGE_ERR_TYPE_MASK 0x7F +#define HCLGE_ERR_TYPE_IS_RAS_OFFSET 7 + + u8 mod_id, total_module, type_id, total_type, i, is_ras; + u8 index_module = MODULE_NONE; + u8 index_type = NONE_ERROR; + + mod_id = mod_info->mod_id; + type_id = type_reg_info->type_id & HCLGE_ERR_TYPE_MASK; + is_ras = type_reg_info->type_id >> HCLGE_ERR_TYPE_IS_RAS_OFFSET; + + total_module = ARRAY_SIZE(hclge_hw_module_id_st); + total_type = ARRAY_SIZE(hclge_hw_type_id_st); + + for (i = 0; i < total_module; i++) { + if (mod_id == hclge_hw_module_id_st[i].module_id) { + index_module = i; + break; + } + } + + for (i = 0; i < total_type; i++) { + if (type_id == hclge_hw_type_id_st[i].type_id) { + index_type = i; + break; + } + } + + if (index_module != MODULE_NONE && index_type != NONE_ERROR) + dev_err(dev, + "found %s %s, is %s error.\n", + hclge_hw_module_id_st[index_module].msg, + hclge_hw_type_id_st[index_type].msg, + is_ras ? 
"ras" : "msix"); + else + dev_err(dev, + "unknown module[%u] or type[%u].\n", mod_id, type_id); + + dev_err(dev, "reg_value:\n"); + for (i = 0; i < type_reg_info->reg_num; i++) + dev_err(dev, "0x%08x\n", type_reg_info->hclge_reg[i]); +} + +static void hclge_handle_error_module_log(struct hnae3_ae_dev *ae_dev, + const u32 *buf, u32 buf_size) +{ + struct hclge_type_reg_err_info *type_reg_info; + struct hclge_dev *hdev = ae_dev->priv; + struct device *dev = &hdev->pdev->dev; + struct hclge_mod_err_info *mod_info; + struct hclge_sum_err_info *sum_info; + u8 mod_num, err_num, i; + u32 offset = 0; + + sum_info = (struct hclge_sum_err_info *)&buf[offset++]; + if (sum_info->reset_type && + sum_info->reset_type != HNAE3_NONE_RESET) + set_bit(sum_info->reset_type, &ae_dev->hw_err_reset_req); + mod_num = sum_info->mod_num; + + while (mod_num--) { + if (offset >= buf_size) { + dev_err(dev, "The offset(%u) exceeds buf's size(%u).\n", + offset, buf_size); + return; + } + mod_info = (struct hclge_mod_err_info *)&buf[offset++]; + err_num = mod_info->err_num; + + for (i = 0; i < err_num; i++) { + if (offset >= buf_size) { + dev_err(dev, + "The offset(%u) exceeds buf size(%u).\n", + offset, buf_size); + return; + } + + type_reg_info = (struct hclge_type_reg_err_info *) + &buf[offset++]; + hclge_handle_error_type_reg_log(dev, mod_info, + type_reg_info); + + offset += type_reg_info->reg_num; + } + } +} + +static int hclge_query_all_err_bd_num(struct hclge_dev *hdev, u32 *bd_num) +{ + struct device *dev = &hdev->pdev->dev; + struct hclge_desc desc_bd; + int ret; + + hclge_cmd_setup_basic_desc(&desc_bd, HCLGE_QUERY_ALL_ERR_BD_NUM, true); + ret = hclge_cmd_send(&hdev->hw, &desc_bd, 1); + if (ret) { + dev_err(dev, "failed to query error bd_num, ret = %d.\n", ret); + return ret; + } + + *bd_num = le32_to_cpu(desc_bd.data[0]); + if (!(*bd_num)) { + dev_err(dev, "The value of bd_num is 0!\n"); + return -EINVAL; + } + + return 0; +} + +static int hclge_query_all_err_info(struct hclge_dev *hdev, + struct hclge_desc *desc, u32 bd_num) +{ + struct device *dev = &hdev->pdev->dev; + int ret; + + hclge_cmd_setup_basic_desc(desc, HCLGE_QUERY_ALL_ERR_INFO, true); + ret = hclge_cmd_send(&hdev->hw, desc, bd_num); + if (ret) + dev_err(dev, "failed to query error info, ret = %d.\n", ret); + + return ret; +} + +int hclge_handle_error_info_log(struct hnae3_ae_dev *ae_dev) +{ + u32 bd_num, desc_len, buf_len, buf_size, i; + struct hclge_dev *hdev = ae_dev->priv; + struct hclge_desc *desc; + __le32 *desc_data; + u32 *buf; + int ret; + + ret = hclge_query_all_err_bd_num(hdev, &bd_num); + if (ret) + goto out; + + desc_len = bd_num * sizeof(struct hclge_desc); + desc = kzalloc(desc_len, GFP_KERNEL); + if (!desc) { + ret = -ENOMEM; + goto out; + } + + ret = hclge_query_all_err_info(hdev, desc, bd_num); + if (ret) + goto err_desc; + + buf_len = bd_num * sizeof(struct hclge_desc) - HCLGE_DESC_NO_DATA_LEN; + buf_size = buf_len / sizeof(u32); + + desc_data = kzalloc(buf_len, GFP_KERNEL); + if (!desc_data) { + ret = -ENOMEM; + goto err_desc; + } + + buf = kzalloc(buf_len, GFP_KERNEL); + if (!buf) { + ret = -ENOMEM; + goto err_buf_alloc; + } + + memcpy(desc_data, &desc[0].data[0], buf_len); + for (i = 0; i < buf_size; i++) + buf[i] = le32_to_cpu(desc_data[i]); + + hclge_handle_error_module_log(ae_dev, buf, buf_size); + kfree(buf); + +err_buf_alloc: + kfree(desc_data); +err_desc: + kfree(desc); +out: + return ret; +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h new 
file mode 100644 index 000000000..86be6fb32 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h @@ -0,0 +1,231 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2016-2017 Hisilicon Limited. */ + +#ifndef __HCLGE_ERR_H +#define __HCLGE_ERR_H + +#include "hclge_main.h" +#include "hnae3.h" + +#define HCLGE_MPF_RAS_INT_MIN_BD_NUM 10 +#define HCLGE_PF_RAS_INT_MIN_BD_NUM 4 +#define HCLGE_MPF_MSIX_INT_MIN_BD_NUM 10 +#define HCLGE_PF_MSIX_INT_MIN_BD_NUM 4 + +#define HCLGE_RAS_PF_OTHER_INT_STS_REG 0x20B00 +#define HCLGE_RAS_REG_NFE_MASK 0xFF00 +#define HCLGE_RAS_REG_ROCEE_ERR_MASK 0x3000000 +#define HCLGE_RAS_REG_ERR_MASK \ + (HCLGE_RAS_REG_NFE_MASK | HCLGE_RAS_REG_ROCEE_ERR_MASK) + +#define HCLGE_VECTOR0_REG_MSIX_MASK 0x1FF00 + +#define HCLGE_IMP_TCM_ECC_ERR_INT_EN 0xFFFF0000 +#define HCLGE_IMP_TCM_ECC_ERR_INT_EN_MASK 0xFFFF0000 +#define HCLGE_IMP_ITCM4_ECC_ERR_INT_EN 0x300 +#define HCLGE_IMP_ITCM4_ECC_ERR_INT_EN_MASK 0x300 +#define HCLGE_CMDQ_NIC_ECC_ERR_INT_EN 0xFFFF +#define HCLGE_CMDQ_NIC_ECC_ERR_INT_EN_MASK 0xFFFF +#define HCLGE_CMDQ_ROCEE_ECC_ERR_INT_EN 0xFFFF0000 +#define HCLGE_CMDQ_ROCEE_ECC_ERR_INT_EN_MASK 0xFFFF0000 +#define HCLGE_IMP_RD_POISON_ERR_INT_EN 0x0100 +#define HCLGE_IMP_RD_POISON_ERR_INT_EN_MASK 0x0100 +#define HCLGE_TQP_ECC_ERR_INT_EN 0x0FFF +#define HCLGE_TQP_ECC_ERR_INT_EN_MASK 0x0FFF +#define HCLGE_MSIX_SRAM_ECC_ERR_INT_EN_MASK 0x0F000000 +#define HCLGE_MSIX_SRAM_ECC_ERR_INT_EN 0x0F000000 +#define HCLGE_IGU_ERR_INT_EN 0x0000000F +#define HCLGE_IGU_ERR_INT_TYPE 0x00000660 +#define HCLGE_IGU_ERR_INT_EN_MASK 0x000F +#define HCLGE_IGU_TNL_ERR_INT_EN 0x0002AABF +#define HCLGE_IGU_TNL_ERR_INT_EN_MASK 0x003F +#define HCLGE_PPP_MPF_ECC_ERR_INT0_EN 0xFFFFFFFF +#define HCLGE_PPP_MPF_ECC_ERR_INT0_EN_MASK 0xFFFFFFFF +#define HCLGE_PPP_MPF_ECC_ERR_INT1_EN 0xFFFFFFFF +#define HCLGE_PPP_MPF_ECC_ERR_INT1_EN_MASK 0xFFFFFFFF +#define HCLGE_PPP_PF_ERR_INT_EN 0x0003 +#define HCLGE_PPP_PF_ERR_INT_EN_MASK 0x0003 +#define HCLGE_PPP_MPF_ECC_ERR_INT2_EN 0x003F +#define HCLGE_PPP_MPF_ECC_ERR_INT2_EN_MASK 0x003F +#define HCLGE_PPP_MPF_ECC_ERR_INT3_EN 0x003F +#define HCLGE_PPP_MPF_ECC_ERR_INT3_EN_MASK 0x003F +#define HCLGE_TM_SCH_ECC_ERR_INT_EN 0x3 +#define HCLGE_TM_QCN_ERR_INT_TYPE 0x29 +#define HCLGE_TM_QCN_FIFO_INT_EN 0xFFFF00 +#define HCLGE_TM_QCN_MEM_ERR_INT_EN 0xFFFFFF +#define HCLGE_NCSI_ERR_INT_EN 0x3 +#define HCLGE_NCSI_ERR_INT_TYPE 0x9 +#define HCLGE_MAC_COMMON_ERR_INT_EN 0x107FF +#define HCLGE_MAC_COMMON_ERR_INT_EN_MASK 0x107FF +#define HCLGE_MAC_TNL_INT_EN GENMASK(9, 0) +#define HCLGE_MAC_TNL_INT_EN_MASK GENMASK(9, 0) +#define HCLGE_MAC_TNL_INT_CLR GENMASK(9, 0) +#define HCLGE_PPU_MPF_ABNORMAL_INT0_EN GENMASK(31, 0) +#define HCLGE_PPU_MPF_ABNORMAL_INT0_EN_MASK GENMASK(31, 0) +#define HCLGE_PPU_MPF_ABNORMAL_INT1_EN GENMASK(31, 0) +#define HCLGE_PPU_MPF_ABNORMAL_INT1_EN_MASK GENMASK(31, 0) +#define HCLGE_PPU_MPF_ABNORMAL_INT2_EN 0x3FFF3FFF +#define HCLGE_PPU_MPF_ABNORMAL_INT2_EN_MASK 0x3FFF3FFF +#define HCLGE_PPU_MPF_ABNORMAL_INT2_EN2 0xB +#define HCLGE_PPU_MPF_ABNORMAL_INT2_EN2_MASK 0xB +#define HCLGE_PPU_MPF_ABNORMAL_INT3_EN GENMASK(7, 0) +#define HCLGE_PPU_MPF_ABNORMAL_INT3_EN_MASK GENMASK(23, 16) +#define HCLGE_PPU_PF_ABNORMAL_INT_EN GENMASK(5, 0) +#define HCLGE_PPU_PF_ABNORMAL_INT_EN_MASK GENMASK(5, 0) +#define HCLGE_SSU_1BIT_ECC_ERR_INT_EN GENMASK(31, 0) +#define HCLGE_SSU_1BIT_ECC_ERR_INT_EN_MASK GENMASK(31, 0) +#define HCLGE_SSU_MULTI_BIT_ECC_ERR_INT_EN GENMASK(31, 0) +#define HCLGE_SSU_MULTI_BIT_ECC_ERR_INT_EN_MASK GENMASK(31, 0) +#define 
HCLGE_SSU_BIT32_ECC_ERR_INT_EN 0x0101 +#define HCLGE_SSU_BIT32_ECC_ERR_INT_EN_MASK 0x0101 +#define HCLGE_SSU_COMMON_INT_EN GENMASK(9, 0) +#define HCLGE_SSU_COMMON_INT_EN_MASK GENMASK(9, 0) +#define HCLGE_SSU_PORT_BASED_ERR_INT_EN 0x0BFF +#define HCLGE_SSU_PORT_BASED_ERR_INT_EN_MASK 0x0BFF0000 +#define HCLGE_SSU_FIFO_OVERFLOW_ERR_INT_EN GENMASK(23, 0) +#define HCLGE_SSU_FIFO_OVERFLOW_ERR_INT_EN_MASK GENMASK(23, 0) + +#define HCLGE_SSU_COMMON_ERR_INT_MASK GENMASK(9, 0) +#define HCLGE_SSU_PORT_INT_MSIX_MASK 0x7BFF +#define HCLGE_IGU_INT_MASK GENMASK(3, 0) +#define HCLGE_IGU_EGU_TNL_INT_MASK GENMASK(5, 0) +#define HCLGE_PPP_MPF_INT_ST3_MASK GENMASK(5, 0) +#define HCLGE_PPU_MPF_INT_ST3_MASK GENMASK(7, 0) +#define HCLGE_PPU_MPF_INT_ST2_MSIX_MASK BIT(29) +#define HCLGE_PPU_PF_INT_RAS_MASK 0x18 +#define HCLGE_PPU_PF_INT_MSIX_MASK 0x26 +#define HCLGE_PPU_PF_OVER_8BD_ERR_MASK 0x01 +#define HCLGE_QCN_FIFO_INT_MASK GENMASK(17, 0) +#define HCLGE_QCN_ECC_INT_MASK GENMASK(21, 0) +#define HCLGE_NCSI_ECC_INT_MASK GENMASK(1, 0) + +#define HCLGE_ROCEE_RAS_NFE_INT_EN 0xF +#define HCLGE_ROCEE_RAS_CE_INT_EN 0x1 +#define HCLGE_ROCEE_RAS_NFE_INT_EN_MASK 0xF +#define HCLGE_ROCEE_RAS_CE_INT_EN_MASK 0x1 +#define HCLGE_ROCEE_RERR_INT_MASK BIT(0) +#define HCLGE_ROCEE_BERR_INT_MASK BIT(1) +#define HCLGE_ROCEE_AXI_ERR_INT_MASK GENMASK(1, 0) +#define HCLGE_ROCEE_ECC_INT_MASK BIT(2) +#define HCLGE_ROCEE_OVF_INT_MASK BIT(3) +#define HCLGE_ROCEE_OVF_ERR_INT_MASK 0x10000 +#define HCLGE_ROCEE_OVF_ERR_TYPE_MASK 0x3F + +#define HCLGE_DESC_DATA_MAX 8 +#define HCLGE_REG_NUM_MAX 256 +#define HCLGE_DESC_NO_DATA_LEN 8 + +enum hclge_err_int_type { + HCLGE_ERR_INT_MSIX = 0, + HCLGE_ERR_INT_RAS_CE = 1, + HCLGE_ERR_INT_RAS_NFE = 2, + HCLGE_ERR_INT_RAS_FE = 3, +}; + +enum hclge_mod_name_list { + MODULE_NONE = 0, + MODULE_BIOS_COMMON = 1, + MODULE_GE = 2, + MODULE_IGU_EGU = 3, + MODULE_LGE = 4, + MODULE_NCSI = 5, + MODULE_PPP = 6, + MODULE_QCN = 7, + MODULE_RCB_RX = 8, + MODULE_RTC = 9, + MODULE_SSU = 10, + MODULE_TM = 11, + MODULE_RCB_TX = 12, + MODULE_TXDMA = 13, + MODULE_MASTER = 14, + MODULE_HIMAC = 15, + /* add new MODULE NAME for NIC here in order */ + MODULE_ROCEE_TOP = 40, + MODULE_ROCEE_TIMER = 41, + MODULE_ROCEE_MDB = 42, + MODULE_ROCEE_TSP = 43, + MODULE_ROCEE_TRP = 44, + MODULE_ROCEE_SCC = 45, + MODULE_ROCEE_CAEP = 46, + MODULE_ROCEE_GEN_AC = 47, + MODULE_ROCEE_QMM = 48, + MODULE_ROCEE_LSAN = 49, + /* add new MODULE NAME for RoCEE here in order */ +}; + +enum hclge_err_type_list { + NONE_ERROR = 0, + FIFO_ERROR = 1, + MEMORY_ERROR = 2, + POISON_ERROR = 3, + MSIX_ECC_ERROR = 4, + TQP_INT_ECC_ERROR = 5, + PF_ABNORMAL_INT_ERROR = 6, + MPF_ABNORMAL_INT_ERROR = 7, + COMMON_ERROR = 8, + PORT_ERROR = 9, + ETS_ERROR = 10, + NCSI_ERROR = 11, + GLB_ERROR = 12, + LINK_ERROR = 13, + PTP_ERROR = 14, + /* add new ERROR TYPE for NIC here in order */ + ROCEE_NORMAL_ERR = 40, + ROCEE_OVF_ERR = 41, + ROCEE_BUS_ERR = 42, + /* add new ERROR TYPE for ROCEE here in order */ +}; + +struct hclge_hw_blk { + u32 msk; + const char *name; + int (*config_err_int)(struct hclge_dev *hdev, bool en); +}; + +struct hclge_hw_error { + u32 int_msk; + const char *msg; + enum hnae3_reset_type reset_level; +}; + +struct hclge_hw_module_id { + enum hclge_mod_name_list module_id; + const char *msg; +}; + +struct hclge_hw_type_id { + enum hclge_err_type_list type_id; + const char *msg; +}; + +struct hclge_sum_err_info { + u8 reset_type; + u8 mod_num; + u8 rsv[2]; +}; + +struct hclge_mod_err_info { + u8 mod_id; + u8 err_num; + u8 rsv[2]; +}; + +struct 
hclge_type_reg_err_info { + u8 type_id; + u8 reg_num; + u8 rsv[2]; + u32 hclge_reg[HCLGE_REG_NUM_MAX]; +}; + +int hclge_config_mac_tnl_int(struct hclge_dev *hdev, bool en); +int hclge_config_nic_hw_error(struct hclge_dev *hdev, bool state); +int hclge_config_rocee_ras_interrupt(struct hclge_dev *hdev, bool en); +void hclge_handle_all_hns_hw_errors(struct hnae3_ae_dev *ae_dev); +bool hclge_find_error_source(struct hclge_dev *hdev); +void hclge_handle_occurred_error(struct hclge_dev *hdev); +pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev); +int hclge_handle_hw_msix_error(struct hclge_dev *hdev, + unsigned long *reset_requests); +int hclge_handle_error_info_log(struct hnae3_ae_dev *ae_dev); +int hclge_handle_mac_tnl(struct hclge_dev *hdev); +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c new file mode 100644 index 000000000..48b0cb5ec --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -0,0 +1,13229 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "hclge_cmd.h" +#include "hclge_dcb.h" +#include "hclge_main.h" +#include "hclge_mbx.h" +#include "hclge_mdio.h" +#include "hclge_tm.h" +#include "hclge_err.h" +#include "hnae3.h" +#include "hclge_devlink.h" +#include "hclge_comm_cmd.h" + +#define HCLGE_NAME "hclge" + +#define HCLGE_BUF_SIZE_UNIT 256U +#define HCLGE_BUF_MUL_BY 2 +#define HCLGE_BUF_DIV_BY 2 +#define NEED_RESERVE_TC_NUM 2 +#define BUF_MAX_PERCENT 100 +#define BUF_RESERVE_PERCENT 90 + +#define HCLGE_RESET_MAX_FAIL_CNT 5 +#define HCLGE_RESET_SYNC_TIME 100 +#define HCLGE_PF_RESET_SYNC_TIME 20 +#define HCLGE_PF_RESET_SYNC_CNT 1500 + +/* Get DFX BD number offset */ +#define HCLGE_DFX_BIOS_BD_OFFSET 1 +#define HCLGE_DFX_SSU_0_BD_OFFSET 2 +#define HCLGE_DFX_SSU_1_BD_OFFSET 3 +#define HCLGE_DFX_IGU_BD_OFFSET 4 +#define HCLGE_DFX_RPU_0_BD_OFFSET 5 +#define HCLGE_DFX_RPU_1_BD_OFFSET 6 +#define HCLGE_DFX_NCSI_BD_OFFSET 7 +#define HCLGE_DFX_RTC_BD_OFFSET 8 +#define HCLGE_DFX_PPP_BD_OFFSET 9 +#define HCLGE_DFX_RCB_BD_OFFSET 10 +#define HCLGE_DFX_TQP_BD_OFFSET 11 +#define HCLGE_DFX_SSU_2_BD_OFFSET 12 + +#define HCLGE_LINK_STATUS_MS 10 + +static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps); +static int hclge_init_vlan_config(struct hclge_dev *hdev); +static void hclge_sync_vlan_filter(struct hclge_dev *hdev); +static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev); +static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle); +static void hclge_rfs_filter_expire(struct hclge_dev *hdev); +static int hclge_clear_arfs_rules(struct hclge_dev *hdev); +static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev, + unsigned long *addr); +static int hclge_set_default_loopback(struct hclge_dev *hdev); + +static void hclge_sync_mac_table(struct hclge_dev *hdev); +static void hclge_restore_hw_table(struct hclge_dev *hdev); +static void hclge_sync_promisc_mode(struct hclge_dev *hdev); +static void hclge_sync_fd_table(struct hclge_dev *hdev); +static void hclge_update_fec_stats(struct hclge_dev *hdev); +static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret, + int wait_cnt); +static int hclge_update_port_info(struct hclge_dev *hdev); + +static struct hnae3_ae_algo ae_algo; + +static struct workqueue_struct *hclge_wq; + +static 
const struct pci_device_id ae_algo_pci_tbl[] = { + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0}, + /* required last entry */ + {0, } +}; + +MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl); + +static const u32 cmdq_reg_addr_list[] = {HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG, + HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG, + HCLGE_COMM_NIC_CSQ_DEPTH_REG, + HCLGE_COMM_NIC_CSQ_TAIL_REG, + HCLGE_COMM_NIC_CSQ_HEAD_REG, + HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG, + HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG, + HCLGE_COMM_NIC_CRQ_DEPTH_REG, + HCLGE_COMM_NIC_CRQ_TAIL_REG, + HCLGE_COMM_NIC_CRQ_HEAD_REG, + HCLGE_COMM_VECTOR0_CMDQ_SRC_REG, + HCLGE_COMM_CMDQ_INTR_STS_REG, + HCLGE_COMM_CMDQ_INTR_EN_REG, + HCLGE_COMM_CMDQ_INTR_GEN_REG}; + +static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE, + HCLGE_PF_OTHER_INT_REG, + HCLGE_MISC_RESET_STS_REG, + HCLGE_MISC_VECTOR_INT_STS, + HCLGE_GLOBAL_RESET_REG, + HCLGE_FUN_RST_ING, + HCLGE_GRO_EN_REG}; + +static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG, + HCLGE_RING_RX_ADDR_H_REG, + HCLGE_RING_RX_BD_NUM_REG, + HCLGE_RING_RX_BD_LENGTH_REG, + HCLGE_RING_RX_MERGE_EN_REG, + HCLGE_RING_RX_TAIL_REG, + HCLGE_RING_RX_HEAD_REG, + HCLGE_RING_RX_FBD_NUM_REG, + HCLGE_RING_RX_OFFSET_REG, + HCLGE_RING_RX_FBD_OFFSET_REG, + HCLGE_RING_RX_STASH_REG, + HCLGE_RING_RX_BD_ERR_REG, + HCLGE_RING_TX_ADDR_L_REG, + HCLGE_RING_TX_ADDR_H_REG, + HCLGE_RING_TX_BD_NUM_REG, + HCLGE_RING_TX_PRIORITY_REG, + HCLGE_RING_TX_TC_REG, + HCLGE_RING_TX_MERGE_EN_REG, + HCLGE_RING_TX_TAIL_REG, + HCLGE_RING_TX_HEAD_REG, + HCLGE_RING_TX_FBD_NUM_REG, + HCLGE_RING_TX_OFFSET_REG, + HCLGE_RING_TX_EBD_NUM_REG, + HCLGE_RING_TX_EBD_OFFSET_REG, + HCLGE_RING_TX_BD_ERR_REG, + HCLGE_RING_EN_REG}; + +static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG, + HCLGE_TQP_INTR_GL0_REG, + HCLGE_TQP_INTR_GL1_REG, + HCLGE_TQP_INTR_GL2_REG, + HCLGE_TQP_INTR_RL_REG}; + +static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = { + "External Loopback test", + "App Loopback test", + "Serdes serial Loopback test", + "Serdes parallel Loopback test", + "Phy Loopback test" +}; + +static const struct hclge_comm_stats_str g_mac_stats_string[] = { + {"mac_tx_mac_pause_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)}, + {"mac_rx_mac_pause_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)}, + {"mac_tx_pause_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pause_xoff_time)}, + {"mac_rx_pause_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pause_xoff_time)}, + {"mac_tx_control_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)}, + {"mac_rx_control_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)}, + {"mac_tx_pfc_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)}, + {"mac_tx_pfc_pri0_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)}, + {"mac_tx_pfc_pri1_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)}, + {"mac_tx_pfc_pri2_pkt_num", 
HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)}, + {"mac_tx_pfc_pri3_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)}, + {"mac_tx_pfc_pri4_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)}, + {"mac_tx_pfc_pri5_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)}, + {"mac_tx_pfc_pri6_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)}, + {"mac_tx_pfc_pri7_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)}, + {"mac_tx_pfc_pri0_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_xoff_time)}, + {"mac_tx_pfc_pri1_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_xoff_time)}, + {"mac_tx_pfc_pri2_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_xoff_time)}, + {"mac_tx_pfc_pri3_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_xoff_time)}, + {"mac_tx_pfc_pri4_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_xoff_time)}, + {"mac_tx_pfc_pri5_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_xoff_time)}, + {"mac_tx_pfc_pri6_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_xoff_time)}, + {"mac_tx_pfc_pri7_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_xoff_time)}, + {"mac_rx_pfc_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)}, + {"mac_rx_pfc_pri0_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)}, + {"mac_rx_pfc_pri1_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)}, + {"mac_rx_pfc_pri2_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)}, + {"mac_rx_pfc_pri3_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)}, + {"mac_rx_pfc_pri4_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)}, + {"mac_rx_pfc_pri5_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)}, + {"mac_rx_pfc_pri6_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)}, + {"mac_rx_pfc_pri7_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)}, + {"mac_rx_pfc_pri0_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_xoff_time)}, + {"mac_rx_pfc_pri1_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_xoff_time)}, + {"mac_rx_pfc_pri2_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_xoff_time)}, + {"mac_rx_pfc_pri3_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_xoff_time)}, + {"mac_rx_pfc_pri4_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_xoff_time)}, + {"mac_rx_pfc_pri5_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_xoff_time)}, + {"mac_rx_pfc_pri6_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_xoff_time)}, + {"mac_rx_pfc_pri7_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_xoff_time)}, + 
{"mac_tx_total_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)}, + {"mac_tx_total_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)}, + {"mac_tx_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)}, + {"mac_tx_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)}, + {"mac_tx_good_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)}, + {"mac_tx_bad_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)}, + {"mac_tx_uni_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)}, + {"mac_tx_multi_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)}, + {"mac_tx_broad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)}, + {"mac_tx_undersize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)}, + {"mac_tx_oversize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)}, + {"mac_tx_64_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)}, + {"mac_tx_65_127_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)}, + {"mac_tx_128_255_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)}, + {"mac_tx_256_511_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)}, + {"mac_tx_512_1023_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)}, + {"mac_tx_1024_1518_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)}, + {"mac_tx_1519_2047_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)}, + {"mac_tx_2048_4095_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)}, + {"mac_tx_4096_8191_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)}, + {"mac_tx_8192_9216_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)}, + {"mac_tx_9217_12287_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)}, + {"mac_tx_12288_16383_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)}, + {"mac_tx_1519_max_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)}, + {"mac_tx_1519_max_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)}, + {"mac_rx_total_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)}, + {"mac_rx_total_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)}, + {"mac_rx_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)}, + {"mac_rx_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)}, + {"mac_rx_good_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)}, + {"mac_rx_bad_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)}, + {"mac_rx_uni_pkt_num", 
HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)}, + {"mac_rx_multi_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)}, + {"mac_rx_broad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)}, + {"mac_rx_undersize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)}, + {"mac_rx_oversize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)}, + {"mac_rx_64_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)}, + {"mac_rx_65_127_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)}, + {"mac_rx_128_255_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)}, + {"mac_rx_256_511_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)}, + {"mac_rx_512_1023_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)}, + {"mac_rx_1024_1518_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)}, + {"mac_rx_1519_2047_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)}, + {"mac_rx_2048_4095_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)}, + {"mac_rx_4096_8191_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)}, + {"mac_rx_8192_9216_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)}, + {"mac_rx_9217_12287_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)}, + {"mac_rx_12288_16383_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)}, + {"mac_rx_1519_max_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)}, + {"mac_rx_1519_max_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)}, + + {"mac_tx_fragment_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)}, + {"mac_tx_undermin_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)}, + {"mac_tx_jabber_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)}, + {"mac_tx_err_all_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)}, + {"mac_tx_from_app_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)}, + {"mac_tx_from_app_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)}, + {"mac_rx_fragment_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)}, + {"mac_rx_undermin_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)}, + {"mac_rx_jabber_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)}, + {"mac_rx_fcs_err_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)}, + {"mac_rx_send_app_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)}, + {"mac_rx_send_app_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, + 
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)} +}; + +static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = { + { + .flags = HCLGE_MAC_MGR_MASK_VLAN_B, + .ethter_type = cpu_to_le16(ETH_P_LLDP), + .mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e}, + .i_port_bitmap = 0x1, + }, +}; + +static const u32 hclge_dfx_bd_offset_list[] = { + HCLGE_DFX_BIOS_BD_OFFSET, + HCLGE_DFX_SSU_0_BD_OFFSET, + HCLGE_DFX_SSU_1_BD_OFFSET, + HCLGE_DFX_IGU_BD_OFFSET, + HCLGE_DFX_RPU_0_BD_OFFSET, + HCLGE_DFX_RPU_1_BD_OFFSET, + HCLGE_DFX_NCSI_BD_OFFSET, + HCLGE_DFX_RTC_BD_OFFSET, + HCLGE_DFX_PPP_BD_OFFSET, + HCLGE_DFX_RCB_BD_OFFSET, + HCLGE_DFX_TQP_BD_OFFSET, + HCLGE_DFX_SSU_2_BD_OFFSET +}; + +static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = { + HCLGE_OPC_DFX_BIOS_COMMON_REG, + HCLGE_OPC_DFX_SSU_REG_0, + HCLGE_OPC_DFX_SSU_REG_1, + HCLGE_OPC_DFX_IGU_EGU_REG, + HCLGE_OPC_DFX_RPU_REG_0, + HCLGE_OPC_DFX_RPU_REG_1, + HCLGE_OPC_DFX_NCSI_REG, + HCLGE_OPC_DFX_RTC_REG, + HCLGE_OPC_DFX_PPP_REG, + HCLGE_OPC_DFX_RCB_REG, + HCLGE_OPC_DFX_TQP_REG, + HCLGE_OPC_DFX_SSU_REG_2 +}; + +static const struct key_info meta_data_key_info[] = { + { PACKET_TYPE_ID, 6 }, + { IP_FRAGEMENT, 1 }, + { ROCE_TYPE, 1 }, + { NEXT_KEY, 5 }, + { VLAN_NUMBER, 2 }, + { SRC_VPORT, 12 }, + { DST_VPORT, 12 }, + { TUNNEL_PACKET, 1 }, +}; + +static const struct key_info tuple_key_info[] = { + { OUTER_DST_MAC, 48, KEY_OPT_MAC, -1, -1 }, + { OUTER_SRC_MAC, 48, KEY_OPT_MAC, -1, -1 }, + { OUTER_VLAN_TAG_FST, 16, KEY_OPT_LE16, -1, -1 }, + { OUTER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 }, + { OUTER_ETH_TYPE, 16, KEY_OPT_LE16, -1, -1 }, + { OUTER_L2_RSV, 16, KEY_OPT_LE16, -1, -1 }, + { OUTER_IP_TOS, 8, KEY_OPT_U8, -1, -1 }, + { OUTER_IP_PROTO, 8, KEY_OPT_U8, -1, -1 }, + { OUTER_SRC_IP, 32, KEY_OPT_IP, -1, -1 }, + { OUTER_DST_IP, 32, KEY_OPT_IP, -1, -1 }, + { OUTER_L3_RSV, 16, KEY_OPT_LE16, -1, -1 }, + { OUTER_SRC_PORT, 16, KEY_OPT_LE16, -1, -1 }, + { OUTER_DST_PORT, 16, KEY_OPT_LE16, -1, -1 }, + { OUTER_L4_RSV, 32, KEY_OPT_LE32, -1, -1 }, + { OUTER_TUN_VNI, 24, KEY_OPT_VNI, -1, -1 }, + { OUTER_TUN_FLOW_ID, 8, KEY_OPT_U8, -1, -1 }, + { INNER_DST_MAC, 48, KEY_OPT_MAC, + offsetof(struct hclge_fd_rule, tuples.dst_mac), + offsetof(struct hclge_fd_rule, tuples_mask.dst_mac) }, + { INNER_SRC_MAC, 48, KEY_OPT_MAC, + offsetof(struct hclge_fd_rule, tuples.src_mac), + offsetof(struct hclge_fd_rule, tuples_mask.src_mac) }, + { INNER_VLAN_TAG_FST, 16, KEY_OPT_LE16, + offsetof(struct hclge_fd_rule, tuples.vlan_tag1), + offsetof(struct hclge_fd_rule, tuples_mask.vlan_tag1) }, + { INNER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 }, + { INNER_ETH_TYPE, 16, KEY_OPT_LE16, + offsetof(struct hclge_fd_rule, tuples.ether_proto), + offsetof(struct hclge_fd_rule, tuples_mask.ether_proto) }, + { INNER_L2_RSV, 16, KEY_OPT_LE16, + offsetof(struct hclge_fd_rule, tuples.l2_user_def), + offsetof(struct hclge_fd_rule, tuples_mask.l2_user_def) }, + { INNER_IP_TOS, 8, KEY_OPT_U8, + offsetof(struct hclge_fd_rule, tuples.ip_tos), + offsetof(struct hclge_fd_rule, tuples_mask.ip_tos) }, + { INNER_IP_PROTO, 8, KEY_OPT_U8, + offsetof(struct hclge_fd_rule, tuples.ip_proto), + offsetof(struct hclge_fd_rule, tuples_mask.ip_proto) }, + { INNER_SRC_IP, 32, KEY_OPT_IP, + offsetof(struct hclge_fd_rule, tuples.src_ip), + offsetof(struct hclge_fd_rule, tuples_mask.src_ip) }, + { INNER_DST_IP, 32, KEY_OPT_IP, + offsetof(struct hclge_fd_rule, tuples.dst_ip), + offsetof(struct hclge_fd_rule, tuples_mask.dst_ip) }, + { INNER_L3_RSV, 16, KEY_OPT_LE16, + offsetof(struct hclge_fd_rule, 
tuples.l3_user_def), + offsetof(struct hclge_fd_rule, tuples_mask.l3_user_def) }, + { INNER_SRC_PORT, 16, KEY_OPT_LE16, + offsetof(struct hclge_fd_rule, tuples.src_port), + offsetof(struct hclge_fd_rule, tuples_mask.src_port) }, + { INNER_DST_PORT, 16, KEY_OPT_LE16, + offsetof(struct hclge_fd_rule, tuples.dst_port), + offsetof(struct hclge_fd_rule, tuples_mask.dst_port) }, + { INNER_L4_RSV, 32, KEY_OPT_LE32, + offsetof(struct hclge_fd_rule, tuples.l4_user_def), + offsetof(struct hclge_fd_rule, tuples_mask.l4_user_def) }, +}; + +/** + * hclge_cmd_send - send command to command queue + * @hw: pointer to the hw struct + * @desc: prefilled descriptor for describing the command + * @num: the number of descriptors to be sent + * + * This is the main send command for the command queue; it sends the + * command to the queue, cleans the queue, etc + **/ +int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num) +{ + return hclge_comm_cmd_send(&hw->hw, desc, num); +} + +static int hclge_mac_update_stats_defective(struct hclge_dev *hdev) +{ +#define HCLGE_MAC_CMD_NUM 21 + + u64 *data = (u64 *)(&hdev->mac_stats); + struct hclge_desc desc[HCLGE_MAC_CMD_NUM]; + __le64 *desc_data; + u32 data_size; + int ret; + u32 i; + + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true); + ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM); + if (ret) { + dev_err(&hdev->pdev->dev, + "Get MAC pkt stats fail, status = %d.\n", ret); + + return ret; + } + + /* The first desc has a 64-bit header, so the data size needs to be reduced by 1 */ + data_size = sizeof(desc) / (sizeof(u64)) - 1; + + desc_data = (__le64 *)(&desc[0].data[0]); + for (i = 0; i < data_size; i++) { + /* data memory is continuous because only the first desc has a + * header in this command + */ + *data += le64_to_cpu(*desc_data); + data++; + desc_data++; + } + + return 0; +} + +static int hclge_mac_update_stats_complete(struct hclge_dev *hdev) +{ +#define HCLGE_REG_NUM_PER_DESC 4 + + u32 reg_num = hdev->ae_dev->dev_specs.mac_stats_num; + u64 *data = (u64 *)(&hdev->mac_stats); + struct hclge_desc *desc; + __le64 *desc_data; + u32 data_size; + u32 desc_num; + int ret; + u32 i; + + /* The first desc has a 64-bit header, so need to consider it */ + desc_num = reg_num / HCLGE_REG_NUM_PER_DESC + 1; + + /* This may be called inside atomic sections, + * so GFP_ATOMIC is more suitable here + */ + desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC); + if (!desc) + return -ENOMEM; + + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true); + ret = hclge_cmd_send(&hdev->hw, desc, desc_num); + if (ret) { + kfree(desc); + return ret; + } + + data_size = min_t(u32, sizeof(hdev->mac_stats) / sizeof(u64), reg_num); + + desc_data = (__le64 *)(&desc[0].data[0]); + for (i = 0; i < data_size; i++) { + /* data memory is continuous because only the first desc has a + * header in this command + */ + *data += le64_to_cpu(*desc_data); + data++; + desc_data++; + } + + kfree(desc); + + return 0; +} + +static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *reg_num) +{ + struct hclge_desc desc; + int ret; + + /* Driver needs total register number of both valid registers and + * reserved registers, but the old firmware only returns number + * of valid registers in device V2. To be compatible with these + * devices, driver uses a fixed value.
+ */ + if (hdev->ae_dev->dev_version == HNAE3_DEVICE_VERSION_V2) { + *reg_num = HCLGE_MAC_STATS_MAX_NUM_V1; + return 0; + } + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to query mac statistic reg number, ret = %d\n", + ret); + return ret; + } + + *reg_num = le32_to_cpu(desc.data[0]); + if (*reg_num == 0) { + dev_err(&hdev->pdev->dev, + "mac statistic reg number is invalid!\n"); + return -ENODATA; + } + + return 0; +} + +int hclge_mac_update_stats(struct hclge_dev *hdev) +{ + /* The firmware supports the new statistics acquisition method */ + if (hdev->ae_dev->dev_specs.mac_stats_num) + return hclge_mac_update_stats_complete(hdev); + else + return hclge_mac_update_stats_defective(hdev); +} + +static int hclge_comm_get_count(struct hclge_dev *hdev, + const struct hclge_comm_stats_str strs[], + u32 size) +{ + int count = 0; + u32 i; + + for (i = 0; i < size; i++) + if (strs[i].stats_num <= hdev->ae_dev->dev_specs.mac_stats_num) + count++; + + return count; +} + +static u64 *hclge_comm_get_stats(struct hclge_dev *hdev, + const struct hclge_comm_stats_str strs[], + int size, u64 *data) +{ + u64 *buf = data; + u32 i; + + for (i = 0; i < size; i++) { + if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num) + continue; + + *buf = HCLGE_STATS_READ(&hdev->mac_stats, strs[i].offset); + buf++; + } + + return buf; +} + +static u8 *hclge_comm_get_strings(struct hclge_dev *hdev, u32 stringset, + const struct hclge_comm_stats_str strs[], + int size, u8 *data) +{ + char *buff = (char *)data; + u32 i; + + if (stringset != ETH_SS_STATS) + return buff; + + for (i = 0; i < size; i++) { + if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num) + continue; + + snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc); + buff = buff + ETH_GSTRING_LEN; + } + + return (u8 *)buff; +} + +static void hclge_update_stats_for_all(struct hclge_dev *hdev) +{ + struct hnae3_handle *handle; + int status; + + handle = &hdev->vport[0].nic; + if (handle->client) { + status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw); + if (status) { + dev_err(&hdev->pdev->dev, + "Update TQPS stats fail, status = %d.\n", + status); + } + } + + hclge_update_fec_stats(hdev); + + status = hclge_mac_update_stats(hdev); + if (status) + dev_err(&hdev->pdev->dev, + "Update MAC stats fail, status = %d.\n", status); +} + +static void hclge_update_stats(struct hnae3_handle *handle, + struct net_device_stats *net_stats) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + int status; + + if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state)) + return; + + status = hclge_mac_update_stats(hdev); + if (status) + dev_err(&hdev->pdev->dev, + "Update MAC stats fail, status = %d.\n", + status); + + status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw); + if (status) + dev_err(&hdev->pdev->dev, + "Update TQPS stats fail, status = %d.\n", + status); + + clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state); +} + +static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset) +{ +#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK | \ + HNAE3_SUPPORT_PHY_LOOPBACK | \ + HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK | \ + HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK | \ + HNAE3_SUPPORT_EXTERNAL_LOOPBACK) + + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + int count = 0; + + /* Loopback test support rules: 
+ * mac: only GE mode support + * serdes: all mac mode will support include GE/XGE/LGE/CGE + * phy: only support when phy device exist on board + */ + if (stringset == ETH_SS_TEST) { + /* clear loopback bit flags at first */ + handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS)); + if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 || + hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M || + hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M || + hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) { + count += 1; + handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK; + } + + count += 1; + handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK; + count += 1; + handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK; + count += 1; + handle->flags |= HNAE3_SUPPORT_EXTERNAL_LOOPBACK; + + if ((hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv && + hdev->hw.mac.phydev->drv->set_loopback) || + hnae3_dev_phy_imp_supported(hdev)) { + count += 1; + handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK; + } + } else if (stringset == ETH_SS_STATS) { + count = hclge_comm_get_count(hdev, g_mac_stats_string, + ARRAY_SIZE(g_mac_stats_string)) + + hclge_comm_tqps_get_sset_count(handle); + } + + return count; +} + +static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset, + u8 *data) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + u8 *p = (char *)data; + int size; + + if (stringset == ETH_SS_STATS) { + size = ARRAY_SIZE(g_mac_stats_string); + p = hclge_comm_get_strings(hdev, stringset, g_mac_stats_string, + size, p); + p = hclge_comm_tqps_get_strings(handle, p); + } else if (stringset == ETH_SS_TEST) { + if (handle->flags & HNAE3_SUPPORT_EXTERNAL_LOOPBACK) { + memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_EXTERNAL], + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) { + memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP], + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) { + memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES], + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) { + memcpy(p, + hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES], + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) { + memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY], + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + } +} + +static void hclge_get_stats(struct hnae3_handle *handle, u64 *data) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + u64 *p; + + p = hclge_comm_get_stats(hdev, g_mac_stats_string, + ARRAY_SIZE(g_mac_stats_string), data); + p = hclge_comm_tqps_get_stats(handle, p); +} + +static void hclge_get_mac_stat(struct hnae3_handle *handle, + struct hns3_mac_stats *mac_stats) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + hclge_update_stats(handle, NULL); + + mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num; + mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num; +} + +static int hclge_parse_func_status(struct hclge_dev *hdev, + struct hclge_func_status_cmd *status) +{ +#define HCLGE_MAC_ID_MASK 0xF + + if (!(status->pf_state & HCLGE_PF_STATE_DONE)) + return -EINVAL; + + /* Set the pf to main pf */ + if (status->pf_state & HCLGE_PF_STATE_MAIN) + hdev->flag |= HCLGE_FLAG_MAIN; + else + hdev->flag &= ~HCLGE_FLAG_MAIN; + + hdev->hw.mac.mac_id = status->mac_id & 
HCLGE_MAC_ID_MASK; + return 0; +} + +static int hclge_query_function_status(struct hclge_dev *hdev) +{ +#define HCLGE_QUERY_MAX_CNT 5 + + struct hclge_func_status_cmd *req; + struct hclge_desc desc; + int timeout = 0; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true); + req = (struct hclge_func_status_cmd *)desc.data; + + do { + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "query function status failed %d.\n", ret); + return ret; + } + + /* Check pf reset is done */ + if (req->pf_state) + break; + usleep_range(1000, 2000); + } while (timeout++ < HCLGE_QUERY_MAX_CNT); + + return hclge_parse_func_status(hdev, req); +} + +static int hclge_query_pf_resource(struct hclge_dev *hdev) +{ + struct hclge_pf_res_cmd *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "query pf resource failed %d.\n", ret); + return ret; + } + + req = (struct hclge_pf_res_cmd *)desc.data; + hdev->num_tqps = le16_to_cpu(req->tqp_num) + + le16_to_cpu(req->ext_tqp_num); + hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S; + + if (req->tx_buf_size) + hdev->tx_buf_size = + le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S; + else + hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF; + + hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT); + + if (req->dv_buf_size) + hdev->dv_buf_size = + le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S; + else + hdev->dv_buf_size = HCLGE_DEFAULT_DV; + + hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT); + + hdev->num_nic_msi = le16_to_cpu(req->msixcap_localid_number_nic); + if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) { + dev_err(&hdev->pdev->dev, + "only %u msi resources available, not enough for pf(min:2).\n", + hdev->num_nic_msi); + return -EINVAL; + } + + if (hnae3_dev_roce_supported(hdev)) { + hdev->num_roce_msi = + le16_to_cpu(req->pf_intr_vector_number_roce); + + /* PF should have NIC vectors and Roce vectors, + * NIC vectors are queued before Roce vectors. 
+ */ + hdev->num_msi = hdev->num_nic_msi + hdev->num_roce_msi; + } else { + hdev->num_msi = hdev->num_nic_msi; + } + + return 0; +} + +static int hclge_parse_speed(u8 speed_cmd, u32 *speed) +{ + switch (speed_cmd) { + case HCLGE_FW_MAC_SPEED_10M: + *speed = HCLGE_MAC_SPEED_10M; + break; + case HCLGE_FW_MAC_SPEED_100M: + *speed = HCLGE_MAC_SPEED_100M; + break; + case HCLGE_FW_MAC_SPEED_1G: + *speed = HCLGE_MAC_SPEED_1G; + break; + case HCLGE_FW_MAC_SPEED_10G: + *speed = HCLGE_MAC_SPEED_10G; + break; + case HCLGE_FW_MAC_SPEED_25G: + *speed = HCLGE_MAC_SPEED_25G; + break; + case HCLGE_FW_MAC_SPEED_40G: + *speed = HCLGE_MAC_SPEED_40G; + break; + case HCLGE_FW_MAC_SPEED_50G: + *speed = HCLGE_MAC_SPEED_50G; + break; + case HCLGE_FW_MAC_SPEED_100G: + *speed = HCLGE_MAC_SPEED_100G; + break; + case HCLGE_FW_MAC_SPEED_200G: + *speed = HCLGE_MAC_SPEED_200G; + break; + default: + return -EINVAL; + } + + return 0; +} + +static const struct hclge_speed_bit_map speed_bit_map[] = { + {HCLGE_MAC_SPEED_10M, HCLGE_SUPPORT_10M_BIT}, + {HCLGE_MAC_SPEED_100M, HCLGE_SUPPORT_100M_BIT}, + {HCLGE_MAC_SPEED_1G, HCLGE_SUPPORT_1G_BIT}, + {HCLGE_MAC_SPEED_10G, HCLGE_SUPPORT_10G_BIT}, + {HCLGE_MAC_SPEED_25G, HCLGE_SUPPORT_25G_BIT}, + {HCLGE_MAC_SPEED_40G, HCLGE_SUPPORT_40G_BIT}, + {HCLGE_MAC_SPEED_50G, HCLGE_SUPPORT_50G_BIT}, + {HCLGE_MAC_SPEED_100G, HCLGE_SUPPORT_100G_BIT}, + {HCLGE_MAC_SPEED_200G, HCLGE_SUPPORT_200G_BIT}, +}; + +static int hclge_get_speed_bit(u32 speed, u32 *speed_bit) +{ + u16 i; + + for (i = 0; i < ARRAY_SIZE(speed_bit_map); i++) { + if (speed == speed_bit_map[i].speed) { + *speed_bit = speed_bit_map[i].speed_bit; + return 0; + } + } + + return -EINVAL; +} + +static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + u32 speed_ability = hdev->hw.mac.speed_ability; + u32 speed_bit = 0; + int ret; + + ret = hclge_get_speed_bit(speed, &speed_bit); + if (ret) + return ret; + + if (speed_bit & speed_ability) + return 0; + + return -EINVAL; +} + +static void hclge_update_fec_support(struct hclge_mac *mac) +{ + linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported); + linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported); + linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT, mac->supported); + linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported); + + if (mac->fec_ability & BIT(HNAE3_FEC_BASER)) + linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, + mac->supported); + if (mac->fec_ability & BIT(HNAE3_FEC_RS)) + linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, + mac->supported); + if (mac->fec_ability & BIT(HNAE3_FEC_LLRS)) + linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT, + mac->supported); + if (mac->fec_ability & BIT(HNAE3_FEC_NONE)) + linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, + mac->supported); +} + +static void hclge_convert_setting_sr(u16 speed_ability, + unsigned long *link_mode) +{ + if (speed_ability & HCLGE_SUPPORT_10G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, + link_mode); + if (speed_ability & HCLGE_SUPPORT_25G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, + link_mode); + if (speed_ability & HCLGE_SUPPORT_40G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, + link_mode); + if (speed_ability & HCLGE_SUPPORT_50G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, + link_mode); + if (speed_ability & HCLGE_SUPPORT_100G_BIT) + 
linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, + link_mode); + if (speed_ability & HCLGE_SUPPORT_200G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT, + link_mode); +} + +static void hclge_convert_setting_lr(u16 speed_ability, + unsigned long *link_mode) +{ + if (speed_ability & HCLGE_SUPPORT_10G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, + link_mode); + if (speed_ability & HCLGE_SUPPORT_25G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, + link_mode); + if (speed_ability & HCLGE_SUPPORT_50G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT, + link_mode); + if (speed_ability & HCLGE_SUPPORT_40G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, + link_mode); + if (speed_ability & HCLGE_SUPPORT_100G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, + link_mode); + if (speed_ability & HCLGE_SUPPORT_200G_BIT) + linkmode_set_bit( + ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT, + link_mode); +} + +static void hclge_convert_setting_cr(u16 speed_ability, + unsigned long *link_mode) +{ + if (speed_ability & HCLGE_SUPPORT_10G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT, + link_mode); + if (speed_ability & HCLGE_SUPPORT_25G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, + link_mode); + if (speed_ability & HCLGE_SUPPORT_40G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, + link_mode); + if (speed_ability & HCLGE_SUPPORT_50G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, + link_mode); + if (speed_ability & HCLGE_SUPPORT_100G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, + link_mode); + if (speed_ability & HCLGE_SUPPORT_200G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT, + link_mode); +} + +static void hclge_convert_setting_kr(u16 speed_ability, + unsigned long *link_mode) +{ + if (speed_ability & HCLGE_SUPPORT_1G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, + link_mode); + if (speed_ability & HCLGE_SUPPORT_10G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, + link_mode); + if (speed_ability & HCLGE_SUPPORT_25G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, + link_mode); + if (speed_ability & HCLGE_SUPPORT_40G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, + link_mode); + if (speed_ability & HCLGE_SUPPORT_50G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, + link_mode); + if (speed_ability & HCLGE_SUPPORT_100G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, + link_mode); + if (speed_ability & HCLGE_SUPPORT_200G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT, + link_mode); +} + +static void hclge_convert_setting_fec(struct hclge_mac *mac) +{ + /* If firmware has reported fec_ability, don't need to convert by speed */ + if (mac->fec_ability) + goto out; + + switch (mac->speed) { + case HCLGE_MAC_SPEED_10G: + case HCLGE_MAC_SPEED_40G: + mac->fec_ability = BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO) | + BIT(HNAE3_FEC_NONE); + break; + case HCLGE_MAC_SPEED_25G: + case HCLGE_MAC_SPEED_50G: + mac->fec_ability = BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) | + BIT(HNAE3_FEC_AUTO) | BIT(HNAE3_FEC_NONE); + break; + case HCLGE_MAC_SPEED_100G: + mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO) | + BIT(HNAE3_FEC_NONE); + break; + case HCLGE_MAC_SPEED_200G: + mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO) | + BIT(HNAE3_FEC_LLRS); + break; + default: + 
mac->fec_ability = 0; + break; + } + +out: + hclge_update_fec_support(mac); +} + +static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev, + u16 speed_ability) +{ + struct hclge_mac *mac = &hdev->hw.mac; + + if (speed_ability & HCLGE_SUPPORT_1G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, + mac->supported); + + hclge_convert_setting_sr(speed_ability, mac->supported); + hclge_convert_setting_lr(speed_ability, mac->supported); + hclge_convert_setting_cr(speed_ability, mac->supported); + if (hnae3_dev_fec_supported(hdev)) + hclge_convert_setting_fec(mac); + + if (hnae3_dev_pause_supported(hdev)) + linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported); + + linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported); +} + +static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev, + u16 speed_ability) +{ + struct hclge_mac *mac = &hdev->hw.mac; + + hclge_convert_setting_kr(speed_ability, mac->supported); + if (hnae3_dev_fec_supported(hdev)) + hclge_convert_setting_fec(mac); + + if (hnae3_dev_pause_supported(hdev)) + linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported); + + linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported); +} + +static void hclge_parse_copper_link_mode(struct hclge_dev *hdev, + u16 speed_ability) +{ + unsigned long *supported = hdev->hw.mac.supported; + + /* default to support all speed for GE port */ + if (!speed_ability) + speed_ability = HCLGE_SUPPORT_GE; + + if (speed_ability & HCLGE_SUPPORT_1G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + supported); + + if (speed_ability & HCLGE_SUPPORT_100M_BIT) { + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, + supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, + supported); + } + + if (speed_ability & HCLGE_SUPPORT_10M_BIT) { + linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported); + } + + if (hnae3_dev_pause_supported(hdev)) { + linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported); + } + + linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported); +} + +static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability) +{ + u8 media_type = hdev->hw.mac.media_type; + + if (media_type == HNAE3_MEDIA_TYPE_FIBER) + hclge_parse_fiber_link_mode(hdev, speed_ability); + else if (media_type == HNAE3_MEDIA_TYPE_COPPER) + hclge_parse_copper_link_mode(hdev, speed_ability); + else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE) + hclge_parse_backplane_link_mode(hdev, speed_ability); +} + +static u32 hclge_get_max_speed(u16 speed_ability) +{ + if (speed_ability & HCLGE_SUPPORT_200G_BIT) + return HCLGE_MAC_SPEED_200G; + + if (speed_ability & HCLGE_SUPPORT_100G_BIT) + return HCLGE_MAC_SPEED_100G; + + if (speed_ability & HCLGE_SUPPORT_50G_BIT) + return HCLGE_MAC_SPEED_50G; + + if (speed_ability & HCLGE_SUPPORT_40G_BIT) + return HCLGE_MAC_SPEED_40G; + + if (speed_ability & HCLGE_SUPPORT_25G_BIT) + return HCLGE_MAC_SPEED_25G; + + if (speed_ability & HCLGE_SUPPORT_10G_BIT) + return HCLGE_MAC_SPEED_10G; + + if (speed_ability & HCLGE_SUPPORT_1G_BIT) + return HCLGE_MAC_SPEED_1G; + + if (speed_ability & HCLGE_SUPPORT_100M_BIT) + return HCLGE_MAC_SPEED_100M; + + if (speed_ability & HCLGE_SUPPORT_10M_BIT) + 
return HCLGE_MAC_SPEED_10M; + + return HCLGE_MAC_SPEED_1G; +} + +static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc) +{ +#define HCLGE_TX_SPARE_SIZE_UNIT 4096 +#define SPEED_ABILITY_EXT_SHIFT 8 + + struct hclge_cfg_param_cmd *req; + u64 mac_addr_tmp_high; + u16 speed_ability_ext; + u64 mac_addr_tmp; + unsigned int i; + + req = (struct hclge_cfg_param_cmd *)desc[0].data; + + /* get the configuration */ + cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]), + HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S); + cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]), + HCLGE_CFG_TQP_DESC_N_M, + HCLGE_CFG_TQP_DESC_N_S); + + cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]), + HCLGE_CFG_PHY_ADDR_M, + HCLGE_CFG_PHY_ADDR_S); + cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]), + HCLGE_CFG_MEDIA_TP_M, + HCLGE_CFG_MEDIA_TP_S); + cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]), + HCLGE_CFG_RX_BUF_LEN_M, + HCLGE_CFG_RX_BUF_LEN_S); + /* get mac_address */ + mac_addr_tmp = __le32_to_cpu(req->param[2]); + mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]), + HCLGE_CFG_MAC_ADDR_H_M, + HCLGE_CFG_MAC_ADDR_H_S); + + mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1; + + cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]), + HCLGE_CFG_DEFAULT_SPEED_M, + HCLGE_CFG_DEFAULT_SPEED_S); + cfg->vf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]), + HCLGE_CFG_RSS_SIZE_M, + HCLGE_CFG_RSS_SIZE_S); + + for (i = 0; i < ETH_ALEN; i++) + cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff; + + req = (struct hclge_cfg_param_cmd *)desc[1].data; + cfg->numa_node_map = __le32_to_cpu(req->param[0]); + + cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]), + HCLGE_CFG_SPEED_ABILITY_M, + HCLGE_CFG_SPEED_ABILITY_S); + speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]), + HCLGE_CFG_SPEED_ABILITY_EXT_M, + HCLGE_CFG_SPEED_ABILITY_EXT_S); + cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT; + + cfg->vlan_fliter_cap = hnae3_get_field(__le32_to_cpu(req->param[1]), + HCLGE_CFG_VLAN_FLTR_CAP_M, + HCLGE_CFG_VLAN_FLTR_CAP_S); + + cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]), + HCLGE_CFG_UMV_TBL_SPACE_M, + HCLGE_CFG_UMV_TBL_SPACE_S); + + cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]), + HCLGE_CFG_PF_RSS_SIZE_M, + HCLGE_CFG_PF_RSS_SIZE_S); + + /* HCLGE_CFG_PF_RSS_SIZE_M is the PF max rss size, which is a + * power of 2, instead of reading out directly. This would + * be more flexible for future changes and expansions. + * When VF max rss size field is HCLGE_CFG_RSS_SIZE_S, + * it does not make sense if PF's field is 0. In this case, PF and VF + * has the same max rss size filed: HCLGE_CFG_RSS_SIZE_S. + */ + cfg->pf_rss_size_max = cfg->pf_rss_size_max ? + 1U << cfg->pf_rss_size_max : + cfg->vf_rss_size_max; + + /* The unit of the tx spare buffer size queried from configuration + * file is HCLGE_TX_SPARE_SIZE_UNIT(4096) bytes, so a conversion is + * needed here. 
+ */ + cfg->tx_spare_buf_size = hnae3_get_field(__le32_to_cpu(req->param[2]), + HCLGE_CFG_TX_SPARE_BUF_SIZE_M, + HCLGE_CFG_TX_SPARE_BUF_SIZE_S); + cfg->tx_spare_buf_size *= HCLGE_TX_SPARE_SIZE_UNIT; +} + +/* hclge_get_cfg: query the static parameter from flash + * @hdev: pointer to struct hclge_dev + * @hcfg: the config structure to be retrieved + */ +static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg) +{ + struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM]; + struct hclge_cfg_param_cmd *req; + unsigned int i; + int ret; + + for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) { + u32 offset = 0; + + req = (struct hclge_cfg_param_cmd *)desc[i].data; + hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM, + true); + hnae3_set_field(offset, HCLGE_CFG_OFFSET_M, + HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES); + /* Len should be in units of 4 bytes when sent to hardware */ + hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S, + HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT); + req->offset = cpu_to_le32(offset); + } + + ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM); + if (ret) { + dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret); + return ret; + } + + hclge_parse_cfg(hcfg, desc); + + return 0; +} + +static void hclge_set_default_dev_specs(struct hclge_dev *hdev) +{ +#define HCLGE_MAX_NON_TSO_BD_NUM 8U + + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); + + ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM; + ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE; + ae_dev->dev_specs.rss_key_size = HCLGE_COMM_RSS_KEY_SIZE; + ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE; + ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL; + ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME; + ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM; + ae_dev->dev_specs.umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF; +} + +static void hclge_parse_dev_specs(struct hclge_dev *hdev, + struct hclge_desc *desc) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); + struct hclge_dev_specs_0_cmd *req0; + struct hclge_dev_specs_1_cmd *req1; + + req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data; + req1 = (struct hclge_dev_specs_1_cmd *)desc[1].data; + + ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num; + ae_dev->dev_specs.rss_ind_tbl_size = + le16_to_cpu(req0->rss_ind_tbl_size); + ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max); + ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size); + ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate); + ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num); + ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl); + ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size); + ae_dev->dev_specs.umv_size = le16_to_cpu(req1->umv_size); + ae_dev->dev_specs.mc_mac_size = le16_to_cpu(req1->mc_mac_size); +} + +static void hclge_check_dev_specs(struct hclge_dev *hdev) +{ + struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs; + + if (!dev_specs->max_non_tso_bd_num) + dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM; + if (!dev_specs->rss_ind_tbl_size) + dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE; + if (!dev_specs->rss_key_size) + dev_specs->rss_key_size = HCLGE_COMM_RSS_KEY_SIZE; + if (!dev_specs->max_tm_rate) + dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE; + if (!dev_specs->max_qset_num) + dev_specs->max_qset_num = HCLGE_MAX_QSET_NUM; + if (!dev_specs->max_int_gl) + dev_specs->max_int_gl =
HCLGE_DEF_MAX_INT_GL; + if (!dev_specs->max_frm_size) + dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME; + if (!dev_specs->umv_size) + dev_specs->umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF; +} + +static int hclge_query_mac_stats_num(struct hclge_dev *hdev) +{ + u32 reg_num = 0; + int ret; + + ret = hclge_mac_query_reg_num(hdev, &reg_num); + if (ret && ret != -EOPNOTSUPP) + return ret; + + hdev->ae_dev->dev_specs.mac_stats_num = reg_num; + return 0; +} + +static int hclge_query_dev_specs(struct hclge_dev *hdev) +{ + struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM]; + int ret; + int i; + + ret = hclge_query_mac_stats_num(hdev); + if (ret) + return ret; + + /* set default specifications as devices lower than version V3 do not + * support querying specifications from firmware. + */ + if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) { + hclge_set_default_dev_specs(hdev); + return 0; + } + + for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) { + hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, + true); + desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); + } + hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true); + + ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM); + if (ret) + return ret; + + hclge_parse_dev_specs(hdev, desc); + hclge_check_dev_specs(hdev); + + return 0; +} + +static int hclge_get_cap(struct hclge_dev *hdev) +{ + int ret; + + ret = hclge_query_function_status(hdev); + if (ret) { + dev_err(&hdev->pdev->dev, + "query function status error %d.\n", ret); + return ret; + } + + /* get pf resource */ + return hclge_query_pf_resource(hdev); +} + +static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev) +{ +#define HCLGE_MIN_TX_DESC 64 +#define HCLGE_MIN_RX_DESC 64 + + if (!is_kdump_kernel()) + return; + + dev_info(&hdev->pdev->dev, + "Running kdump kernel.
Using minimal resources\n"); + + /* minimal queue pairs equals to the number of vports */ + hdev->num_tqps = hdev->num_req_vfs + 1; + hdev->num_tx_desc = HCLGE_MIN_TX_DESC; + hdev->num_rx_desc = HCLGE_MIN_RX_DESC; +} + +static void hclge_init_tc_config(struct hclge_dev *hdev) +{ + unsigned int i; + + if (hdev->tc_max > HNAE3_MAX_TC || + hdev->tc_max < 1) { + dev_warn(&hdev->pdev->dev, "TC num = %u.\n", + hdev->tc_max); + hdev->tc_max = 1; + } + + /* Dev does not support DCB */ + if (!hnae3_dev_dcb_supported(hdev)) { + hdev->tc_max = 1; + hdev->pfc_max = 0; + } else { + hdev->pfc_max = hdev->tc_max; + } + + hdev->tm_info.num_tc = 1; + + /* Currently not support uncontiuous tc */ + for (i = 0; i < hdev->tm_info.num_tc; i++) + hnae3_set_bit(hdev->hw_tc_map, i, 1); + + hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE; +} + +static int hclge_configure(struct hclge_dev *hdev) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); + struct hclge_cfg cfg; + int ret; + + ret = hclge_get_cfg(hdev, &cfg); + if (ret) + return ret; + + hdev->base_tqp_pid = 0; + hdev->vf_rss_size_max = cfg.vf_rss_size_max; + hdev->pf_rss_size_max = cfg.pf_rss_size_max; + hdev->rx_buf_len = cfg.rx_buf_len; + ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr); + hdev->hw.mac.media_type = cfg.media_type; + hdev->hw.mac.phy_addr = cfg.phy_addr; + hdev->num_tx_desc = cfg.tqp_desc_num; + hdev->num_rx_desc = cfg.tqp_desc_num; + hdev->tm_info.num_pg = 1; + hdev->tc_max = cfg.tc_num; + hdev->tm_info.hw_pfc_map = 0; + if (cfg.umv_space) + hdev->wanted_umv_size = cfg.umv_space; + else + hdev->wanted_umv_size = hdev->ae_dev->dev_specs.umv_size; + hdev->tx_spare_buf_size = cfg.tx_spare_buf_size; + hdev->gro_en = true; + if (cfg.vlan_fliter_cap == HCLGE_VLAN_FLTR_CAN_MDF) + set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps); + + if (hnae3_ae_dev_fd_supported(hdev->ae_dev)) { + hdev->fd_en = true; + hdev->fd_active_type = HCLGE_FD_RULE_NONE; + } + + ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed); + if (ret) { + dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n", + cfg.default_speed, ret); + return ret; + } + + hclge_parse_link_mode(hdev, cfg.speed_ability); + + hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability); + + hclge_init_tc_config(hdev); + hclge_init_kdump_kernel_config(hdev); + + return ret; +} + +static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min, + u16 tso_mss_max) +{ + struct hclge_cfg_tso_status_cmd *req; + struct hclge_desc desc; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false); + + req = (struct hclge_cfg_tso_status_cmd *)desc.data; + req->tso_mss_min = cpu_to_le16(tso_mss_min); + req->tso_mss_max = cpu_to_le16(tso_mss_max); + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int hclge_config_gro(struct hclge_dev *hdev) +{ + struct hclge_cfg_gro_status_cmd *req; + struct hclge_desc desc; + int ret; + + if (!hnae3_ae_dev_gro_supported(hdev->ae_dev)) + return 0; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false); + req = (struct hclge_cfg_gro_status_cmd *)desc.data; + + req->gro_en = hdev->gro_en ? 
1 : 0; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "GRO hardware config cmd failed, ret = %d\n", ret); + + return ret; +} + +static int hclge_alloc_tqps(struct hclge_dev *hdev) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); + struct hclge_comm_tqp *tqp; + int i; + + hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps, + sizeof(struct hclge_comm_tqp), GFP_KERNEL); + if (!hdev->htqp) + return -ENOMEM; + + tqp = hdev->htqp; + + for (i = 0; i < hdev->num_tqps; i++) { + tqp->dev = &hdev->pdev->dev; + tqp->index = i; + + tqp->q.ae_algo = &ae_algo; + tqp->q.buf_size = hdev->rx_buf_len; + tqp->q.tx_desc_num = hdev->num_tx_desc; + tqp->q.rx_desc_num = hdev->num_rx_desc; + + /* need an extended offset to configure queues >= + * HCLGE_TQP_MAX_SIZE_DEV_V2 + */ + if (i < HCLGE_TQP_MAX_SIZE_DEV_V2) + tqp->q.io_base = hdev->hw.hw.io_base + + HCLGE_TQP_REG_OFFSET + + i * HCLGE_TQP_REG_SIZE; + else + tqp->q.io_base = hdev->hw.hw.io_base + + HCLGE_TQP_REG_OFFSET + + HCLGE_TQP_EXT_REG_OFFSET + + (i - HCLGE_TQP_MAX_SIZE_DEV_V2) * + HCLGE_TQP_REG_SIZE; + + /* when device supports tx push and has device memory, + * the queue can execute push mode or doorbell mode on + * device memory. + */ + if (test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, ae_dev->caps)) + tqp->q.mem_base = hdev->hw.hw.mem_base + + HCLGE_TQP_MEM_OFFSET(hdev, i); + + tqp++; + } + + return 0; +} + +static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id, + u16 tqp_pid, u16 tqp_vid, bool is_pf) +{ + struct hclge_tqp_map_cmd *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false); + + req = (struct hclge_tqp_map_cmd *)desc.data; + req->tqp_id = cpu_to_le16(tqp_pid); + req->tqp_vf = func_id; + req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B; + if (!is_pf) + req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B; + req->tqp_vid = cpu_to_le16(tqp_vid); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret); + + return ret; +} + +static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps) +{ + struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; + struct hclge_dev *hdev = vport->back; + int i, alloced; + + for (i = 0, alloced = 0; i < hdev->num_tqps && + alloced < num_tqps; i++) { + if (!hdev->htqp[i].alloced) { + hdev->htqp[i].q.handle = &vport->nic; + hdev->htqp[i].q.tqp_index = alloced; + hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc; + hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc; + kinfo->tqp[alloced] = &hdev->htqp[i].q; + hdev->htqp[i].alloced = true; + alloced++; + } + } + vport->alloc_tqps = alloced; + kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max, + vport->alloc_tqps / hdev->tm_info.num_tc); + + /* ensure one to one mapping between irq and queue at default */ + kinfo->rss_size = min_t(u16, kinfo->rss_size, + (hdev->num_nic_msi - 1) / hdev->tm_info.num_tc); + + return 0; +} + +static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps, + u16 num_tx_desc, u16 num_rx_desc) + +{ + struct hnae3_handle *nic = &vport->nic; + struct hnae3_knic_private_info *kinfo = &nic->kinfo; + struct hclge_dev *hdev = vport->back; + int ret; + + kinfo->num_tx_desc = num_tx_desc; + kinfo->num_rx_desc = num_rx_desc; + + kinfo->rx_buf_len = hdev->rx_buf_len; + kinfo->tx_spare_buf_size = hdev->tx_spare_buf_size; + + kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps, + sizeof(struct hnae3_queue *), GFP_KERNEL); + if (!kinfo->tqp) + return -ENOMEM; + + ret = 
hclge_assign_tqp(vport, num_tqps); + if (ret) + dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret); + + return ret; +} + +static int hclge_map_tqp_to_vport(struct hclge_dev *hdev, + struct hclge_vport *vport) +{ + struct hnae3_handle *nic = &vport->nic; + struct hnae3_knic_private_info *kinfo; + u16 i; + + kinfo = &nic->kinfo; + for (i = 0; i < vport->alloc_tqps; i++) { + struct hclge_comm_tqp *q = + container_of(kinfo->tqp[i], struct hclge_comm_tqp, q); + bool is_pf; + int ret; + + is_pf = !(vport->vport_id); + ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index, + i, is_pf); + if (ret) + return ret; + } + + return 0; +} + +static int hclge_map_tqp(struct hclge_dev *hdev) +{ + struct hclge_vport *vport = hdev->vport; + u16 i, num_vport; + + num_vport = hdev->num_req_vfs + 1; + for (i = 0; i < num_vport; i++) { + int ret; + + ret = hclge_map_tqp_to_vport(hdev, vport); + if (ret) + return ret; + + vport++; + } + + return 0; +} + +static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps) +{ + struct hnae3_handle *nic = &vport->nic; + struct hclge_dev *hdev = vport->back; + int ret; + + nic->pdev = hdev->pdev; + nic->ae_algo = &ae_algo; + nic->numa_node_mask = hdev->numa_node_mask; + nic->kinfo.io_base = hdev->hw.hw.io_base; + + ret = hclge_knic_setup(vport, num_tqps, + hdev->num_tx_desc, hdev->num_rx_desc); + if (ret) + dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret); + + return ret; +} + +static int hclge_alloc_vport(struct hclge_dev *hdev) +{ + struct pci_dev *pdev = hdev->pdev; + struct hclge_vport *vport; + u32 tqp_main_vport; + u32 tqp_per_vport; + int num_vport, i; + int ret; + + /* We need to alloc a vport for main NIC of PF */ + num_vport = hdev->num_req_vfs + 1; + + if (hdev->num_tqps < num_vport) { + dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)", + hdev->num_tqps, num_vport); + return -EINVAL; + } + + /* Alloc the same number of TQPs for every vport */ + tqp_per_vport = hdev->num_tqps / num_vport; + tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport; + + vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport), + GFP_KERNEL); + if (!vport) + return -ENOMEM; + + hdev->vport = vport; + hdev->num_alloc_vport = num_vport; + + if (IS_ENABLED(CONFIG_PCI_IOV)) + hdev->num_alloc_vfs = hdev->num_req_vfs; + + for (i = 0; i < num_vport; i++) { + vport->back = hdev; + vport->vport_id = i; + vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO; + vport->mps = HCLGE_MAC_DEFAULT_FRAME; + vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE; + vport->port_base_vlan_cfg.tbl_sta = true; + vport->rxvlan_cfg.rx_vlan_offload_en = true; + vport->req_vlan_fltr_en = true; + INIT_LIST_HEAD(&vport->vlan_list); + INIT_LIST_HEAD(&vport->uc_mac_list); + INIT_LIST_HEAD(&vport->mc_mac_list); + spin_lock_init(&vport->mac_list_lock); + + if (i == 0) + ret = hclge_vport_setup(vport, tqp_main_vport); + else + ret = hclge_vport_setup(vport, tqp_per_vport); + if (ret) { + dev_err(&pdev->dev, + "vport setup failed for vport %d, %d\n", + i, ret); + return ret; + } + + vport++; + } + + return 0; +} + +static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev, + struct hclge_pkt_buf_alloc *buf_alloc) +{ +/* TX buffer size is unit by 128 byte */ +#define HCLGE_BUF_SIZE_UNIT_SHIFT 7 +#define HCLGE_BUF_SIZE_UPDATE_EN_MSK BIT(15) + struct hclge_tx_buff_alloc_cmd *req; + struct hclge_desc desc; + int ret; + u8 i; + + req = (struct hclge_tx_buff_alloc_cmd *)desc.data; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0); + for (i 
= 0; i < HCLGE_MAX_TC_NUM; i++) { + u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size; + + req->tx_pkt_buff[i] = + cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) | + HCLGE_BUF_SIZE_UPDATE_EN_MSK); + } + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n", + ret); + + return ret; +} + +static int hclge_tx_buffer_alloc(struct hclge_dev *hdev, + struct hclge_pkt_buf_alloc *buf_alloc) +{ + int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc); + + if (ret) + dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret); + + return ret; +} + +static u32 hclge_get_tc_num(struct hclge_dev *hdev) +{ + unsigned int i; + u32 cnt = 0; + + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) + if (hdev->hw_tc_map & BIT(i)) + cnt++; + return cnt; +} + +/* Get the number of pfc enabled TCs, which have private buffer */ +static int hclge_get_pfc_priv_num(struct hclge_dev *hdev, + struct hclge_pkt_buf_alloc *buf_alloc) +{ + struct hclge_priv_buf *priv; + unsigned int i; + int cnt = 0; + + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { + priv = &buf_alloc->priv_buf[i]; + if ((hdev->tm_info.hw_pfc_map & BIT(i)) && + priv->enable) + cnt++; + } + + return cnt; +} + +/* Get the number of pfc disabled TCs, which have private buffer */ +static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev, + struct hclge_pkt_buf_alloc *buf_alloc) +{ + struct hclge_priv_buf *priv; + unsigned int i; + int cnt = 0; + + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { + priv = &buf_alloc->priv_buf[i]; + if (hdev->hw_tc_map & BIT(i) && + !(hdev->tm_info.hw_pfc_map & BIT(i)) && + priv->enable) + cnt++; + } + + return cnt; +} + +static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc) +{ + struct hclge_priv_buf *priv; + u32 rx_priv = 0; + int i; + + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { + priv = &buf_alloc->priv_buf[i]; + if (priv->enable) + rx_priv += priv->buf_size; + } + return rx_priv; +} + +static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc) +{ + u32 i, total_tx_size = 0; + + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) + total_tx_size += buf_alloc->priv_buf[i].tx_buf_size; + + return total_tx_size; +} + +static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev, + struct hclge_pkt_buf_alloc *buf_alloc, + u32 rx_all) +{ + u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd; + u32 tc_num = hclge_get_tc_num(hdev); + u32 shared_buf, aligned_mps; + u32 rx_priv; + int i; + + aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT); + + if (hnae3_dev_dcb_supported(hdev)) + shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps + + hdev->dv_buf_size; + else + shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF + + hdev->dv_buf_size; + + shared_buf_tc = tc_num * aligned_mps + aligned_mps; + shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc), + HCLGE_BUF_SIZE_UNIT); + + rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc); + if (rx_all < rx_priv + shared_std) + return false; + + shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT); + buf_alloc->s_buf.buf_size = shared_buf; + if (hnae3_dev_dcb_supported(hdev)) { + buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size; + buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high + - roundup(aligned_mps / HCLGE_BUF_DIV_BY, + HCLGE_BUF_SIZE_UNIT); + } else { + buf_alloc->s_buf.self.high = aligned_mps + + HCLGE_NON_DCB_ADDITIONAL_BUF; + buf_alloc->s_buf.self.low = aligned_mps; + } + + if (hnae3_dev_dcb_supported(hdev)) { + hi_thrd = shared_buf - hdev->dv_buf_size; + + if 
(tc_num <= NEED_RESERVE_TC_NUM) + hi_thrd = hi_thrd * BUF_RESERVE_PERCENT + / BUF_MAX_PERCENT; + + if (tc_num) + hi_thrd = hi_thrd / tc_num; + + hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps); + hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT); + lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY; + } else { + hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF; + lo_thrd = aligned_mps; + } + + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { + buf_alloc->s_buf.tc_thrd[i].low = lo_thrd; + buf_alloc->s_buf.tc_thrd[i].high = hi_thrd; + } + + return true; +} + +static int hclge_tx_buffer_calc(struct hclge_dev *hdev, + struct hclge_pkt_buf_alloc *buf_alloc) +{ + u32 i, total_size; + + total_size = hdev->pkt_buf_size; + + /* alloc tx buffer for all enabled tc */ + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { + struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; + + if (hdev->hw_tc_map & BIT(i)) { + if (total_size < hdev->tx_buf_size) + return -ENOMEM; + + priv->tx_buf_size = hdev->tx_buf_size; + } else { + priv->tx_buf_size = 0; + } + + total_size -= priv->tx_buf_size; + } + + return 0; +} + +static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max, + struct hclge_pkt_buf_alloc *buf_alloc) +{ + u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc); + u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT); + unsigned int i; + + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { + struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; + + priv->enable = 0; + priv->wl.low = 0; + priv->wl.high = 0; + priv->buf_size = 0; + + if (!(hdev->hw_tc_map & BIT(i))) + continue; + + priv->enable = 1; + + if (hdev->tm_info.hw_pfc_map & BIT(i)) { + priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT; + priv->wl.high = roundup(priv->wl.low + aligned_mps, + HCLGE_BUF_SIZE_UNIT); + } else { + priv->wl.low = 0; + priv->wl.high = max ? 
(aligned_mps * HCLGE_BUF_MUL_BY) : + aligned_mps; + } + + priv->buf_size = priv->wl.high + hdev->dv_buf_size; + } + + return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all); +} + +static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev, + struct hclge_pkt_buf_alloc *buf_alloc) +{ + u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc); + int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc); + int i; + + /* let the last to be cleared first */ + for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) { + struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; + unsigned int mask = BIT((unsigned int)i); + + if (hdev->hw_tc_map & mask && + !(hdev->tm_info.hw_pfc_map & mask)) { + /* Clear the no pfc TC private buffer */ + priv->wl.low = 0; + priv->wl.high = 0; + priv->buf_size = 0; + priv->enable = 0; + no_pfc_priv_num--; + } + + if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) || + no_pfc_priv_num == 0) + break; + } + + return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all); +} + +static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev, + struct hclge_pkt_buf_alloc *buf_alloc) +{ + u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc); + int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc); + int i; + + /* let the last to be cleared first */ + for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) { + struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; + unsigned int mask = BIT((unsigned int)i); + + if (hdev->hw_tc_map & mask && + hdev->tm_info.hw_pfc_map & mask) { + /* Reduce the number of pfc TC with private buffer */ + priv->wl.low = 0; + priv->enable = 0; + priv->wl.high = 0; + priv->buf_size = 0; + pfc_priv_num--; + } + + if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) || + pfc_priv_num == 0) + break; + } + + return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all); +} + +static int hclge_only_alloc_priv_buff(struct hclge_dev *hdev, + struct hclge_pkt_buf_alloc *buf_alloc) +{ +#define COMPENSATE_BUFFER 0x3C00 +#define COMPENSATE_HALF_MPS_NUM 5 +#define PRIV_WL_GAP 0x1800 + + u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc); + u32 tc_num = hclge_get_tc_num(hdev); + u32 half_mps = hdev->mps >> 1; + u32 min_rx_priv; + unsigned int i; + + if (tc_num) + rx_priv = rx_priv / tc_num; + + if (tc_num <= NEED_RESERVE_TC_NUM) + rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT; + + min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER + + COMPENSATE_HALF_MPS_NUM * half_mps; + min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT); + rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT); + if (rx_priv < min_rx_priv) + return false; + + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { + struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; + + priv->enable = 0; + priv->wl.low = 0; + priv->wl.high = 0; + priv->buf_size = 0; + + if (!(hdev->hw_tc_map & BIT(i))) + continue; + + priv->enable = 1; + priv->buf_size = rx_priv; + priv->wl.high = rx_priv - hdev->dv_buf_size; + priv->wl.low = priv->wl.high - PRIV_WL_GAP; + } + + buf_alloc->s_buf.buf_size = 0; + + return true; +} + +/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs + * @hdev: pointer to struct hclge_dev + * @buf_alloc: pointer to buffer calculation data + * @return: 0: calculate successful, negative: fail + */ +static int hclge_rx_buffer_calc(struct hclge_dev *hdev, + struct hclge_pkt_buf_alloc *buf_alloc) +{ + /* When DCB is not supported, rx private buffer is not allocated. 
*/ + if (!hnae3_dev_dcb_supported(hdev)) { + u32 rx_all = hdev->pkt_buf_size; + + rx_all -= hclge_get_tx_buff_alloced(buf_alloc); + if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) + return -ENOMEM; + + return 0; + } + + if (hclge_only_alloc_priv_buff(hdev, buf_alloc)) + return 0; + + if (hclge_rx_buf_calc_all(hdev, true, buf_alloc)) + return 0; + + /* try to decrease the buffer size */ + if (hclge_rx_buf_calc_all(hdev, false, buf_alloc)) + return 0; + + if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc)) + return 0; + + if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc)) + return 0; + + return -ENOMEM; +} + +static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev, + struct hclge_pkt_buf_alloc *buf_alloc) +{ + struct hclge_rx_priv_buff_cmd *req; + struct hclge_desc desc; + int ret; + int i; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false); + req = (struct hclge_rx_priv_buff_cmd *)desc.data; + + /* Alloc private buffer TCs */ + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { + struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; + + req->buf_num[i] = + cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S); + req->buf_num[i] |= + cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B); + } + + req->shared_buf = + cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) | + (1 << HCLGE_TC0_PRI_BUF_EN_B)); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "rx private buffer alloc cmd failed %d\n", ret); + + return ret; +} + +static int hclge_rx_priv_wl_config(struct hclge_dev *hdev, + struct hclge_pkt_buf_alloc *buf_alloc) +{ + struct hclge_rx_priv_wl_buf *req; + struct hclge_priv_buf *priv; + struct hclge_desc desc[2]; + int i, j; + int ret; + + for (i = 0; i < 2; i++) { + hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC, + false); + req = (struct hclge_rx_priv_wl_buf *)desc[i].data; + + /* The first descriptor set the NEXT bit to 1 */ + if (i == 0) + desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); + else + desc[i].flag &= ~cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); + + for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) { + u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j; + + priv = &buf_alloc->priv_buf[idx]; + req->tc_wl[j].high = + cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S); + req->tc_wl[j].high |= + cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); + req->tc_wl[j].low = + cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S); + req->tc_wl[j].low |= + cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); + } + } + + /* Send 2 descriptor at one time */ + ret = hclge_cmd_send(&hdev->hw, desc, 2); + if (ret) + dev_err(&hdev->pdev->dev, + "rx private waterline config cmd failed %d\n", + ret); + return ret; +} + +static int hclge_common_thrd_config(struct hclge_dev *hdev, + struct hclge_pkt_buf_alloc *buf_alloc) +{ + struct hclge_shared_buf *s_buf = &buf_alloc->s_buf; + struct hclge_rx_com_thrd *req; + struct hclge_desc desc[2]; + struct hclge_tc_thrd *tc; + int i, j; + int ret; + + for (i = 0; i < 2; i++) { + hclge_cmd_setup_basic_desc(&desc[i], + HCLGE_OPC_RX_COM_THRD_ALLOC, false); + req = (struct hclge_rx_com_thrd *)&desc[i].data; + + /* The first descriptor set the NEXT bit to 1 */ + if (i == 0) + desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); + else + desc[i].flag &= ~cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); + + for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) { + tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j]; + + req->com_thrd[j].high = + cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S); + req->com_thrd[j].high |= + cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); + req->com_thrd[j].low 
= + cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S); + req->com_thrd[j].low |= + cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); + } + } + + /* Send 2 descriptors at one time */ + ret = hclge_cmd_send(&hdev->hw, desc, 2); + if (ret) + dev_err(&hdev->pdev->dev, + "common threshold config cmd failed %d\n", ret); + return ret; +} + +static int hclge_common_wl_config(struct hclge_dev *hdev, + struct hclge_pkt_buf_alloc *buf_alloc) +{ + struct hclge_shared_buf *buf = &buf_alloc->s_buf; + struct hclge_rx_com_wl *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false); + + req = (struct hclge_rx_com_wl *)desc.data; + req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S); + req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); + + req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S); + req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "common waterline config cmd failed %d\n", ret); + + return ret; +} + +int hclge_buffer_alloc(struct hclge_dev *hdev) +{ + struct hclge_pkt_buf_alloc *pkt_buf; + int ret; + + pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL); + if (!pkt_buf) + return -ENOMEM; + + ret = hclge_tx_buffer_calc(hdev, pkt_buf); + if (ret) { + dev_err(&hdev->pdev->dev, + "could not calc tx buffer size for all TCs %d\n", ret); + goto out; + } + + ret = hclge_tx_buffer_alloc(hdev, pkt_buf); + if (ret) { + dev_err(&hdev->pdev->dev, + "could not alloc tx buffers %d\n", ret); + goto out; + } + + ret = hclge_rx_buffer_calc(hdev, pkt_buf); + if (ret) { + dev_err(&hdev->pdev->dev, + "could not calc rx priv buffer size for all TCs %d\n", + ret); + goto out; + } + + ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf); + if (ret) { + dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n", + ret); + goto out; + } + + if (hnae3_dev_dcb_supported(hdev)) { + ret = hclge_rx_priv_wl_config(hdev, pkt_buf); + if (ret) { + dev_err(&hdev->pdev->dev, + "could not configure rx private waterline %d\n", + ret); + goto out; + } + + ret = hclge_common_thrd_config(hdev, pkt_buf); + if (ret) { + dev_err(&hdev->pdev->dev, + "could not configure common threshold %d\n", + ret); + goto out; + } + } + + ret = hclge_common_wl_config(hdev, pkt_buf); + if (ret) + dev_err(&hdev->pdev->dev, + "could not configure common waterline %d\n", ret); + +out: + kfree(pkt_buf); + return ret; +} + +static int hclge_init_roce_base_info(struct hclge_vport *vport) +{ + struct hnae3_handle *roce = &vport->roce; + struct hnae3_handle *nic = &vport->nic; + struct hclge_dev *hdev = vport->back; + + roce->rinfo.num_vectors = vport->back->num_roce_msi; + + if (hdev->num_msi < hdev->num_nic_msi + hdev->num_roce_msi) + return -EINVAL; + + roce->rinfo.base_vector = hdev->num_nic_msi; + + roce->rinfo.netdev = nic->kinfo.netdev; + roce->rinfo.roce_io_base = hdev->hw.hw.io_base; + roce->rinfo.roce_mem_base = hdev->hw.hw.mem_base; + + roce->pdev = nic->pdev; + roce->ae_algo = nic->ae_algo; + roce->numa_node_mask = nic->numa_node_mask; + + return 0; +} + +static int hclge_init_msi(struct hclge_dev *hdev) +{ + struct pci_dev *pdev = hdev->pdev; + int vectors; + int i; + + vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM, + hdev->num_msi, + PCI_IRQ_MSI | PCI_IRQ_MSIX); + if (vectors < 0) { + dev_err(&pdev->dev, + "failed(%d) to allocate MSI/MSI-X vectors\n", + vectors); + return vectors; + } + if (vectors < hdev->num_msi) + dev_warn(&hdev->pdev->dev, + "requested %u MSI/MSI-X, but 
allocated %d MSI/MSI-X\n", + hdev->num_msi, vectors); + + hdev->num_msi = vectors; + hdev->num_msi_left = vectors; + + hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi, + sizeof(u16), GFP_KERNEL); + if (!hdev->vector_status) { + pci_free_irq_vectors(pdev); + return -ENOMEM; + } + + for (i = 0; i < hdev->num_msi; i++) + hdev->vector_status[i] = HCLGE_INVALID_VPORT; + + hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi, + sizeof(int), GFP_KERNEL); + if (!hdev->vector_irq) { + pci_free_irq_vectors(pdev); + return -ENOMEM; + } + + return 0; +} + +static u8 hclge_check_speed_dup(u8 duplex, int speed) +{ + if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M)) + duplex = HCLGE_MAC_FULL; + + return duplex; +} + +static struct hclge_mac_speed_map hclge_mac_speed_map_to_fw[] = { + {HCLGE_MAC_SPEED_10M, HCLGE_FW_MAC_SPEED_10M}, + {HCLGE_MAC_SPEED_100M, HCLGE_FW_MAC_SPEED_100M}, + {HCLGE_MAC_SPEED_1G, HCLGE_FW_MAC_SPEED_1G}, + {HCLGE_MAC_SPEED_10G, HCLGE_FW_MAC_SPEED_10G}, + {HCLGE_MAC_SPEED_25G, HCLGE_FW_MAC_SPEED_25G}, + {HCLGE_MAC_SPEED_40G, HCLGE_FW_MAC_SPEED_40G}, + {HCLGE_MAC_SPEED_50G, HCLGE_FW_MAC_SPEED_50G}, + {HCLGE_MAC_SPEED_100G, HCLGE_FW_MAC_SPEED_100G}, + {HCLGE_MAC_SPEED_200G, HCLGE_FW_MAC_SPEED_200G}, +}; + +static int hclge_convert_to_fw_speed(u32 speed_drv, u32 *speed_fw) +{ + u16 i; + + for (i = 0; i < ARRAY_SIZE(hclge_mac_speed_map_to_fw); i++) { + if (hclge_mac_speed_map_to_fw[i].speed_drv == speed_drv) { + *speed_fw = hclge_mac_speed_map_to_fw[i].speed_fw; + return 0; + } + } + + return -EINVAL; +} + +static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed, + u8 duplex, u8 lane_num) +{ + struct hclge_config_mac_speed_dup_cmd *req; + struct hclge_desc desc; + u32 speed_fw; + int ret; + + req = (struct hclge_config_mac_speed_dup_cmd *)desc.data; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false); + + if (duplex) + hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1); + + ret = hclge_convert_to_fw_speed(speed, &speed_fw); + if (ret) { + dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed); + return ret; + } + + hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, HCLGE_CFG_SPEED_S, + speed_fw); + hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B, + 1); + req->lane_num = lane_num; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "mac speed/duplex config cmd failed %d.\n", ret); + return ret; + } + + return 0; +} + +int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex, u8 lane_num) +{ + struct hclge_mac *mac = &hdev->hw.mac; + int ret; + + duplex = hclge_check_speed_dup(duplex, speed); + if (!mac->support_autoneg && mac->speed == speed && + mac->duplex == duplex && (mac->lane_num == lane_num || lane_num == 0)) + return 0; + + ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex, lane_num); + if (ret) + return ret; + + hdev->hw.mac.speed = speed; + hdev->hw.mac.duplex = duplex; + if (!lane_num) + hdev->hw.mac.lane_num = lane_num; + + return 0; +} + +static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed, + u8 duplex, u8 lane_num) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + return hclge_cfg_mac_speed_dup(hdev, speed, duplex, lane_num); +} + +static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable) +{ + struct hclge_config_auto_neg_cmd *req; + struct hclge_desc desc; + u32 flag = 0; + int ret; + + hclge_cmd_setup_basic_desc(&desc, 
HCLGE_OPC_CONFIG_AN_MODE, false); + + req = (struct hclge_config_auto_neg_cmd *)desc.data; + if (enable) + hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U); + req->cfg_an_cmd_flag = cpu_to_le32(flag); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n", + ret); + + return ret; +} + +static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + if (!hdev->hw.mac.support_autoneg) { + if (enable) { + dev_err(&hdev->pdev->dev, + "autoneg is not supported by current port\n"); + return -EOPNOTSUPP; + } else { + return 0; + } + } + + return hclge_set_autoneg_en(hdev, enable); +} + +static int hclge_get_autoneg(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct phy_device *phydev = hdev->hw.mac.phydev; + + if (phydev) + return phydev->autoneg; + + return hdev->hw.mac.autoneg; +} + +static int hclge_restart_autoneg(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + int ret; + + dev_dbg(&hdev->pdev->dev, "restart autoneg\n"); + + ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); + if (ret) + return ret; + return hclge_notify_client(hdev, HNAE3_UP_CLIENT); +} + +static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg) + return hclge_set_autoneg_en(hdev, !halt); + + return 0; +} + +static void hclge_parse_fec_stats_lanes(struct hclge_dev *hdev, + struct hclge_desc *desc, u32 desc_len) +{ + u32 lane_size = HCLGE_FEC_STATS_MAX_LANES * 2; + u32 desc_index = 0; + u32 data_index = 0; + u32 i; + + for (i = 0; i < lane_size; i++) { + if (data_index >= HCLGE_DESC_DATA_LEN) { + desc_index++; + data_index = 0; + } + + if (desc_index >= desc_len) + return; + + hdev->fec_stats.per_lanes[i] += + le32_to_cpu(desc[desc_index].data[data_index]); + data_index++; + } +} + +static void hclge_parse_fec_stats(struct hclge_dev *hdev, + struct hclge_desc *desc, u32 desc_len) +{ + struct hclge_query_fec_stats_cmd *req; + + req = (struct hclge_query_fec_stats_cmd *)desc[0].data; + + hdev->fec_stats.base_r_lane_num = req->base_r_lane_num; + hdev->fec_stats.rs_corr_blocks += + le32_to_cpu(req->rs_fec_corr_blocks); + hdev->fec_stats.rs_uncorr_blocks += + le32_to_cpu(req->rs_fec_uncorr_blocks); + hdev->fec_stats.rs_error_blocks += + le32_to_cpu(req->rs_fec_error_blocks); + hdev->fec_stats.base_r_corr_blocks += + le32_to_cpu(req->base_r_fec_corr_blocks); + hdev->fec_stats.base_r_uncorr_blocks += + le32_to_cpu(req->base_r_fec_uncorr_blocks); + + hclge_parse_fec_stats_lanes(hdev, &desc[1], desc_len - 1); +} + +static int hclge_update_fec_stats_hw(struct hclge_dev *hdev) +{ + struct hclge_desc desc[HCLGE_FEC_STATS_CMD_NUM]; + int ret; + u32 i; + + for (i = 0; i < HCLGE_FEC_STATS_CMD_NUM; i++) { + hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_FEC_STATS, + true); + if (i != (HCLGE_FEC_STATS_CMD_NUM - 1)) + desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); + } + + ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_FEC_STATS_CMD_NUM); + if (ret) + return ret; + + hclge_parse_fec_stats(hdev, desc, HCLGE_FEC_STATS_CMD_NUM); + + return 0; +} + +static void hclge_update_fec_stats(struct hclge_dev *hdev) +{ + struct hnae3_ae_dev 
*ae_dev = pci_get_drvdata(hdev->pdev); + int ret; + + if (!hnae3_ae_dev_fec_stats_supported(ae_dev) || + test_and_set_bit(HCLGE_STATE_FEC_STATS_UPDATING, &hdev->state)) + return; + + ret = hclge_update_fec_stats_hw(hdev); + if (ret) + dev_err(&hdev->pdev->dev, + "failed to update fec stats, ret = %d\n", ret); + + clear_bit(HCLGE_STATE_FEC_STATS_UPDATING, &hdev->state); +} + +static void hclge_get_fec_stats_total(struct hclge_dev *hdev, + struct ethtool_fec_stats *fec_stats) +{ + fec_stats->corrected_blocks.total = hdev->fec_stats.rs_corr_blocks; + fec_stats->uncorrectable_blocks.total = + hdev->fec_stats.rs_uncorr_blocks; +} + +static void hclge_get_fec_stats_lanes(struct hclge_dev *hdev, + struct ethtool_fec_stats *fec_stats) +{ + u32 i; + + if (hdev->fec_stats.base_r_lane_num == 0 || + hdev->fec_stats.base_r_lane_num > HCLGE_FEC_STATS_MAX_LANES) { + dev_err(&hdev->pdev->dev, + "fec stats lane number(%llu) is invalid\n", + hdev->fec_stats.base_r_lane_num); + return; + } + + for (i = 0; i < hdev->fec_stats.base_r_lane_num; i++) { + fec_stats->corrected_blocks.lanes[i] = + hdev->fec_stats.base_r_corr_per_lanes[i]; + fec_stats->uncorrectable_blocks.lanes[i] = + hdev->fec_stats.base_r_uncorr_per_lanes[i]; + } +} + +static void hclge_comm_get_fec_stats(struct hclge_dev *hdev, + struct ethtool_fec_stats *fec_stats) +{ + u32 fec_mode = hdev->hw.mac.fec_mode; + + switch (fec_mode) { + case BIT(HNAE3_FEC_RS): + case BIT(HNAE3_FEC_LLRS): + hclge_get_fec_stats_total(hdev, fec_stats); + break; + case BIT(HNAE3_FEC_BASER): + hclge_get_fec_stats_lanes(hdev, fec_stats); + break; + default: + dev_err(&hdev->pdev->dev, + "fec stats is not supported by current fec mode(0x%x)\n", + fec_mode); + break; + } +} + +static void hclge_get_fec_stats(struct hnae3_handle *handle, + struct ethtool_fec_stats *fec_stats) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + u32 fec_mode = hdev->hw.mac.fec_mode; + + if (fec_mode == BIT(HNAE3_FEC_NONE) || + fec_mode == BIT(HNAE3_FEC_AUTO) || + fec_mode == BIT(HNAE3_FEC_USER_DEF)) + return; + + hclge_update_fec_stats(hdev); + + hclge_comm_get_fec_stats(hdev, fec_stats); +} + +static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode) +{ + struct hclge_config_fec_cmd *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false); + + req = (struct hclge_config_fec_cmd *)desc.data; + if (fec_mode & BIT(HNAE3_FEC_AUTO)) + hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1); + if (fec_mode & BIT(HNAE3_FEC_RS)) + hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M, + HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS); + if (fec_mode & BIT(HNAE3_FEC_LLRS)) + hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M, + HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_LLRS); + if (fec_mode & BIT(HNAE3_FEC_BASER)) + hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M, + HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret); + + return ret; +} + +static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct hclge_mac *mac = &hdev->hw.mac; + int ret; + + if (fec_mode && !(mac->fec_ability & fec_mode)) { + dev_err(&hdev->pdev->dev, "unsupported fec mode\n"); + return -EINVAL; + } + + ret = hclge_set_fec_hw(hdev, fec_mode); + if (ret) + return ret; + + 
mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF); + return 0; +} + +static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability, + u8 *fec_mode) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct hclge_mac *mac = &hdev->hw.mac; + + if (fec_ability) + *fec_ability = mac->fec_ability; + if (fec_mode) + *fec_mode = mac->fec_mode; +} + +static int hclge_mac_init(struct hclge_dev *hdev) +{ + struct hclge_mac *mac = &hdev->hw.mac; + int ret; + + hdev->support_sfp_query = true; + hdev->hw.mac.duplex = HCLGE_MAC_FULL; + ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed, + hdev->hw.mac.duplex, hdev->hw.mac.lane_num); + if (ret) + return ret; + + if (hdev->hw.mac.support_autoneg) { + ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg); + if (ret) + return ret; + } + + mac->link = 0; + + if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) { + ret = hclge_set_fec_hw(hdev, mac->user_fec_mode); + if (ret) + return ret; + } + + ret = hclge_set_mac_mtu(hdev, hdev->mps); + if (ret) { + dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret); + return ret; + } + + ret = hclge_set_default_loopback(hdev); + if (ret) + return ret; + + ret = hclge_buffer_alloc(hdev); + if (ret) + dev_err(&hdev->pdev->dev, + "allocate buffer fail, ret=%d\n", ret); + + return ret; +} + +static void hclge_mbx_task_schedule(struct hclge_dev *hdev) +{ + if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && + !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state)) { + hdev->last_mbx_scheduled = jiffies; + mod_delayed_work(hclge_wq, &hdev->service_task, 0); + } +} + +static void hclge_reset_task_schedule(struct hclge_dev *hdev) +{ + if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && + test_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state) && + !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state)) { + hdev->last_rst_scheduled = jiffies; + mod_delayed_work(hclge_wq, &hdev->service_task, 0); + } +} + +static void hclge_errhand_task_schedule(struct hclge_dev *hdev) +{ + if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && + !test_and_set_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state)) + mod_delayed_work(hclge_wq, &hdev->service_task, 0); +} + +void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time) +{ + if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && + !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) + mod_delayed_work(hclge_wq, &hdev->service_task, delay_time); +} + +static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status) +{ + struct hclge_link_status_cmd *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n", + ret); + return ret; + } + + req = (struct hclge_link_status_cmd *)desc.data; + *link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ? 
+ HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN; + + return 0; +} + +static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status) +{ + struct phy_device *phydev = hdev->hw.mac.phydev; + + *link_status = HCLGE_LINK_STATUS_DOWN; + + if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) + return 0; + + if (phydev && (phydev->state != PHY_RUNNING || !phydev->link)) + return 0; + + return hclge_get_mac_link_status(hdev, link_status); +} + +static void hclge_push_link_status(struct hclge_dev *hdev) +{ + struct hclge_vport *vport; + int ret; + u16 i; + + for (i = 0; i < pci_num_vf(hdev->pdev); i++) { + vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM]; + + if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state) || + vport->vf_info.link_state != IFLA_VF_LINK_STATE_AUTO) + continue; + + ret = hclge_push_vf_link_status(vport); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to push link status to vf%u, ret = %d\n", + i, ret); + } + } +} + +static void hclge_update_link_status(struct hclge_dev *hdev) +{ + struct hnae3_handle *rhandle = &hdev->vport[0].roce; + struct hnae3_handle *handle = &hdev->vport[0].nic; + struct hnae3_client *rclient = hdev->roce_client; + struct hnae3_client *client = hdev->nic_client; + int state; + int ret; + + if (!client) + return; + + if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state)) + return; + + ret = hclge_get_mac_phy_link(hdev, &state); + if (ret) { + clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state); + return; + } + + if (state != hdev->hw.mac.link) { + hdev->hw.mac.link = state; + if (state == HCLGE_LINK_STATUS_UP) + hclge_update_port_info(hdev); + + client->ops->link_status_change(handle, state); + hclge_config_mac_tnl_int(hdev, state); + if (rclient && rclient->ops->link_status_change) + rclient->ops->link_status_change(rhandle, state); + + hclge_push_link_status(hdev); + } + + clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state); +} + +static void hclge_update_speed_advertising(struct hclge_mac *mac) +{ + u32 speed_ability; + + if (hclge_get_speed_bit(mac->speed, &speed_ability)) + return; + + switch (mac->module_type) { + case HNAE3_MODULE_TYPE_FIBRE_LR: + hclge_convert_setting_lr(speed_ability, mac->advertising); + break; + case HNAE3_MODULE_TYPE_FIBRE_SR: + case HNAE3_MODULE_TYPE_AOC: + hclge_convert_setting_sr(speed_ability, mac->advertising); + break; + case HNAE3_MODULE_TYPE_CR: + hclge_convert_setting_cr(speed_ability, mac->advertising); + break; + case HNAE3_MODULE_TYPE_KR: + hclge_convert_setting_kr(speed_ability, mac->advertising); + break; + default: + break; + } +} + +static void hclge_update_fec_advertising(struct hclge_mac *mac) +{ + if (mac->fec_mode & BIT(HNAE3_FEC_RS)) + linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, + mac->advertising); + else if (mac->fec_mode & BIT(HNAE3_FEC_LLRS)) + linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT, + mac->advertising); + else if (mac->fec_mode & BIT(HNAE3_FEC_BASER)) + linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, + mac->advertising); + else + linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, + mac->advertising); +} + +static void hclge_update_pause_advertising(struct hclge_dev *hdev) +{ + struct hclge_mac *mac = &hdev->hw.mac; + bool rx_en, tx_en; + + switch (hdev->fc_mode_last_time) { + case HCLGE_FC_RX_PAUSE: + rx_en = true; + tx_en = false; + break; + case HCLGE_FC_TX_PAUSE: + rx_en = false; + tx_en = true; + break; + case HCLGE_FC_FULL: + rx_en = true; + tx_en = true; + break; + default: + rx_en = false; + tx_en = false; + break; + } + + linkmode_set_pause(mac->advertising, tx_en, 
rx_en); +} + +static void hclge_update_advertising(struct hclge_dev *hdev) +{ + struct hclge_mac *mac = &hdev->hw.mac; + + linkmode_zero(mac->advertising); + hclge_update_speed_advertising(mac); + hclge_update_fec_advertising(mac); + hclge_update_pause_advertising(hdev); +} + +static void hclge_update_port_capability(struct hclge_dev *hdev, + struct hclge_mac *mac) +{ + if (hnae3_dev_fec_supported(hdev)) + hclge_convert_setting_fec(mac); + + /* firmware can not identify back plane type, the media type + * read from configuration can help deal it + */ + if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE && + mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN) + mac->module_type = HNAE3_MODULE_TYPE_KR; + else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER) + mac->module_type = HNAE3_MODULE_TYPE_TP; + + if (mac->support_autoneg) { + linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported); + linkmode_copy(mac->advertising, mac->supported); + } else { + linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, + mac->supported); + hclge_update_advertising(hdev); + } +} + +static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed) +{ + struct hclge_sfp_info_cmd *resp; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true); + resp = (struct hclge_sfp_info_cmd *)desc.data; + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret == -EOPNOTSUPP) { + dev_warn(&hdev->pdev->dev, + "IMP do not support get SFP speed %d\n", ret); + return ret; + } else if (ret) { + dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret); + return ret; + } + + *speed = le32_to_cpu(resp->speed); + + return 0; +} + +static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac) +{ + struct hclge_sfp_info_cmd *resp; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true); + resp = (struct hclge_sfp_info_cmd *)desc.data; + + resp->query_type = QUERY_ACTIVE_SPEED; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret == -EOPNOTSUPP) { + dev_warn(&hdev->pdev->dev, + "IMP does not support get SFP info %d\n", ret); + return ret; + } else if (ret) { + dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret); + return ret; + } + + /* In some case, mac speed get from IMP may be 0, it shouldn't be + * set to mac->speed. 
+ */ + if (!le32_to_cpu(resp->speed)) + return 0; + + mac->speed = le32_to_cpu(resp->speed); + /* if resp->speed_ability is 0, it means it's an old version + * firmware, do not update these params + */ + if (resp->speed_ability) { + mac->module_type = le32_to_cpu(resp->module_type); + mac->speed_ability = le32_to_cpu(resp->speed_ability); + mac->autoneg = resp->autoneg; + mac->support_autoneg = resp->autoneg_ability; + mac->speed_type = QUERY_ACTIVE_SPEED; + mac->lane_num = resp->lane_num; + if (!resp->active_fec) + mac->fec_mode = 0; + else + mac->fec_mode = BIT(resp->active_fec); + mac->fec_ability = resp->fec_ability; + } else { + mac->speed_type = QUERY_SFP_SPEED; + } + + return 0; +} + +static int hclge_get_phy_link_ksettings(struct hnae3_handle *handle, + struct ethtool_link_ksettings *cmd) +{ + struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM]; + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_phy_link_ksetting_0_cmd *req0; + struct hclge_phy_link_ksetting_1_cmd *req1; + u32 supported, advertising, lp_advertising; + struct hclge_dev *hdev = vport->back; + int ret; + + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING, + true); + desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING, + true); + + ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get phy link ksetting, ret = %d.\n", ret); + return ret; + } + + req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data; + cmd->base.autoneg = req0->autoneg; + cmd->base.speed = le32_to_cpu(req0->speed); + cmd->base.duplex = req0->duplex; + cmd->base.port = req0->port; + cmd->base.transceiver = req0->transceiver; + cmd->base.phy_address = req0->phy_address; + cmd->base.eth_tp_mdix = req0->eth_tp_mdix; + cmd->base.eth_tp_mdix_ctrl = req0->eth_tp_mdix_ctrl; + supported = le32_to_cpu(req0->supported); + advertising = le32_to_cpu(req0->advertising); + lp_advertising = le32_to_cpu(req0->lp_advertising); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising, + lp_advertising); + + req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data; + cmd->base.master_slave_cfg = req1->master_slave_cfg; + cmd->base.master_slave_state = req1->master_slave_state; + + return 0; +} + +static int +hclge_set_phy_link_ksettings(struct hnae3_handle *handle, + const struct ethtool_link_ksettings *cmd) +{ + struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM]; + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_phy_link_ksetting_0_cmd *req0; + struct hclge_phy_link_ksetting_1_cmd *req1; + struct hclge_dev *hdev = vport->back; + u32 advertising; + int ret; + + if (cmd->base.autoneg == AUTONEG_DISABLE && + ((cmd->base.speed != SPEED_100 && cmd->base.speed != SPEED_10) || + (cmd->base.duplex != DUPLEX_HALF && + cmd->base.duplex != DUPLEX_FULL))) + return -EINVAL; + + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING, + false); + desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING, + false); + + req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data; + req0->autoneg = cmd->base.autoneg; + req0->speed = cpu_to_le32(cmd->base.speed); + req0->duplex = cmd->base.duplex; + 
ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); + req0->advertising = cpu_to_le32(advertising); + req0->eth_tp_mdix_ctrl = cmd->base.eth_tp_mdix_ctrl; + + req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data; + req1->master_slave_cfg = cmd->base.master_slave_cfg; + + ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to set phy link ksettings, ret = %d.\n", ret); + return ret; + } + + hdev->hw.mac.autoneg = cmd->base.autoneg; + hdev->hw.mac.speed = cmd->base.speed; + hdev->hw.mac.duplex = cmd->base.duplex; + linkmode_copy(hdev->hw.mac.advertising, cmd->link_modes.advertising); + + return 0; +} + +static int hclge_update_tp_port_info(struct hclge_dev *hdev) +{ + struct ethtool_link_ksettings cmd; + int ret; + + if (!hnae3_dev_phy_imp_supported(hdev)) + return 0; + + ret = hclge_get_phy_link_ksettings(&hdev->vport->nic, &cmd); + if (ret) + return ret; + + hdev->hw.mac.autoneg = cmd.base.autoneg; + hdev->hw.mac.speed = cmd.base.speed; + hdev->hw.mac.duplex = cmd.base.duplex; + linkmode_copy(hdev->hw.mac.advertising, cmd.link_modes.advertising); + + return 0; +} + +static int hclge_tp_port_init(struct hclge_dev *hdev) +{ + struct ethtool_link_ksettings cmd; + + if (!hnae3_dev_phy_imp_supported(hdev)) + return 0; + + cmd.base.autoneg = hdev->hw.mac.autoneg; + cmd.base.speed = hdev->hw.mac.speed; + cmd.base.duplex = hdev->hw.mac.duplex; + linkmode_copy(cmd.link_modes.advertising, hdev->hw.mac.advertising); + + return hclge_set_phy_link_ksettings(&hdev->vport->nic, &cmd); +} + +static int hclge_update_port_info(struct hclge_dev *hdev) +{ + struct hclge_mac *mac = &hdev->hw.mac; + int speed; + int ret; + + /* get the port info from SFP cmd if not copper port */ + if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER) + return hclge_update_tp_port_info(hdev); + + /* if IMP does not support get SFP/qSFP info, return directly */ + if (!hdev->support_sfp_query) + return 0; + + if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { + speed = mac->speed; + ret = hclge_get_sfp_info(hdev, mac); + } else { + speed = HCLGE_MAC_SPEED_UNKNOWN; + ret = hclge_get_sfp_speed(hdev, &speed); + } + + if (ret == -EOPNOTSUPP) { + hdev->support_sfp_query = false; + return ret; + } else if (ret) { + return ret; + } + + if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { + if (mac->speed_type == QUERY_ACTIVE_SPEED) { + hclge_update_port_capability(hdev, mac); + if (mac->speed != speed) + (void)hclge_tm_port_shaper_cfg(hdev); + return 0; + } + return hclge_cfg_mac_speed_dup(hdev, mac->speed, + HCLGE_MAC_FULL, mac->lane_num); + } else { + if (speed == HCLGE_MAC_SPEED_UNKNOWN) + return 0; /* do nothing if no SFP */ + + /* must config full duplex for SFP */ + return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL, 0); + } +} + +static int hclge_get_status(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + hclge_update_link_status(hdev); + + return hdev->hw.mac.link; +} + +static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf) +{ + if (!pci_num_vf(hdev->pdev)) { + dev_err(&hdev->pdev->dev, + "SRIOV is disabled, can not get vport(%d) info.\n", vf); + return NULL; + } + + if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) { + dev_err(&hdev->pdev->dev, + "vf id(%d) is out of range(0 <= vfid < %d)\n", + vf, pci_num_vf(hdev->pdev)); + return NULL; + } + + /* VF start from 1 in vport */ + vf += 
HCLGE_VF_VPORT_START_NUM; + return &hdev->vport[vf]; +} + +static int hclge_get_vf_config(struct hnae3_handle *handle, int vf, + struct ifla_vf_info *ivf) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + vport = hclge_get_vf_vport(hdev, vf); + if (!vport) + return -EINVAL; + + ivf->vf = vf; + ivf->linkstate = vport->vf_info.link_state; + ivf->spoofchk = vport->vf_info.spoofchk; + ivf->trusted = vport->vf_info.trusted; + ivf->min_tx_rate = 0; + ivf->max_tx_rate = vport->vf_info.max_tx_rate; + ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag; + ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto); + ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos; + ether_addr_copy(ivf->mac, vport->vf_info.mac); + + return 0; +} + +static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf, + int link_state) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + int link_state_old; + int ret; + + vport = hclge_get_vf_vport(hdev, vf); + if (!vport) + return -EINVAL; + + link_state_old = vport->vf_info.link_state; + vport->vf_info.link_state = link_state; + + /* return success directly if the VF is unalive, VF will + * query link state itself when it starts work. + */ + if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) + return 0; + + ret = hclge_push_vf_link_status(vport); + if (ret) { + vport->vf_info.link_state = link_state_old; + dev_err(&hdev->pdev->dev, + "failed to push vf%d link status, ret = %d\n", vf, ret); + } + + return ret; +} + +static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval) +{ + u32 cmdq_src_reg, msix_src_reg, hw_err_src_reg; + + /* fetch the events from their corresponding regs */ + cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG); + msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS); + hw_err_src_reg = hclge_read_dev(&hdev->hw, + HCLGE_RAS_PF_OTHER_INT_STS_REG); + + /* Assumption: If by any chance reset and mailbox events are reported + * together then we will only process reset event in this go and will + * defer the processing of the mailbox events. Since, we would have not + * cleared RX CMDQ event this time we would receive again another + * interrupt from H/W just for the mailbox. 
+ * + * check for vector0 reset event sources + */ + if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) { + dev_info(&hdev->pdev->dev, "IMP reset interrupt\n"); + set_bit(HNAE3_IMP_RESET, &hdev->reset_pending); + set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); + *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B); + hdev->rst_stats.imp_rst_cnt++; + return HCLGE_VECTOR0_EVENT_RST; + } + + if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) { + dev_info(&hdev->pdev->dev, "global reset interrupt\n"); + set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); + set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending); + *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B); + hdev->rst_stats.global_rst_cnt++; + return HCLGE_VECTOR0_EVENT_RST; + } + + /* check for vector0 msix event and hardware error event source */ + if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK || + hw_err_src_reg & HCLGE_RAS_REG_ERR_MASK) + return HCLGE_VECTOR0_EVENT_ERR; + + /* check for vector0 ptp event source */ + if (BIT(HCLGE_VECTOR0_REG_PTP_INT_B) & msix_src_reg) { + *clearval = msix_src_reg; + return HCLGE_VECTOR0_EVENT_PTP; + } + + /* check for vector0 mailbox(=CMDQ RX) event source */ + if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) { + cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B); + *clearval = cmdq_src_reg; + return HCLGE_VECTOR0_EVENT_MBX; + } + + /* print other vector0 event source */ + dev_info(&hdev->pdev->dev, + "INT status: CMDQ(%#x) HW errors(%#x) other(%#x)\n", + cmdq_src_reg, hw_err_src_reg, msix_src_reg); + + return HCLGE_VECTOR0_EVENT_OTHER; +} + +static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type, + u32 regclr) +{ +#define HCLGE_IMP_RESET_DELAY 5 + + switch (event_type) { + case HCLGE_VECTOR0_EVENT_PTP: + case HCLGE_VECTOR0_EVENT_RST: + if (regclr == BIT(HCLGE_VECTOR0_IMPRESET_INT_B)) + mdelay(HCLGE_IMP_RESET_DELAY); + + hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr); + break; + case HCLGE_VECTOR0_EVENT_MBX: + hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr); + break; + default: + break; + } +} + +static void hclge_clear_all_event_cause(struct hclge_dev *hdev) +{ + hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST, + BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) | + BIT(HCLGE_VECTOR0_CORERESET_INT_B) | + BIT(HCLGE_VECTOR0_IMPRESET_INT_B)); + hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0); +} + +static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable) +{ + writel(enable ? 1 : 0, vector->addr); +} + +static irqreturn_t hclge_misc_irq_handle(int irq, void *data) +{ + struct hclge_dev *hdev = data; + unsigned long flags; + u32 clearval = 0; + u32 event_cause; + + hclge_enable_vector(&hdev->misc_vector, false); + event_cause = hclge_check_event_cause(hdev, &clearval); + + /* vector 0 interrupt is shared with reset and mailbox source events. */ + switch (event_cause) { + case HCLGE_VECTOR0_EVENT_ERR: + hclge_errhand_task_schedule(hdev); + break; + case HCLGE_VECTOR0_EVENT_RST: + hclge_reset_task_schedule(hdev); + break; + case HCLGE_VECTOR0_EVENT_PTP: + spin_lock_irqsave(&hdev->ptp->lock, flags); + hclge_ptp_clean_tx_hwts(hdev); + spin_unlock_irqrestore(&hdev->ptp->lock, flags); + break; + case HCLGE_VECTOR0_EVENT_MBX: + /* If we are here then, + * 1. Either we are not handling any mbx task and we are not + * scheduled as well + * OR + * 2. We could be handling a mbx task but nothing more is + * scheduled. 
+ * In both cases, we should schedule mbx task as there are more + * mbx messages reported by this interrupt. + */ + hclge_mbx_task_schedule(hdev); + break; + default: + dev_warn(&hdev->pdev->dev, + "received unknown or unhandled event of vector0\n"); + break; + } + + hclge_clear_event_cause(hdev, event_cause, clearval); + + /* Enable interrupt if it is not caused by reset event or error event */ + if (event_cause == HCLGE_VECTOR0_EVENT_PTP || + event_cause == HCLGE_VECTOR0_EVENT_MBX || + event_cause == HCLGE_VECTOR0_EVENT_OTHER) + hclge_enable_vector(&hdev->misc_vector, true); + + return IRQ_HANDLED; +} + +static void hclge_free_vector(struct hclge_dev *hdev, int vector_id) +{ + if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) { + dev_warn(&hdev->pdev->dev, + "vector(vector_id %d) has been freed.\n", vector_id); + return; + } + + hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT; + hdev->num_msi_left += 1; + hdev->num_msi_used -= 1; +} + +static void hclge_get_misc_vector(struct hclge_dev *hdev) +{ + struct hclge_misc_vector *vector = &hdev->misc_vector; + + vector->vector_irq = pci_irq_vector(hdev->pdev, 0); + + vector->addr = hdev->hw.hw.io_base + HCLGE_MISC_VECTOR_REG_BASE; + hdev->vector_status[0] = 0; + + hdev->num_msi_left -= 1; + hdev->num_msi_used += 1; +} + +static int hclge_misc_irq_init(struct hclge_dev *hdev) +{ + int ret; + + hclge_get_misc_vector(hdev); + + /* this would be explicitly freed in the end */ + snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s", + HCLGE_NAME, pci_name(hdev->pdev)); + ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle, + 0, hdev->misc_vector.name, hdev); + if (ret) { + hclge_free_vector(hdev, 0); + dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n", + hdev->misc_vector.vector_irq); + } + + return ret; +} + +static void hclge_misc_irq_uninit(struct hclge_dev *hdev) +{ + free_irq(hdev->misc_vector.vector_irq, hdev); + hclge_free_vector(hdev, 0); +} + +int hclge_notify_client(struct hclge_dev *hdev, + enum hnae3_reset_notify_type type) +{ + struct hnae3_handle *handle = &hdev->vport[0].nic; + struct hnae3_client *client = hdev->nic_client; + int ret; + + if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client) + return 0; + + if (!client->ops->reset_notify) + return -EOPNOTSUPP; + + ret = client->ops->reset_notify(handle, type); + if (ret) + dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n", + type, ret); + + return ret; +} + +static int hclge_notify_roce_client(struct hclge_dev *hdev, + enum hnae3_reset_notify_type type) +{ + struct hnae3_handle *handle = &hdev->vport[0].roce; + struct hnae3_client *client = hdev->roce_client; + int ret; + + if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client) + return 0; + + if (!client->ops->reset_notify) + return -EOPNOTSUPP; + + ret = client->ops->reset_notify(handle, type); + if (ret) + dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)", + type, ret); + + return ret; +} + +static int hclge_reset_wait(struct hclge_dev *hdev) +{ +#define HCLGE_RESET_WATI_MS 100 +#define HCLGE_RESET_WAIT_CNT 350 + + u32 val, reg, reg_bit; + u32 cnt = 0; + + switch (hdev->reset_type) { + case HNAE3_IMP_RESET: + reg = HCLGE_GLOBAL_RESET_REG; + reg_bit = HCLGE_IMP_RESET_BIT; + break; + case HNAE3_GLOBAL_RESET: + reg = HCLGE_GLOBAL_RESET_REG; + reg_bit = HCLGE_GLOBAL_RESET_BIT; + break; + case HNAE3_FUNC_RESET: + reg = HCLGE_FUN_RST_ING; + reg_bit = HCLGE_FUN_RST_ING_B; + break; + default: + dev_err(&hdev->pdev->dev, + "Wait for 
unsupported reset type: %d\n", + hdev->reset_type); + return -EINVAL; + } + + val = hclge_read_dev(&hdev->hw, reg); + while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) { + msleep(HCLGE_RESET_WATI_MS); + val = hclge_read_dev(&hdev->hw, reg); + cnt++; + } + + if (cnt >= HCLGE_RESET_WAIT_CNT) { + dev_warn(&hdev->pdev->dev, + "Wait for reset timeout: %d\n", hdev->reset_type); + return -EBUSY; + } + + return 0; +} + +static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset) +{ + struct hclge_vf_rst_cmd *req; + struct hclge_desc desc; + + req = (struct hclge_vf_rst_cmd *)desc.data; + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false); + req->dest_vfid = func_id; + + if (reset) + req->vf_rst = 0x1; + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset) +{ + int i; + + for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++) { + struct hclge_vport *vport = &hdev->vport[i]; + int ret; + + /* Send cmd to set/clear VF's FUNC_RST_ING */ + ret = hclge_set_vf_rst(hdev, vport->vport_id, reset); + if (ret) { + dev_err(&hdev->pdev->dev, + "set vf(%u) rst failed %d!\n", + vport->vport_id - HCLGE_VF_VPORT_START_NUM, + ret); + return ret; + } + + if (!reset || + !test_bit(HCLGE_VPORT_STATE_INITED, &vport->state)) + continue; + + if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state) && + hdev->reset_type == HNAE3_FUNC_RESET) { + set_bit(HCLGE_VPORT_NEED_NOTIFY_RESET, + &vport->need_notify); + continue; + } + + /* Inform VF to process the reset. + * hclge_inform_reset_assert_to_vf may fail if VF + * driver is not loaded. + */ + ret = hclge_inform_reset_assert_to_vf(vport); + if (ret) + dev_warn(&hdev->pdev->dev, + "inform reset to vf(%u) failed %d!\n", + vport->vport_id - HCLGE_VF_VPORT_START_NUM, + ret); + } + + return 0; +} + +static void hclge_mailbox_service_task(struct hclge_dev *hdev) +{ + if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) || + test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state) || + test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state)) + return; + + if (time_is_before_jiffies(hdev->last_mbx_scheduled + + HCLGE_MBX_SCHED_TIMEOUT)) + dev_warn(&hdev->pdev->dev, + "mbx service task is scheduled after %ums on cpu%u!\n", + jiffies_to_msecs(jiffies - hdev->last_mbx_scheduled), + smp_processor_id()); + + hclge_mbx_handler(hdev); + + clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state); +} + +static void hclge_func_reset_sync_vf(struct hclge_dev *hdev) +{ + struct hclge_pf_rst_sync_cmd *req; + struct hclge_desc desc; + int cnt = 0; + int ret; + + req = (struct hclge_pf_rst_sync_cmd *)desc.data; + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true); + + do { + /* vf need to down netdev by mbx during PF or FLR reset */ + hclge_mailbox_service_task(hdev); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + /* for compatible with old firmware, wait + * 100 ms for VF to stop IO + */ + if (ret == -EOPNOTSUPP) { + msleep(HCLGE_RESET_SYNC_TIME); + return; + } else if (ret) { + dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n", + ret); + return; + } else if (req->all_vf_ready) { + return; + } + msleep(HCLGE_PF_RESET_SYNC_TIME); + hclge_comm_cmd_reuse_desc(&desc, true); + } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT); + + dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n"); +} + +void hclge_report_hw_error(struct hclge_dev *hdev, + enum hnae3_hw_error_type type) +{ + struct hnae3_client *client = hdev->nic_client; + + if (!client || 
!client->ops->process_hw_error || + !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state)) + return; + + client->ops->process_hw_error(&hdev->vport[0].nic, type); +} + +static void hclge_handle_imp_error(struct hclge_dev *hdev) +{ + u32 reg_val; + + reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG); + if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) { + hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR); + reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B); + hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val); + } + + if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) { + hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR); + reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B); + hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val); + } +} + +int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id) +{ + struct hclge_desc desc; + struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false); + hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1); + req->fun_reset_vfid = func_id; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "send function reset cmd fail, status =%d\n", ret); + + return ret; +} + +static void hclge_do_reset(struct hclge_dev *hdev) +{ + struct hnae3_handle *handle = &hdev->vport[0].nic; + struct pci_dev *pdev = hdev->pdev; + u32 val; + + if (hclge_get_hw_reset_stat(handle)) { + dev_info(&pdev->dev, "hardware reset not finish\n"); + dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n", + hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING), + hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG)); + return; + } + + switch (hdev->reset_type) { + case HNAE3_IMP_RESET: + dev_info(&pdev->dev, "IMP reset requested\n"); + val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG); + hnae3_set_bit(val, HCLGE_TRIGGER_IMP_RESET_B, 1); + hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, val); + break; + case HNAE3_GLOBAL_RESET: + dev_info(&pdev->dev, "global reset requested\n"); + val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG); + hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1); + hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val); + break; + case HNAE3_FUNC_RESET: + dev_info(&pdev->dev, "PF reset requested\n"); + /* schedule again to check later */ + set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending); + hclge_reset_task_schedule(hdev); + break; + default: + dev_warn(&pdev->dev, + "unsupported reset type: %d\n", hdev->reset_type); + break; + } +} + +static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev, + unsigned long *addr) +{ + enum hnae3_reset_type rst_level = HNAE3_NONE_RESET; + struct hclge_dev *hdev = ae_dev->priv; + + /* return the highest priority reset level amongst all */ + if (test_bit(HNAE3_IMP_RESET, addr)) { + rst_level = HNAE3_IMP_RESET; + clear_bit(HNAE3_IMP_RESET, addr); + clear_bit(HNAE3_GLOBAL_RESET, addr); + clear_bit(HNAE3_FUNC_RESET, addr); + } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) { + rst_level = HNAE3_GLOBAL_RESET; + clear_bit(HNAE3_GLOBAL_RESET, addr); + clear_bit(HNAE3_FUNC_RESET, addr); + } else if (test_bit(HNAE3_FUNC_RESET, addr)) { + rst_level = HNAE3_FUNC_RESET; + clear_bit(HNAE3_FUNC_RESET, addr); + } else if (test_bit(HNAE3_FLR_RESET, addr)) { + rst_level = HNAE3_FLR_RESET; + clear_bit(HNAE3_FLR_RESET, addr); + } + + if (hdev->reset_type != HNAE3_NONE_RESET && + rst_level < hdev->reset_type) + return HNAE3_NONE_RESET; + + return rst_level; +} + +static void 
hclge_clear_reset_cause(struct hclge_dev *hdev) +{ + u32 clearval = 0; + + switch (hdev->reset_type) { + case HNAE3_IMP_RESET: + clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B); + break; + case HNAE3_GLOBAL_RESET: + clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B); + break; + default: + break; + } + + if (!clearval) + return; + + /* For revision 0x20, the reset interrupt source + * can only be cleared after hardware reset done + */ + if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) + hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, + clearval); + + hclge_enable_vector(&hdev->misc_vector, true); +} + +static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable) +{ + u32 reg_val; + + reg_val = hclge_read_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG); + if (enable) + reg_val |= HCLGE_COMM_NIC_SW_RST_RDY; + else + reg_val &= ~HCLGE_COMM_NIC_SW_RST_RDY; + + hclge_write_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG, reg_val); +} + +static int hclge_func_reset_notify_vf(struct hclge_dev *hdev) +{ + int ret; + + ret = hclge_set_all_vf_rst(hdev, true); + if (ret) + return ret; + + hclge_func_reset_sync_vf(hdev); + + return 0; +} + +static int hclge_reset_prepare_wait(struct hclge_dev *hdev) +{ + u32 reg_val; + int ret = 0; + + switch (hdev->reset_type) { + case HNAE3_FUNC_RESET: + ret = hclge_func_reset_notify_vf(hdev); + if (ret) + return ret; + + ret = hclge_func_reset_cmd(hdev, 0); + if (ret) { + dev_err(&hdev->pdev->dev, + "asserting function reset fail %d!\n", ret); + return ret; + } + + /* After performaning pf reset, it is not necessary to do the + * mailbox handling or send any command to firmware, because + * any mailbox handling or command to firmware is only valid + * after hclge_comm_cmd_init is called. + */ + set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); + hdev->rst_stats.pf_rst_cnt++; + break; + case HNAE3_FLR_RESET: + ret = hclge_func_reset_notify_vf(hdev); + if (ret) + return ret; + break; + case HNAE3_IMP_RESET: + hclge_handle_imp_error(hdev); + reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG); + hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, + BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val); + break; + default: + break; + } + + /* inform hardware that preparatory work is done */ + msleep(HCLGE_RESET_SYNC_TIME); + hclge_reset_handshake(hdev, true); + dev_info(&hdev->pdev->dev, "prepare wait ok\n"); + + return ret; +} + +static void hclge_show_rst_info(struct hclge_dev *hdev) +{ + char *buf; + + buf = kzalloc(HCLGE_DBG_RESET_INFO_LEN, GFP_KERNEL); + if (!buf) + return; + + hclge_dbg_dump_rst_info(hdev, buf, HCLGE_DBG_RESET_INFO_LEN); + + dev_info(&hdev->pdev->dev, "dump reset info:\n%s", buf); + + kfree(buf); +} + +static bool hclge_reset_err_handle(struct hclge_dev *hdev) +{ +#define MAX_RESET_FAIL_CNT 5 + + if (hdev->reset_pending) { + dev_info(&hdev->pdev->dev, "Reset pending %lu\n", + hdev->reset_pending); + return true; + } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) & + HCLGE_RESET_INT_M) { + dev_info(&hdev->pdev->dev, + "reset failed because new reset interrupt\n"); + hclge_clear_reset_cause(hdev); + return false; + } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) { + hdev->rst_stats.reset_fail_cnt++; + set_bit(hdev->reset_type, &hdev->reset_pending); + dev_info(&hdev->pdev->dev, + "re-schedule reset task(%u)\n", + hdev->rst_stats.reset_fail_cnt); + return true; + } + + hclge_clear_reset_cause(hdev); + + /* recover the handshake status when reset fail */ + hclge_reset_handshake(hdev, true); + + 
dev_err(&hdev->pdev->dev, "Reset fail!\n"); + + hclge_show_rst_info(hdev); + + set_bit(HCLGE_STATE_RST_FAIL, &hdev->state); + + return false; +} + +static void hclge_update_reset_level(struct hclge_dev *hdev) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); + enum hnae3_reset_type reset_level; + + /* reset request will not be set during reset, so clear + * pending reset request to avoid unnecessary reset + * caused by the same reason. + */ + hclge_get_reset_level(ae_dev, &hdev->reset_request); + + /* if default_reset_request has a higher level reset request, + * it should be handled as soon as possible. since some errors + * need this kind of reset to fix. + */ + reset_level = hclge_get_reset_level(ae_dev, + &hdev->default_reset_request); + if (reset_level != HNAE3_NONE_RESET) + set_bit(reset_level, &hdev->reset_request); +} + +static int hclge_set_rst_done(struct hclge_dev *hdev) +{ + struct hclge_pf_rst_done_cmd *req; + struct hclge_desc desc; + int ret; + + req = (struct hclge_pf_rst_done_cmd *)desc.data; + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false); + req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + /* To be compatible with the old firmware, which does not support + * command HCLGE_OPC_PF_RST_DONE, just print a warning and + * return success + */ + if (ret == -EOPNOTSUPP) { + dev_warn(&hdev->pdev->dev, + "current firmware does not support command(0x%x)!\n", + HCLGE_OPC_PF_RST_DONE); + return 0; + } else if (ret) { + dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n", + ret); + } + + return ret; +} + +static int hclge_reset_prepare_up(struct hclge_dev *hdev) +{ + int ret = 0; + + switch (hdev->reset_type) { + case HNAE3_FUNC_RESET: + case HNAE3_FLR_RESET: + ret = hclge_set_all_vf_rst(hdev, false); + break; + case HNAE3_GLOBAL_RESET: + case HNAE3_IMP_RESET: + ret = hclge_set_rst_done(hdev); + break; + default: + break; + } + + /* clear up the handshake status after re-initialize done */ + hclge_reset_handshake(hdev, false); + + return ret; +} + +static int hclge_reset_stack(struct hclge_dev *hdev) +{ + int ret; + + ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT); + if (ret) + return ret; + + ret = hclge_reset_ae_dev(hdev->ae_dev); + if (ret) + return ret; + + return hclge_notify_client(hdev, HNAE3_INIT_CLIENT); +} + +static int hclge_reset_prepare(struct hclge_dev *hdev) +{ + int ret; + + hdev->rst_stats.reset_cnt++; + /* perform reset of the stack & ae device for a client */ + ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT); + if (ret) + return ret; + + rtnl_lock(); + ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); + rtnl_unlock(); + if (ret) + return ret; + + return hclge_reset_prepare_wait(hdev); +} + +static int hclge_reset_rebuild(struct hclge_dev *hdev) +{ + int ret; + + hdev->rst_stats.hw_reset_done_cnt++; + + ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT); + if (ret) + return ret; + + rtnl_lock(); + ret = hclge_reset_stack(hdev); + rtnl_unlock(); + if (ret) + return ret; + + hclge_clear_reset_cause(hdev); + + ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT); + /* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1 + * times + */ + if (ret && + hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1) + return ret; + + ret = hclge_reset_prepare_up(hdev); + if (ret) + return ret; + + rtnl_lock(); + ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT); + rtnl_unlock(); + if (ret) + return ret; + + ret = hclge_notify_roce_client(hdev, 
HNAE3_UP_CLIENT); + if (ret) + return ret; + + hdev->last_reset_time = jiffies; + hdev->rst_stats.reset_fail_cnt = 0; + hdev->rst_stats.reset_done_cnt++; + clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state); + + hclge_update_reset_level(hdev); + + return 0; +} + +static void hclge_reset(struct hclge_dev *hdev) +{ + if (hclge_reset_prepare(hdev)) + goto err_reset; + + if (hclge_reset_wait(hdev)) + goto err_reset; + + if (hclge_reset_rebuild(hdev)) + goto err_reset; + + return; + +err_reset: + if (hclge_reset_err_handle(hdev)) + hclge_reset_task_schedule(hdev); +} + +static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); + struct hclge_dev *hdev = ae_dev->priv; + + /* We might end up getting called broadly because of 2 below cases: + * 1. Recoverable error was conveyed through APEI and only way to bring + * normalcy is to reset. + * 2. A new reset request from the stack due to timeout + * + * check if this is a new reset request and we are not here just because + * last reset attempt did not succeed and watchdog hit us again. We will + * know this if last reset request did not occur very recently (watchdog + * timer = 5*HZ, let us check after sufficiently large time, say 4*5*Hz) + * In case of new request we reset the "reset level" to PF reset. + * And if it is a repeat reset request of the most recent one then we + * want to make sure we throttle the reset request. Therefore, we will + * not allow it again before 3*HZ times. + */ + + if (time_before(jiffies, (hdev->last_reset_time + + HCLGE_RESET_INTERVAL))) { + mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL); + return; + } + + if (hdev->default_reset_request) { + hdev->reset_level = + hclge_get_reset_level(ae_dev, + &hdev->default_reset_request); + } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) { + hdev->reset_level = HNAE3_FUNC_RESET; + } + + dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n", + hdev->reset_level); + + /* request reset & schedule reset task */ + set_bit(hdev->reset_level, &hdev->reset_request); + hclge_reset_task_schedule(hdev); + + if (hdev->reset_level < HNAE3_GLOBAL_RESET) + hdev->reset_level++; +} + +static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev, + enum hnae3_reset_type rst_type) +{ + struct hclge_dev *hdev = ae_dev->priv; + + set_bit(rst_type, &hdev->default_reset_request); +} + +static void hclge_reset_timer(struct timer_list *t) +{ + struct hclge_dev *hdev = from_timer(hdev, t, reset_timer); + + /* if default_reset_request has no value, it means that this reset + * request has already be handled, so just return here + */ + if (!hdev->default_reset_request) + return; + + dev_info(&hdev->pdev->dev, + "triggering reset in reset timer\n"); + hclge_reset_event(hdev->pdev, NULL); +} + +static void hclge_reset_subtask(struct hclge_dev *hdev) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); + + /* check if there is any ongoing reset in the hardware. This status can + * be checked from reset_pending. If there is then, we need to wait for + * hardware to complete reset. + * a. If we are able to figure out in reasonable time that hardware + * has fully resetted then, we can proceed with driver, client + * reset. + * b. else, we can come back later to check this status so re-sched + * now. 
+ */ + hdev->last_reset_time = jiffies; + hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending); + if (hdev->reset_type != HNAE3_NONE_RESET) + hclge_reset(hdev); + + /* check if we got any *new* reset requests to be honored */ + hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request); + if (hdev->reset_type != HNAE3_NONE_RESET) + hclge_do_reset(hdev); + + hdev->reset_type = HNAE3_NONE_RESET; +} + +static void hclge_handle_err_reset_request(struct hclge_dev *hdev) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); + enum hnae3_reset_type reset_type; + + if (ae_dev->hw_err_reset_req) { + reset_type = hclge_get_reset_level(ae_dev, + &ae_dev->hw_err_reset_req); + hclge_set_def_reset_request(ae_dev, reset_type); + } + + if (hdev->default_reset_request && ae_dev->ops->reset_event) + ae_dev->ops->reset_event(hdev->pdev, NULL); + + /* enable interrupt after error handling complete */ + hclge_enable_vector(&hdev->misc_vector, true); +} + +static void hclge_handle_err_recovery(struct hclge_dev *hdev) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); + + ae_dev->hw_err_reset_req = 0; + + if (hclge_find_error_source(hdev)) { + hclge_handle_error_info_log(ae_dev); + hclge_handle_mac_tnl(hdev); + } + + hclge_handle_err_reset_request(hdev); +} + +static void hclge_misc_err_recovery(struct hclge_dev *hdev) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); + struct device *dev = &hdev->pdev->dev; + u32 msix_sts_reg; + + msix_sts_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS); + if (msix_sts_reg & HCLGE_VECTOR0_REG_MSIX_MASK) { + if (hclge_handle_hw_msix_error + (hdev, &hdev->default_reset_request)) + dev_info(dev, "received msix interrupt 0x%x\n", + msix_sts_reg); + } + + hclge_handle_hw_ras_error(ae_dev); + + hclge_handle_err_reset_request(hdev); +} + +static void hclge_errhand_service_task(struct hclge_dev *hdev) +{ + if (!test_and_clear_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state)) + return; + + if (hnae3_dev_ras_imp_supported(hdev)) + hclge_handle_err_recovery(hdev); + else + hclge_misc_err_recovery(hdev); +} + +static void hclge_reset_service_task(struct hclge_dev *hdev) +{ + if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state)) + return; + + if (time_is_before_jiffies(hdev->last_rst_scheduled + + HCLGE_RESET_SCHED_TIMEOUT)) + dev_warn(&hdev->pdev->dev, + "reset service task is scheduled after %ums on cpu%u!\n", + jiffies_to_msecs(jiffies - hdev->last_rst_scheduled), + smp_processor_id()); + + down(&hdev->reset_sem); + set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); + + hclge_reset_subtask(hdev); + + clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); + up(&hdev->reset_sem); +} + +static void hclge_update_vport_alive(struct hclge_dev *hdev) +{ +#define HCLGE_ALIVE_SECONDS_NORMAL 8 + + unsigned long alive_time = HCLGE_ALIVE_SECONDS_NORMAL * HZ; + int i; + + /* start from vport 1 for PF is always alive */ + for (i = 1; i < hdev->num_alloc_vport; i++) { + struct hclge_vport *vport = &hdev->vport[i]; + + if (!test_bit(HCLGE_VPORT_STATE_INITED, &vport->state) || + !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) + continue; + if (time_after(jiffies, vport->last_active_jiffies + + alive_time)) { + clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); + dev_warn(&hdev->pdev->dev, + "VF %u heartbeat timeout\n", + i - HCLGE_VF_VPORT_START_NUM); + } + } +} + +static void hclge_periodic_service_task(struct hclge_dev *hdev) +{ + unsigned long delta = round_jiffies_relative(HZ); + + if 
(test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) + return; + + /* Always handle the link updating to make sure link state is + * updated when it is triggered by mbx. + */ + hclge_update_link_status(hdev); + hclge_sync_mac_table(hdev); + hclge_sync_promisc_mode(hdev); + hclge_sync_fd_table(hdev); + + if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) { + delta = jiffies - hdev->last_serv_processed; + + if (delta < round_jiffies_relative(HZ)) { + delta = round_jiffies_relative(HZ) - delta; + goto out; + } + } + + hdev->serv_processed_cnt++; + hclge_update_vport_alive(hdev); + + if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) { + hdev->last_serv_processed = jiffies; + goto out; + } + + if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL)) + hclge_update_stats_for_all(hdev); + + hclge_update_port_info(hdev); + hclge_sync_vlan_filter(hdev); + + if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL)) + hclge_rfs_filter_expire(hdev); + + hdev->last_serv_processed = jiffies; + +out: + hclge_task_schedule(hdev, delta); +} + +static void hclge_ptp_service_task(struct hclge_dev *hdev) +{ + unsigned long flags; + + if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state) || + !test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state) || + !time_is_before_jiffies(hdev->ptp->tx_start + HZ)) + return; + + /* to prevent concurrence with the irq handler */ + spin_lock_irqsave(&hdev->ptp->lock, flags); + + /* check HCLGE_STATE_PTP_TX_HANDLING here again, since the irq + * handler may handle it just before spin_lock_irqsave(). + */ + if (test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state)) + hclge_ptp_clean_tx_hwts(hdev); + + spin_unlock_irqrestore(&hdev->ptp->lock, flags); +} + +static void hclge_service_task(struct work_struct *work) +{ + struct hclge_dev *hdev = + container_of(work, struct hclge_dev, service_task.work); + + hclge_errhand_service_task(hdev); + hclge_reset_service_task(hdev); + hclge_ptp_service_task(hdev); + hclge_mailbox_service_task(hdev); + hclge_periodic_service_task(hdev); + + /* Handle error recovery, reset and mbx again in case periodical task + * delays the handling by calling hclge_task_schedule() in + * hclge_periodic_service_task(). 
+ */ + hclge_errhand_service_task(hdev); + hclge_reset_service_task(hdev); + hclge_mailbox_service_task(hdev); +} + +struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle) +{ + /* VF handle has no client */ + if (!handle->client) + return container_of(handle, struct hclge_vport, nic); + else if (handle->client->type == HNAE3_CLIENT_ROCE) + return container_of(handle, struct hclge_vport, roce); + else + return container_of(handle, struct hclge_vport, nic); +} + +static void hclge_get_vector_info(struct hclge_dev *hdev, u16 idx, + struct hnae3_vector_info *vector_info) +{ +#define HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 64 + + vector_info->vector = pci_irq_vector(hdev->pdev, idx); + + /* need an extend offset to config vector >= 64 */ + if (idx - 1 < HCLGE_PF_MAX_VECTOR_NUM_DEV_V2) + vector_info->io_addr = hdev->hw.hw.io_base + + HCLGE_VECTOR_REG_BASE + + (idx - 1) * HCLGE_VECTOR_REG_OFFSET; + else + vector_info->io_addr = hdev->hw.hw.io_base + + HCLGE_VECTOR_EXT_REG_BASE + + (idx - 1) / HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 * + HCLGE_VECTOR_REG_OFFSET_H + + (idx - 1) % HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 * + HCLGE_VECTOR_REG_OFFSET; + + hdev->vector_status[idx] = hdev->vport[0].vport_id; + hdev->vector_irq[idx] = vector_info->vector; +} + +static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num, + struct hnae3_vector_info *vector_info) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hnae3_vector_info *vector = vector_info; + struct hclge_dev *hdev = vport->back; + int alloc = 0; + u16 i = 0; + u16 j; + + vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num); + vector_num = min(hdev->num_msi_left, vector_num); + + for (j = 0; j < vector_num; j++) { + while (++i < hdev->num_nic_msi) { + if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) { + hclge_get_vector_info(hdev, i, vector); + vector++; + alloc++; + + break; + } + } + } + hdev->num_msi_left -= alloc; + hdev->num_msi_used += alloc; + + return alloc; +} + +static int hclge_get_vector_index(struct hclge_dev *hdev, int vector) +{ + int i; + + for (i = 0; i < hdev->num_msi; i++) + if (vector == hdev->vector_irq[i]) + return i; + + return -EINVAL; +} + +static int hclge_put_vector(struct hnae3_handle *handle, int vector) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + int vector_id; + + vector_id = hclge_get_vector_index(hdev, vector); + if (vector_id < 0) { + dev_err(&hdev->pdev->dev, + "Get vector index fail. 
vector = %d\n", vector); + return vector_id; + } + + hclge_free_vector(hdev, vector_id); + + return 0; +} + +static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir, + u8 *key, u8 *hfunc) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev); + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_comm_rss_cfg *rss_cfg = &vport->back->rss_cfg; + + hclge_comm_get_rss_hash_info(rss_cfg, key, hfunc); + + hclge_comm_get_rss_indir_tbl(rss_cfg, indir, + ae_dev->dev_specs.rss_ind_tbl_size); + + return 0; +} + +static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev); + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg; + int ret, i; + + ret = hclge_comm_set_rss_hash_key(rss_cfg, &hdev->hw.hw, key, hfunc); + if (ret) { + dev_err(&hdev->pdev->dev, "invalid hfunc type %u\n", hfunc); + return ret; + } + + /* Update the shadow RSS table with user specified qids */ + for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++) + rss_cfg->rss_indirection_tbl[i] = indir[i]; + + /* Update the hardware */ + return hclge_comm_set_rss_indir_table(ae_dev, &hdev->hw.hw, + rss_cfg->rss_indirection_tbl); +} + +static int hclge_set_rss_tuple(struct hnae3_handle *handle, + struct ethtool_rxnfc *nfc) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + int ret; + + ret = hclge_comm_set_rss_tuple(hdev->ae_dev, &hdev->hw.hw, + &hdev->rss_cfg, nfc); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to set rss tuple, ret = %d.\n", ret); + return ret; + } + + return 0; +} + +static int hclge_get_rss_tuple(struct hnae3_handle *handle, + struct ethtool_rxnfc *nfc) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + u8 tuple_sets; + int ret; + + nfc->data = 0; + + ret = hclge_comm_get_rss_tuple(&vport->back->rss_cfg, nfc->flow_type, + &tuple_sets); + if (ret || !tuple_sets) + return ret; + + nfc->data = hclge_comm_convert_rss_tuple(tuple_sets); + + return 0; +} + +static int hclge_get_tc_size(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + return hdev->pf_rss_size_max; +} + +static int hclge_init_rss_tc_mode(struct hclge_dev *hdev) +{ + struct hnae3_ae_dev *ae_dev = hdev->ae_dev; + struct hclge_vport *vport = hdev->vport; + u16 tc_offset[HCLGE_MAX_TC_NUM] = {0}; + u16 tc_valid[HCLGE_MAX_TC_NUM] = {0}; + u16 tc_size[HCLGE_MAX_TC_NUM] = {0}; + struct hnae3_tc_info *tc_info; + u16 roundup_size; + u16 rss_size; + int i; + + tc_info = &vport->nic.kinfo.tc_info; + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { + rss_size = tc_info->tqp_count[i]; + tc_valid[i] = 0; + + if (!(hdev->hw_tc_map & BIT(i))) + continue; + + /* tc_size set to hardware is the log2 of roundup power of two + * of rss_size, the acutal queue size is limited by indirection + * table. 
+ */ + if (rss_size > ae_dev->dev_specs.rss_ind_tbl_size || + rss_size == 0) { + dev_err(&hdev->pdev->dev, + "Configure rss tc size failed, invalid TC_SIZE = %u\n", + rss_size); + return -EINVAL; + } + + roundup_size = roundup_pow_of_two(rss_size); + roundup_size = ilog2(roundup_size); + + tc_valid[i] = 1; + tc_size[i] = roundup_size; + tc_offset[i] = tc_info->tqp_offset[i]; + } + + return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset, tc_valid, + tc_size); +} + +int hclge_rss_init_hw(struct hclge_dev *hdev) +{ + u16 *rss_indir = hdev->rss_cfg.rss_indirection_tbl; + u8 *key = hdev->rss_cfg.rss_hash_key; + u8 hfunc = hdev->rss_cfg.rss_algo; + int ret; + + ret = hclge_comm_set_rss_indir_table(hdev->ae_dev, &hdev->hw.hw, + rss_indir); + if (ret) + return ret; + + ret = hclge_comm_set_rss_algo_key(&hdev->hw.hw, hfunc, key); + if (ret) + return ret; + + ret = hclge_comm_set_rss_input_tuple(&hdev->vport[0].nic, + &hdev->hw.hw, true, + &hdev->rss_cfg); + if (ret) + return ret; + + return hclge_init_rss_tc_mode(hdev); +} + +int hclge_bind_ring_with_vector(struct hclge_vport *vport, + int vector_id, bool en, + struct hnae3_ring_chain_node *ring_chain) +{ + struct hclge_dev *hdev = vport->back; + struct hnae3_ring_chain_node *node; + struct hclge_desc desc; + struct hclge_ctrl_vector_chain_cmd *req = + (struct hclge_ctrl_vector_chain_cmd *)desc.data; + enum hclge_comm_cmd_status status; + enum hclge_opcode_type op; + u16 tqp_type_and_id; + int i; + + op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR; + hclge_cmd_setup_basic_desc(&desc, op, false); + req->int_vector_id_l = hnae3_get_field(vector_id, + HCLGE_VECTOR_ID_L_M, + HCLGE_VECTOR_ID_L_S); + req->int_vector_id_h = hnae3_get_field(vector_id, + HCLGE_VECTOR_ID_H_M, + HCLGE_VECTOR_ID_H_S); + + i = 0; + for (node = ring_chain; node; node = node->next) { + tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]); + hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M, + HCLGE_INT_TYPE_S, + hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B)); + hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M, + HCLGE_TQP_ID_S, node->tqp_index); + hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M, + HCLGE_INT_GL_IDX_S, + hnae3_get_field(node->int_gl_idx, + HNAE3_RING_GL_IDX_M, + HNAE3_RING_GL_IDX_S)); + req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id); + if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) { + req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD; + req->vfid = vport->vport_id; + + status = hclge_cmd_send(&hdev->hw, &desc, 1); + if (status) { + dev_err(&hdev->pdev->dev, + "Map TQP fail, status is %d.\n", + status); + return -EIO; + } + i = 0; + + hclge_cmd_setup_basic_desc(&desc, + op, + false); + req->int_vector_id_l = + hnae3_get_field(vector_id, + HCLGE_VECTOR_ID_L_M, + HCLGE_VECTOR_ID_L_S); + req->int_vector_id_h = + hnae3_get_field(vector_id, + HCLGE_VECTOR_ID_H_M, + HCLGE_VECTOR_ID_H_S); + } + } + + if (i > 0) { + req->int_cause_num = i; + req->vfid = vport->vport_id; + status = hclge_cmd_send(&hdev->hw, &desc, 1); + if (status) { + dev_err(&hdev->pdev->dev, + "Map TQP fail, status is %d.\n", status); + return -EIO; + } + } + + return 0; +} + +static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector, + struct hnae3_ring_chain_node *ring_chain) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + int vector_id; + + vector_id = hclge_get_vector_index(hdev, vector); + if (vector_id < 0) { + dev_err(&hdev->pdev->dev, + "failed to get vector index. 
vector=%d\n", vector); + return vector_id; + } + + return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain); +} + +static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector, + struct hnae3_ring_chain_node *ring_chain) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + int vector_id, ret; + + if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) + return 0; + + vector_id = hclge_get_vector_index(hdev, vector); + if (vector_id < 0) { + dev_err(&handle->pdev->dev, + "Get vector index fail. ret =%d\n", vector_id); + return vector_id; + } + + ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain); + if (ret) + dev_err(&handle->pdev->dev, + "Unmap ring from vector fail. vectorid=%d, ret =%d\n", + vector_id, ret); + + return ret; +} + +static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, u8 vf_id, + bool en_uc, bool en_mc, bool en_bc) +{ + struct hclge_vport *vport = &hdev->vport[vf_id]; + struct hnae3_handle *handle = &vport->nic; + struct hclge_promisc_cfg_cmd *req; + struct hclge_desc desc; + bool uc_tx_en = en_uc; + u8 promisc_cfg = 0; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false); + + req = (struct hclge_promisc_cfg_cmd *)desc.data; + req->vf_id = vf_id; + + if (test_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags)) + uc_tx_en = false; + + hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_RX_EN, en_uc ? 1 : 0); + hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_RX_EN, en_mc ? 1 : 0); + hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_RX_EN, en_bc ? 1 : 0); + hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_TX_EN, uc_tx_en ? 1 : 0); + hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_TX_EN, en_mc ? 1 : 0); + hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_TX_EN, en_bc ? 1 : 0); + req->extend_promisc = promisc_cfg; + + /* to be compatible with DEVICE_VERSION_V1/2 */ + promisc_cfg = 0; + hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_UC, en_uc ? 1 : 0); + hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_MC, en_mc ? 1 : 0); + hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_BC, en_bc ? 1 : 0); + hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_TX_EN, 1); + hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_RX_EN, 1); + req->promisc = promisc_cfg; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "failed to set vport %u promisc mode, ret = %d.\n", + vf_id, ret); + + return ret; +} + +int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc, + bool en_mc_pmc, bool en_bc_pmc) +{ + return hclge_cmd_set_promisc_mode(vport->back, vport->vport_id, + en_uc_pmc, en_mc_pmc, en_bc_pmc); +} + +static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc, + bool en_mc_pmc) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + bool en_bc_pmc = true; + + /* For device whose version below V2, if broadcast promisc enabled, + * vlan filter is always bypassed. So broadcast promisc should be + * disabled until user enable promisc mode + */ + if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) + en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? 
true : false; + + return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc, + en_bc_pmc); +} + +static void hclge_request_update_promisc_mode(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + + set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state); +} + +static void hclge_sync_fd_state(struct hclge_dev *hdev) +{ + if (hlist_empty(&hdev->fd_rule_list)) + hdev->fd_active_type = HCLGE_FD_RULE_NONE; +} + +static void hclge_fd_inc_rule_cnt(struct hclge_dev *hdev, u16 location) +{ + if (!test_bit(location, hdev->fd_bmap)) { + set_bit(location, hdev->fd_bmap); + hdev->hclge_fd_rule_num++; + } +} + +static void hclge_fd_dec_rule_cnt(struct hclge_dev *hdev, u16 location) +{ + if (test_bit(location, hdev->fd_bmap)) { + clear_bit(location, hdev->fd_bmap); + hdev->hclge_fd_rule_num--; + } +} + +static void hclge_fd_free_node(struct hclge_dev *hdev, + struct hclge_fd_rule *rule) +{ + hlist_del(&rule->rule_node); + kfree(rule); + hclge_sync_fd_state(hdev); +} + +static void hclge_update_fd_rule_node(struct hclge_dev *hdev, + struct hclge_fd_rule *old_rule, + struct hclge_fd_rule *new_rule, + enum HCLGE_FD_NODE_STATE state) +{ + switch (state) { + case HCLGE_FD_TO_ADD: + case HCLGE_FD_ACTIVE: + /* 1) if the new state is TO_ADD, just replace the old rule + * with the same location, no matter its state, because the + * new rule will be configured to the hardware. + * 2) if the new state is ACTIVE, it means the new rule + * has been configured to the hardware, so just replace + * the old rule node with the same location. + * 3) for it doesn't add a new node to the list, so it's + * unnecessary to update the rule number and fd_bmap. + */ + new_rule->rule_node.next = old_rule->rule_node.next; + new_rule->rule_node.pprev = old_rule->rule_node.pprev; + memcpy(old_rule, new_rule, sizeof(*old_rule)); + kfree(new_rule); + break; + case HCLGE_FD_DELETED: + hclge_fd_dec_rule_cnt(hdev, old_rule->location); + hclge_fd_free_node(hdev, old_rule); + break; + case HCLGE_FD_TO_DEL: + /* if new request is TO_DEL, and old rule is existent + * 1) the state of old rule is TO_DEL, we need do nothing, + * because we delete rule by location, other rule content + * is unncessary. + * 2) the state of old rule is ACTIVE, we need to change its + * state to TO_DEL, so the rule will be deleted when periodic + * task being scheduled. + * 3) the state of old rule is TO_ADD, it means the rule hasn't + * been added to hardware, so we just delete the rule node from + * fd_rule_list directly. + */ + if (old_rule->state == HCLGE_FD_TO_ADD) { + hclge_fd_dec_rule_cnt(hdev, old_rule->location); + hclge_fd_free_node(hdev, old_rule); + return; + } + old_rule->state = HCLGE_FD_TO_DEL; + break; + } +} + +static struct hclge_fd_rule *hclge_find_fd_rule(struct hlist_head *hlist, + u16 location, + struct hclge_fd_rule **parent) +{ + struct hclge_fd_rule *rule; + struct hlist_node *node; + + hlist_for_each_entry_safe(rule, node, hlist, rule_node) { + if (rule->location == location) + return rule; + else if (rule->location > location) + return NULL; + /* record the parent node, use to keep the nodes in fd_rule_list + * in ascend order. 
+ */ + *parent = rule; + } + + return NULL; +} + +/* insert fd rule node in ascend order according to rule->location */ +static void hclge_fd_insert_rule_node(struct hlist_head *hlist, + struct hclge_fd_rule *rule, + struct hclge_fd_rule *parent) +{ + INIT_HLIST_NODE(&rule->rule_node); + + if (parent) + hlist_add_behind(&rule->rule_node, &parent->rule_node); + else + hlist_add_head(&rule->rule_node, hlist); +} + +static int hclge_fd_set_user_def_cmd(struct hclge_dev *hdev, + struct hclge_fd_user_def_cfg *cfg) +{ + struct hclge_fd_user_def_cfg_cmd *req; + struct hclge_desc desc; + u16 data = 0; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_USER_DEF_OP, false); + + req = (struct hclge_fd_user_def_cfg_cmd *)desc.data; + + hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[0].ref_cnt > 0); + hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M, + HCLGE_FD_USER_DEF_OFT_S, cfg[0].offset); + req->ol2_cfg = cpu_to_le16(data); + + data = 0; + hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[1].ref_cnt > 0); + hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M, + HCLGE_FD_USER_DEF_OFT_S, cfg[1].offset); + req->ol3_cfg = cpu_to_le16(data); + + data = 0; + hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[2].ref_cnt > 0); + hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M, + HCLGE_FD_USER_DEF_OFT_S, cfg[2].offset); + req->ol4_cfg = cpu_to_le16(data); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "failed to set fd user def data, ret= %d\n", ret); + return ret; +} + +static void hclge_sync_fd_user_def_cfg(struct hclge_dev *hdev, bool locked) +{ + int ret; + + if (!test_and_clear_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state)) + return; + + if (!locked) + spin_lock_bh(&hdev->fd_rule_lock); + + ret = hclge_fd_set_user_def_cmd(hdev, hdev->fd_cfg.user_def_cfg); + if (ret) + set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state); + + if (!locked) + spin_unlock_bh(&hdev->fd_rule_lock); +} + +static int hclge_fd_check_user_def_refcnt(struct hclge_dev *hdev, + struct hclge_fd_rule *rule) +{ + struct hlist_head *hlist = &hdev->fd_rule_list; + struct hclge_fd_rule *fd_rule, *parent = NULL; + struct hclge_fd_user_def_info *info, *old_info; + struct hclge_fd_user_def_cfg *cfg; + + if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE || + rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE) + return 0; + + /* for valid layer is start from 1, so need minus 1 to get the cfg */ + cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1]; + info = &rule->ep.user_def; + + if (!cfg->ref_cnt || cfg->offset == info->offset) + return 0; + + if (cfg->ref_cnt > 1) + goto error; + + fd_rule = hclge_find_fd_rule(hlist, rule->location, &parent); + if (fd_rule) { + old_info = &fd_rule->ep.user_def; + if (info->layer == old_info->layer) + return 0; + } + +error: + dev_err(&hdev->pdev->dev, + "No available offset for layer%d fd rule, each layer only support one user def offset.\n", + info->layer + 1); + return -ENOSPC; +} + +static void hclge_fd_inc_user_def_refcnt(struct hclge_dev *hdev, + struct hclge_fd_rule *rule) +{ + struct hclge_fd_user_def_cfg *cfg; + + if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE || + rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE) + return; + + cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1]; + if (!cfg->ref_cnt) { + cfg->offset = rule->ep.user_def.offset; + set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state); + } + cfg->ref_cnt++; +} + +static void hclge_fd_dec_user_def_refcnt(struct hclge_dev *hdev, + struct hclge_fd_rule *rule) +{ + 
struct hclge_fd_user_def_cfg *cfg; + + if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE || + rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE) + return; + + cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1]; + if (!cfg->ref_cnt) + return; + + cfg->ref_cnt--; + if (!cfg->ref_cnt) { + cfg->offset = 0; + set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state); + } +} + +static void hclge_update_fd_list(struct hclge_dev *hdev, + enum HCLGE_FD_NODE_STATE state, u16 location, + struct hclge_fd_rule *new_rule) +{ + struct hlist_head *hlist = &hdev->fd_rule_list; + struct hclge_fd_rule *fd_rule, *parent = NULL; + + fd_rule = hclge_find_fd_rule(hlist, location, &parent); + if (fd_rule) { + hclge_fd_dec_user_def_refcnt(hdev, fd_rule); + if (state == HCLGE_FD_ACTIVE) + hclge_fd_inc_user_def_refcnt(hdev, new_rule); + hclge_sync_fd_user_def_cfg(hdev, true); + + hclge_update_fd_rule_node(hdev, fd_rule, new_rule, state); + return; + } + + /* it's unlikely to fail here, because we have checked the rule + * exist before. + */ + if (unlikely(state == HCLGE_FD_TO_DEL || state == HCLGE_FD_DELETED)) { + dev_warn(&hdev->pdev->dev, + "failed to delete fd rule %u, it's inexistent\n", + location); + return; + } + + hclge_fd_inc_user_def_refcnt(hdev, new_rule); + hclge_sync_fd_user_def_cfg(hdev, true); + + hclge_fd_insert_rule_node(hlist, new_rule, parent); + hclge_fd_inc_rule_cnt(hdev, new_rule->location); + + if (state == HCLGE_FD_TO_ADD) { + set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state); + hclge_task_schedule(hdev, 0); + } +} + +static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode) +{ + struct hclge_get_fd_mode_cmd *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true); + + req = (struct hclge_get_fd_mode_cmd *)desc.data; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret); + return ret; + } + + *fd_mode = req->mode; + + return ret; +} + +static int hclge_get_fd_allocation(struct hclge_dev *hdev, + u32 *stage1_entry_num, + u32 *stage2_entry_num, + u16 *stage1_counter_num, + u16 *stage2_counter_num) +{ + struct hclge_get_fd_allocation_cmd *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true); + + req = (struct hclge_get_fd_allocation_cmd *)desc.data; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n", + ret); + return ret; + } + + *stage1_entry_num = le32_to_cpu(req->stage1_entry_num); + *stage2_entry_num = le32_to_cpu(req->stage2_entry_num); + *stage1_counter_num = le16_to_cpu(req->stage1_counter_num); + *stage2_counter_num = le16_to_cpu(req->stage2_counter_num); + + return ret; +} + +static int hclge_set_fd_key_config(struct hclge_dev *hdev, + enum HCLGE_FD_STAGE stage_num) +{ + struct hclge_set_fd_key_config_cmd *req; + struct hclge_fd_key_cfg *stage; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false); + + req = (struct hclge_set_fd_key_config_cmd *)desc.data; + stage = &hdev->fd_cfg.key_cfg[stage_num]; + req->stage = stage_num; + req->key_select = stage->key_sel; + req->inner_sipv6_word_en = stage->inner_sipv6_word_en; + req->inner_dipv6_word_en = stage->inner_dipv6_word_en; + req->outer_sipv6_word_en = stage->outer_sipv6_word_en; + req->outer_dipv6_word_en = stage->outer_dipv6_word_en; + req->tuple_mask = cpu_to_le32(~stage->tuple_active); + req->meta_data_mask = 
cpu_to_le32(~stage->meta_data_active); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret); + + return ret; +} + +static void hclge_fd_disable_user_def(struct hclge_dev *hdev) +{ + struct hclge_fd_user_def_cfg *cfg = hdev->fd_cfg.user_def_cfg; + + spin_lock_bh(&hdev->fd_rule_lock); + memset(cfg, 0, sizeof(hdev->fd_cfg.user_def_cfg)); + spin_unlock_bh(&hdev->fd_rule_lock); + + hclge_fd_set_user_def_cmd(hdev, cfg); +} + +static int hclge_init_fd_config(struct hclge_dev *hdev) +{ +#define LOW_2_WORDS 0x03 + struct hclge_fd_key_cfg *key_cfg; + int ret; + + if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) + return 0; + + ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode); + if (ret) + return ret; + + switch (hdev->fd_cfg.fd_mode) { + case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1: + hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH; + break; + case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1: + hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2; + break; + default: + dev_err(&hdev->pdev->dev, + "Unsupported flow director mode %u\n", + hdev->fd_cfg.fd_mode); + return -EOPNOTSUPP; + } + + key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1]; + key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE; + key_cfg->inner_sipv6_word_en = LOW_2_WORDS; + key_cfg->inner_dipv6_word_en = LOW_2_WORDS; + key_cfg->outer_sipv6_word_en = 0; + key_cfg->outer_dipv6_word_en = 0; + + key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) | + BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) | + BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) | + BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT); + + /* If use max 400bit key, we can support tuples for ether type */ + if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) { + key_cfg->tuple_active |= + BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC); + if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) + key_cfg->tuple_active |= HCLGE_FD_TUPLE_USER_DEF_TUPLES; + } + + /* roce_type is used to filter roce frames + * dst_vport is used to specify the rule + */ + key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT); + + ret = hclge_get_fd_allocation(hdev, + &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1], + &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2], + &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1], + &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]); + if (ret) + return ret; + + return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1); +} + +static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x, + int loc, u8 *key, bool is_add) +{ + struct hclge_fd_tcam_config_1_cmd *req1; + struct hclge_fd_tcam_config_2_cmd *req2; + struct hclge_fd_tcam_config_3_cmd *req3; + struct hclge_desc desc[3]; + int ret; + + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false); + desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false); + desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false); + + req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data; + req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data; + req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data; + + req1->stage = stage; + req1->xy_sel = sel_x ? 1 : 0; + hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0); + req1->index = cpu_to_le32(loc); + req1->entry_vld = sel_x ? 
is_add : 0; + + if (key) { + memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data)); + memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)], + sizeof(req2->tcam_data)); + memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) + + sizeof(req2->tcam_data)], sizeof(req3->tcam_data)); + } + + ret = hclge_cmd_send(&hdev->hw, desc, 3); + if (ret) + dev_err(&hdev->pdev->dev, + "config tcam key fail, ret=%d\n", + ret); + + return ret; +} + +static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc, + struct hclge_fd_ad_data *action) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); + struct hclge_fd_ad_config_cmd *req; + struct hclge_desc desc; + u64 ad_data = 0; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false); + + req = (struct hclge_fd_ad_config_cmd *)desc.data; + req->index = cpu_to_le32(loc); + req->stage = stage; + + hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B, + action->write_rule_id_to_bd); + hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S, + action->rule_id); + if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) { + hnae3_set_bit(ad_data, HCLGE_FD_AD_TC_OVRD_B, + action->override_tc); + hnae3_set_field(ad_data, HCLGE_FD_AD_TC_SIZE_M, + HCLGE_FD_AD_TC_SIZE_S, (u32)action->tc_size); + } + ad_data <<= 32; + hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet); + hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B, + action->forward_to_direct_queue); + hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S, + action->queue_id); + hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter); + hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M, + HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id); + hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage); + hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S, + action->counter_id); + + req->ad_data = cpu_to_le64(ad_data); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret); + + return ret; +} + +static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y, + struct hclge_fd_rule *rule) +{ + int offset, moffset, ip_offset; + enum HCLGE_FD_KEY_OPT key_opt; + u16 tmp_x_s, tmp_y_s; + u32 tmp_x_l, tmp_y_l; + u8 *p = (u8 *)rule; + int i; + + if (rule->unused_tuple & BIT(tuple_bit)) + return true; + + key_opt = tuple_key_info[tuple_bit].key_opt; + offset = tuple_key_info[tuple_bit].offset; + moffset = tuple_key_info[tuple_bit].moffset; + + switch (key_opt) { + case KEY_OPT_U8: + calc_x(*key_x, p[offset], p[moffset]); + calc_y(*key_y, p[offset], p[moffset]); + + return true; + case KEY_OPT_LE16: + calc_x(tmp_x_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset])); + calc_y(tmp_y_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset])); + *(__le16 *)key_x = cpu_to_le16(tmp_x_s); + *(__le16 *)key_y = cpu_to_le16(tmp_y_s); + + return true; + case KEY_OPT_LE32: + calc_x(tmp_x_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset])); + calc_y(tmp_y_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset])); + *(__le32 *)key_x = cpu_to_le32(tmp_x_l); + *(__le32 *)key_y = cpu_to_le32(tmp_y_l); + + return true; + case KEY_OPT_MAC: + for (i = 0; i < ETH_ALEN; i++) { + calc_x(key_x[ETH_ALEN - 1 - i], p[offset + i], + p[moffset + i]); + calc_y(key_y[ETH_ALEN - 1 - i], p[offset + i], + p[moffset + i]); + } + + return true; + case KEY_OPT_IP: + ip_offset = IPV4_INDEX * sizeof(u32); + calc_x(tmp_x_l, *(u32 *)(&p[offset + ip_offset]), + *(u32 *)(&p[moffset + ip_offset])); + 
calc_y(tmp_y_l, *(u32 *)(&p[offset + ip_offset]), + *(u32 *)(&p[moffset + ip_offset])); + *(__le32 *)key_x = cpu_to_le32(tmp_x_l); + *(__le32 *)key_y = cpu_to_le32(tmp_y_l); + + return true; + default: + return false; + } +} + +static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id, + u8 vf_id, u8 network_port_id) +{ + u32 port_number = 0; + + if (port_type == HOST_PORT) { + hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S, + pf_id); + hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S, + vf_id); + hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT); + } else { + hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M, + HCLGE_NETWORK_PORT_ID_S, network_port_id); + hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT); + } + + return port_number; +} + +static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg, + __le32 *key_x, __le32 *key_y, + struct hclge_fd_rule *rule) +{ + u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number; + u8 cur_pos = 0, tuple_size, shift_bits; + unsigned int i; + + for (i = 0; i < MAX_META_DATA; i++) { + tuple_size = meta_data_key_info[i].key_length; + tuple_bit = key_cfg->meta_data_active & BIT(i); + + switch (tuple_bit) { + case BIT(ROCE_TYPE): + hnae3_set_bit(meta_data, cur_pos, NIC_PACKET); + cur_pos += tuple_size; + break; + case BIT(DST_VPORT): + port_number = hclge_get_port_number(HOST_PORT, 0, + rule->vf_id, 0); + hnae3_set_field(meta_data, + GENMASK(cur_pos + tuple_size, cur_pos), + cur_pos, port_number); + cur_pos += tuple_size; + break; + default: + break; + } + } + + calc_x(tmp_x, meta_data, 0xFFFFFFFF); + calc_y(tmp_y, meta_data, 0xFFFFFFFF); + shift_bits = sizeof(meta_data) * 8 - cur_pos; + + *key_x = cpu_to_le32(tmp_x << shift_bits); + *key_y = cpu_to_le32(tmp_y << shift_bits); +} + +/* A complete key is combined with meta data key and tuple key. + * Meta data key is stored at the MSB region, and tuple key is stored at + * the LSB region, unused bits will be filled 0. 
+ */ +static int hclge_config_key(struct hclge_dev *hdev, u8 stage, + struct hclge_fd_rule *rule) +{ + struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage]; + u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES]; + u8 *cur_key_x, *cur_key_y; + u8 meta_data_region; + u8 tuple_size; + int ret; + u32 i; + + memset(key_x, 0, sizeof(key_x)); + memset(key_y, 0, sizeof(key_y)); + cur_key_x = key_x; + cur_key_y = key_y; + + for (i = 0; i < MAX_TUPLE; i++) { + bool tuple_valid; + + tuple_size = tuple_key_info[i].key_length / 8; + if (!(key_cfg->tuple_active & BIT(i))) + continue; + + tuple_valid = hclge_fd_convert_tuple(i, cur_key_x, + cur_key_y, rule); + if (tuple_valid) { + cur_key_x += tuple_size; + cur_key_y += tuple_size; + } + } + + meta_data_region = hdev->fd_cfg.max_key_length / 8 - + MAX_META_DATA_LENGTH / 8; + + hclge_fd_convert_meta_data(key_cfg, + (__le32 *)(key_x + meta_data_region), + (__le32 *)(key_y + meta_data_region), + rule); + + ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y, + true); + if (ret) { + dev_err(&hdev->pdev->dev, + "fd key_y config fail, loc=%u, ret=%d\n", + rule->queue_id, ret); + return ret; + } + + ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x, + true); + if (ret) + dev_err(&hdev->pdev->dev, + "fd key_x config fail, loc=%u, ret=%d\n", + rule->queue_id, ret); + return ret; +} + +static int hclge_config_action(struct hclge_dev *hdev, u8 stage, + struct hclge_fd_rule *rule) +{ + struct hclge_vport *vport = hdev->vport; + struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; + struct hclge_fd_ad_data ad_data; + + memset(&ad_data, 0, sizeof(struct hclge_fd_ad_data)); + ad_data.ad_id = rule->location; + + if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) { + ad_data.drop_packet = true; + } else if (rule->action == HCLGE_FD_ACTION_SELECT_TC) { + ad_data.override_tc = true; + ad_data.queue_id = + kinfo->tc_info.tqp_offset[rule->cls_flower.tc]; + ad_data.tc_size = + ilog2(kinfo->tc_info.tqp_count[rule->cls_flower.tc]); + } else { + ad_data.forward_to_direct_queue = true; + ad_data.queue_id = rule->queue_id; + } + + if (hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1]) { + ad_data.use_counter = true; + ad_data.counter_id = rule->vf_id % + hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1]; + } else { + ad_data.use_counter = false; + ad_data.counter_id = 0; + } + + ad_data.use_next_stage = false; + ad_data.next_input_key = 0; + + ad_data.write_rule_id_to_bd = true; + ad_data.rule_id = rule->location; + + return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data); +} + +static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec, + u32 *unused_tuple) +{ + if (!spec || !unused_tuple) + return -EINVAL; + + *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC); + + if (!spec->ip4src) + *unused_tuple |= BIT(INNER_SRC_IP); + + if (!spec->ip4dst) + *unused_tuple |= BIT(INNER_DST_IP); + + if (!spec->psrc) + *unused_tuple |= BIT(INNER_SRC_PORT); + + if (!spec->pdst) + *unused_tuple |= BIT(INNER_DST_PORT); + + if (!spec->tos) + *unused_tuple |= BIT(INNER_IP_TOS); + + return 0; +} + +static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec, + u32 *unused_tuple) +{ + if (!spec || !unused_tuple) + return -EINVAL; + + *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) | + BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT); + + if (!spec->ip4src) + *unused_tuple |= BIT(INNER_SRC_IP); + + if (!spec->ip4dst) + *unused_tuple |= BIT(INNER_DST_IP); + + if (!spec->tos) + *unused_tuple |= BIT(INNER_IP_TOS); + + if (!spec->proto) + 
*unused_tuple |= BIT(INNER_IP_PROTO); + + if (spec->l4_4_bytes) + return -EOPNOTSUPP; + + if (spec->ip_ver != ETH_RX_NFC_IP4) + return -EOPNOTSUPP; + + return 0; +} + +static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec, + u32 *unused_tuple) +{ + if (!spec || !unused_tuple) + return -EINVAL; + + *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC); + + /* check whether src/dst ip address used */ + if (ipv6_addr_any((struct in6_addr *)spec->ip6src)) + *unused_tuple |= BIT(INNER_SRC_IP); + + if (ipv6_addr_any((struct in6_addr *)spec->ip6dst)) + *unused_tuple |= BIT(INNER_DST_IP); + + if (!spec->psrc) + *unused_tuple |= BIT(INNER_SRC_PORT); + + if (!spec->pdst) + *unused_tuple |= BIT(INNER_DST_PORT); + + if (!spec->tclass) + *unused_tuple |= BIT(INNER_IP_TOS); + + return 0; +} + +static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec, + u32 *unused_tuple) +{ + if (!spec || !unused_tuple) + return -EINVAL; + + *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) | + BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT); + + /* check whether src/dst ip address used */ + if (ipv6_addr_any((struct in6_addr *)spec->ip6src)) + *unused_tuple |= BIT(INNER_SRC_IP); + + if (ipv6_addr_any((struct in6_addr *)spec->ip6dst)) + *unused_tuple |= BIT(INNER_DST_IP); + + if (!spec->l4_proto) + *unused_tuple |= BIT(INNER_IP_PROTO); + + if (!spec->tclass) + *unused_tuple |= BIT(INNER_IP_TOS); + + if (spec->l4_4_bytes) + return -EOPNOTSUPP; + + return 0; +} + +static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple) +{ + if (!spec || !unused_tuple) + return -EINVAL; + + *unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) | + BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) | + BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO); + + if (is_zero_ether_addr(spec->h_source)) + *unused_tuple |= BIT(INNER_SRC_MAC); + + if (is_zero_ether_addr(spec->h_dest)) + *unused_tuple |= BIT(INNER_DST_MAC); + + if (!spec->h_proto) + *unused_tuple |= BIT(INNER_ETH_TYPE); + + return 0; +} + +static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev, + struct ethtool_rx_flow_spec *fs, + u32 *unused_tuple) +{ + if (fs->flow_type & FLOW_EXT) { + if (fs->h_ext.vlan_etype) { + dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n"); + return -EOPNOTSUPP; + } + + if (!fs->h_ext.vlan_tci) + *unused_tuple |= BIT(INNER_VLAN_TAG_FST); + + if (fs->m_ext.vlan_tci && + be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) { + dev_err(&hdev->pdev->dev, + "failed to config vlan_tci, invalid vlan_tci: %u, max is %d.\n", + ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1); + return -EINVAL; + } + } else { + *unused_tuple |= BIT(INNER_VLAN_TAG_FST); + } + + if (fs->flow_type & FLOW_MAC_EXT) { + if (hdev->fd_cfg.fd_mode != + HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) { + dev_err(&hdev->pdev->dev, + "FLOW_MAC_EXT is not supported in current fd mode!\n"); + return -EOPNOTSUPP; + } + + if (is_zero_ether_addr(fs->h_ext.h_dest)) + *unused_tuple |= BIT(INNER_DST_MAC); + else + *unused_tuple &= ~BIT(INNER_DST_MAC); + } + + return 0; +} + +static int hclge_fd_get_user_def_layer(u32 flow_type, u32 *unused_tuple, + struct hclge_fd_user_def_info *info) +{ + switch (flow_type) { + case ETHER_FLOW: + info->layer = HCLGE_FD_USER_DEF_L2; + *unused_tuple &= ~BIT(INNER_L2_RSV); + break; + case IP_USER_FLOW: + case IPV6_USER_FLOW: + info->layer = HCLGE_FD_USER_DEF_L3; + *unused_tuple &= ~BIT(INNER_L3_RSV); + break; + case TCP_V4_FLOW: + case UDP_V4_FLOW: + case TCP_V6_FLOW: + case UDP_V6_FLOW: + info->layer = HCLGE_FD_USER_DEF_L4; + 
*unused_tuple &= ~BIT(INNER_L4_RSV); + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static bool hclge_fd_is_user_def_all_masked(struct ethtool_rx_flow_spec *fs) +{ + return be32_to_cpu(fs->m_ext.data[1] | fs->m_ext.data[0]) == 0; +} + +static int hclge_fd_parse_user_def_field(struct hclge_dev *hdev, + struct ethtool_rx_flow_spec *fs, + u32 *unused_tuple, + struct hclge_fd_user_def_info *info) +{ + u32 tuple_active = hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1].tuple_active; + u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT); + u16 data, offset, data_mask, offset_mask; + int ret; + + info->layer = HCLGE_FD_USER_DEF_NONE; + *unused_tuple |= HCLGE_FD_TUPLE_USER_DEF_TUPLES; + + if (!(fs->flow_type & FLOW_EXT) || hclge_fd_is_user_def_all_masked(fs)) + return 0; + + /* user-def data from ethtool is 64 bit value, the bit0~15 is used + * for data, and bit32~47 is used for offset. + */ + data = be32_to_cpu(fs->h_ext.data[1]) & HCLGE_FD_USER_DEF_DATA; + data_mask = be32_to_cpu(fs->m_ext.data[1]) & HCLGE_FD_USER_DEF_DATA; + offset = be32_to_cpu(fs->h_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET; + offset_mask = be32_to_cpu(fs->m_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET; + + if (!(tuple_active & HCLGE_FD_TUPLE_USER_DEF_TUPLES)) { + dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n"); + return -EOPNOTSUPP; + } + + if (offset > HCLGE_FD_MAX_USER_DEF_OFFSET) { + dev_err(&hdev->pdev->dev, + "user-def offset[%u] should be no more than %u\n", + offset, HCLGE_FD_MAX_USER_DEF_OFFSET); + return -EINVAL; + } + + if (offset_mask != HCLGE_FD_USER_DEF_OFFSET_UNMASK) { + dev_err(&hdev->pdev->dev, "user-def offset can't be masked\n"); + return -EINVAL; + } + + ret = hclge_fd_get_user_def_layer(flow_type, unused_tuple, info); + if (ret) { + dev_err(&hdev->pdev->dev, + "unsupported flow type for user-def bytes, ret = %d\n", + ret); + return ret; + } + + info->data = data; + info->data_mask = data_mask; + info->offset = offset; + + return 0; +} + +static int hclge_fd_check_spec(struct hclge_dev *hdev, + struct ethtool_rx_flow_spec *fs, + u32 *unused_tuple, + struct hclge_fd_user_def_info *info) +{ + u32 flow_type; + int ret; + + if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) { + dev_err(&hdev->pdev->dev, + "failed to config fd rules, invalid rule location: %u, max is %u\n.", + fs->location, + hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1); + return -EINVAL; + } + + ret = hclge_fd_parse_user_def_field(hdev, fs, unused_tuple, info); + if (ret) + return ret; + + flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT); + switch (flow_type) { + case SCTP_V4_FLOW: + case TCP_V4_FLOW: + case UDP_V4_FLOW: + ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec, + unused_tuple); + break; + case IP_USER_FLOW: + ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec, + unused_tuple); + break; + case SCTP_V6_FLOW: + case TCP_V6_FLOW: + case UDP_V6_FLOW: + ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec, + unused_tuple); + break; + case IPV6_USER_FLOW: + ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec, + unused_tuple); + break; + case ETHER_FLOW: + if (hdev->fd_cfg.fd_mode != + HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) { + dev_err(&hdev->pdev->dev, + "ETHER_FLOW is not supported in current fd mode!\n"); + return -EOPNOTSUPP; + } + + ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec, + unused_tuple); + break; + default: + dev_err(&hdev->pdev->dev, + "unsupported protocol type, protocol type = %#x\n", + flow_type); + return -EOPNOTSUPP; + } + + if (ret) { + 
dev_err(&hdev->pdev->dev, + "failed to check flow union tuple, ret = %d\n", + ret); + return ret; + } + + return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple); +} + +static void hclge_fd_get_tcpip4_tuple(struct hclge_dev *hdev, + struct ethtool_rx_flow_spec *fs, + struct hclge_fd_rule *rule, u8 ip_proto) +{ + rule->tuples.src_ip[IPV4_INDEX] = + be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src); + rule->tuples_mask.src_ip[IPV4_INDEX] = + be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src); + + rule->tuples.dst_ip[IPV4_INDEX] = + be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst); + rule->tuples_mask.dst_ip[IPV4_INDEX] = + be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst); + + rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc); + rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc); + + rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst); + rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst); + + rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos; + rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos; + + rule->tuples.ether_proto = ETH_P_IP; + rule->tuples_mask.ether_proto = 0xFFFF; + + rule->tuples.ip_proto = ip_proto; + rule->tuples_mask.ip_proto = 0xFF; +} + +static void hclge_fd_get_ip4_tuple(struct hclge_dev *hdev, + struct ethtool_rx_flow_spec *fs, + struct hclge_fd_rule *rule) +{ + rule->tuples.src_ip[IPV4_INDEX] = + be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src); + rule->tuples_mask.src_ip[IPV4_INDEX] = + be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src); + + rule->tuples.dst_ip[IPV4_INDEX] = + be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst); + rule->tuples_mask.dst_ip[IPV4_INDEX] = + be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst); + + rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos; + rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos; + + rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto; + rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto; + + rule->tuples.ether_proto = ETH_P_IP; + rule->tuples_mask.ether_proto = 0xFFFF; +} + +static void hclge_fd_get_tcpip6_tuple(struct hclge_dev *hdev, + struct ethtool_rx_flow_spec *fs, + struct hclge_fd_rule *rule, u8 ip_proto) +{ + be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.tcp_ip6_spec.ip6src, + IPV6_SIZE); + be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.tcp_ip6_spec.ip6src, + IPV6_SIZE); + + be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.tcp_ip6_spec.ip6dst, + IPV6_SIZE); + be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.tcp_ip6_spec.ip6dst, + IPV6_SIZE); + + rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc); + rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc); + + rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst); + rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst); + + rule->tuples.ether_proto = ETH_P_IPV6; + rule->tuples_mask.ether_proto = 0xFFFF; + + rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass; + rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass; + + rule->tuples.ip_proto = ip_proto; + rule->tuples_mask.ip_proto = 0xFF; +} + +static void hclge_fd_get_ip6_tuple(struct hclge_dev *hdev, + struct ethtool_rx_flow_spec *fs, + struct hclge_fd_rule *rule) +{ + be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.usr_ip6_spec.ip6src, + IPV6_SIZE); + be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.usr_ip6_spec.ip6src, + IPV6_SIZE); + + be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.usr_ip6_spec.ip6dst, + IPV6_SIZE); + be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.usr_ip6_spec.ip6dst, + IPV6_SIZE); + + rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto; + 
rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto; + + rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass; + rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass; + + rule->tuples.ether_proto = ETH_P_IPV6; + rule->tuples_mask.ether_proto = 0xFFFF; +} + +static void hclge_fd_get_ether_tuple(struct hclge_dev *hdev, + struct ethtool_rx_flow_spec *fs, + struct hclge_fd_rule *rule) +{ + ether_addr_copy(rule->tuples.src_mac, fs->h_u.ether_spec.h_source); + ether_addr_copy(rule->tuples_mask.src_mac, fs->m_u.ether_spec.h_source); + + ether_addr_copy(rule->tuples.dst_mac, fs->h_u.ether_spec.h_dest); + ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_u.ether_spec.h_dest); + + rule->tuples.ether_proto = be16_to_cpu(fs->h_u.ether_spec.h_proto); + rule->tuples_mask.ether_proto = be16_to_cpu(fs->m_u.ether_spec.h_proto); +} + +static void hclge_fd_get_user_def_tuple(struct hclge_fd_user_def_info *info, + struct hclge_fd_rule *rule) +{ + switch (info->layer) { + case HCLGE_FD_USER_DEF_L2: + rule->tuples.l2_user_def = info->data; + rule->tuples_mask.l2_user_def = info->data_mask; + break; + case HCLGE_FD_USER_DEF_L3: + rule->tuples.l3_user_def = info->data; + rule->tuples_mask.l3_user_def = info->data_mask; + break; + case HCLGE_FD_USER_DEF_L4: + rule->tuples.l4_user_def = (u32)info->data << 16; + rule->tuples_mask.l4_user_def = (u32)info->data_mask << 16; + break; + default: + break; + } + + rule->ep.user_def = *info; +} + +static int hclge_fd_get_tuple(struct hclge_dev *hdev, + struct ethtool_rx_flow_spec *fs, + struct hclge_fd_rule *rule, + struct hclge_fd_user_def_info *info) +{ + u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT); + + switch (flow_type) { + case SCTP_V4_FLOW: + hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_SCTP); + break; + case TCP_V4_FLOW: + hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_TCP); + break; + case UDP_V4_FLOW: + hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_UDP); + break; + case IP_USER_FLOW: + hclge_fd_get_ip4_tuple(hdev, fs, rule); + break; + case SCTP_V6_FLOW: + hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_SCTP); + break; + case TCP_V6_FLOW: + hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_TCP); + break; + case UDP_V6_FLOW: + hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_UDP); + break; + case IPV6_USER_FLOW: + hclge_fd_get_ip6_tuple(hdev, fs, rule); + break; + case ETHER_FLOW: + hclge_fd_get_ether_tuple(hdev, fs, rule); + break; + default: + return -EOPNOTSUPP; + } + + if (fs->flow_type & FLOW_EXT) { + rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci); + rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci); + hclge_fd_get_user_def_tuple(info, rule); + } + + if (fs->flow_type & FLOW_MAC_EXT) { + ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest); + ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest); + } + + return 0; +} + +static int hclge_fd_config_rule(struct hclge_dev *hdev, + struct hclge_fd_rule *rule) +{ + int ret; + + ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule); + if (ret) + return ret; + + return hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule); +} + +static int hclge_add_fd_entry_common(struct hclge_dev *hdev, + struct hclge_fd_rule *rule) +{ + int ret; + + spin_lock_bh(&hdev->fd_rule_lock); + + if (hdev->fd_active_type != rule->rule_type && + (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE || + hdev->fd_active_type == HCLGE_FD_EP_ACTIVE)) { + dev_err(&hdev->pdev->dev, + "mode conflict(new type %d, active type %d), please delete existent rules first\n", + rule->rule_type, 
hdev->fd_active_type); + spin_unlock_bh(&hdev->fd_rule_lock); + return -EINVAL; + } + + ret = hclge_fd_check_user_def_refcnt(hdev, rule); + if (ret) + goto out; + + ret = hclge_clear_arfs_rules(hdev); + if (ret) + goto out; + + ret = hclge_fd_config_rule(hdev, rule); + if (ret) + goto out; + + rule->state = HCLGE_FD_ACTIVE; + hdev->fd_active_type = rule->rule_type; + hclge_update_fd_list(hdev, rule->state, rule->location, rule); + +out: + spin_unlock_bh(&hdev->fd_rule_lock); + return ret; +} + +static bool hclge_is_cls_flower_active(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE; +} + +static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie, + u16 *vport_id, u8 *action, u16 *queue_id) +{ + struct hclge_vport *vport = hdev->vport; + + if (ring_cookie == RX_CLS_FLOW_DISC) { + *action = HCLGE_FD_ACTION_DROP_PACKET; + } else { + u32 ring = ethtool_get_flow_spec_ring(ring_cookie); + u8 vf = ethtool_get_flow_spec_ring_vf(ring_cookie); + u16 tqps; + + /* To keep consistent with user's configuration, minus 1 when + * printing 'vf', because vf id from ethtool is added 1 for vf. + */ + if (vf > hdev->num_req_vfs) { + dev_err(&hdev->pdev->dev, + "Error: vf id (%u) should be less than %u\n", + vf - 1U, hdev->num_req_vfs); + return -EINVAL; + } + + *vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id; + tqps = hdev->vport[vf].nic.kinfo.num_tqps; + + if (ring >= tqps) { + dev_err(&hdev->pdev->dev, + "Error: queue id (%u) > max tqp num (%u)\n", + ring, tqps - 1U); + return -EINVAL; + } + + *action = HCLGE_FD_ACTION_SELECT_QUEUE; + *queue_id = ring; + } + + return 0; +} + +static int hclge_add_fd_entry(struct hnae3_handle *handle, + struct ethtool_rxnfc *cmd) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct hclge_fd_user_def_info info; + u16 dst_vport_id = 0, q_index = 0; + struct ethtool_rx_flow_spec *fs; + struct hclge_fd_rule *rule; + u32 unused = 0; + u8 action; + int ret; + + if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) { + dev_err(&hdev->pdev->dev, + "flow table director is not supported\n"); + return -EOPNOTSUPP; + } + + if (!hdev->fd_en) { + dev_err(&hdev->pdev->dev, + "please enable flow director first\n"); + return -EOPNOTSUPP; + } + + fs = (struct ethtool_rx_flow_spec *)&cmd->fs; + + ret = hclge_fd_check_spec(hdev, fs, &unused, &info); + if (ret) + return ret; + + ret = hclge_fd_parse_ring_cookie(hdev, fs->ring_cookie, &dst_vport_id, + &action, &q_index); + if (ret) + return ret; + + rule = kzalloc(sizeof(*rule), GFP_KERNEL); + if (!rule) + return -ENOMEM; + + ret = hclge_fd_get_tuple(hdev, fs, rule, &info); + if (ret) { + kfree(rule); + return ret; + } + + rule->flow_type = fs->flow_type; + rule->location = fs->location; + rule->unused_tuple = unused; + rule->vf_id = dst_vport_id; + rule->queue_id = q_index; + rule->action = action; + rule->rule_type = HCLGE_FD_EP_ACTIVE; + + ret = hclge_add_fd_entry_common(hdev, rule); + if (ret) + kfree(rule); + + return ret; +} + +static int hclge_del_fd_entry(struct hnae3_handle *handle, + struct ethtool_rxnfc *cmd) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct ethtool_rx_flow_spec *fs; + int ret; + + if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) + return -EOPNOTSUPP; + + fs = (struct ethtool_rx_flow_spec *)&cmd->fs; + + if (fs->location >= 
hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) + return -EINVAL; + + spin_lock_bh(&hdev->fd_rule_lock); + if (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE || + !test_bit(fs->location, hdev->fd_bmap)) { + dev_err(&hdev->pdev->dev, + "Delete fail, rule %u is inexistent\n", fs->location); + spin_unlock_bh(&hdev->fd_rule_lock); + return -ENOENT; + } + + ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location, + NULL, false); + if (ret) + goto out; + + hclge_update_fd_list(hdev, HCLGE_FD_DELETED, fs->location, NULL); + +out: + spin_unlock_bh(&hdev->fd_rule_lock); + return ret; +} + +static void hclge_clear_fd_rules_in_list(struct hclge_dev *hdev, + bool clear_list) +{ + struct hclge_fd_rule *rule; + struct hlist_node *node; + u16 location; + + spin_lock_bh(&hdev->fd_rule_lock); + + for_each_set_bit(location, hdev->fd_bmap, + hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) + hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location, + NULL, false); + + if (clear_list) { + hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, + rule_node) { + hlist_del(&rule->rule_node); + kfree(rule); + } + hdev->fd_active_type = HCLGE_FD_RULE_NONE; + hdev->hclge_fd_rule_num = 0; + bitmap_zero(hdev->fd_bmap, + hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]); + } + + spin_unlock_bh(&hdev->fd_rule_lock); +} + +static void hclge_del_all_fd_entries(struct hclge_dev *hdev) +{ + if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) + return; + + hclge_clear_fd_rules_in_list(hdev, true); + hclge_fd_disable_user_def(hdev); +} + +static int hclge_restore_fd_entries(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct hclge_fd_rule *rule; + struct hlist_node *node; + + /* Return ok here, because reset error handling will check this + * return value. If error is returned here, the reset process will + * fail. + */ + if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) + return 0; + + /* if fd is disabled, should not restore it when reset */ + if (!hdev->fd_en) + return 0; + + spin_lock_bh(&hdev->fd_rule_lock); + hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { + if (rule->state == HCLGE_FD_ACTIVE) + rule->state = HCLGE_FD_TO_ADD; + } + spin_unlock_bh(&hdev->fd_rule_lock); + set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state); + + return 0; +} + +static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle, + struct ethtool_rxnfc *cmd) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + if (!hnae3_ae_dev_fd_supported(hdev->ae_dev) || hclge_is_cls_flower_active(handle)) + return -EOPNOTSUPP; + + cmd->rule_cnt = hdev->hclge_fd_rule_num; + cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]; + + return 0; +} + +static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule, + struct ethtool_tcpip4_spec *spec, + struct ethtool_tcpip4_spec *spec_mask) +{ + spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]); + spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ? + 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]); + + spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]); + spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ? + 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]); + + spec->psrc = cpu_to_be16(rule->tuples.src_port); + spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ? 
+ 0 : cpu_to_be16(rule->tuples_mask.src_port); + + spec->pdst = cpu_to_be16(rule->tuples.dst_port); + spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ? + 0 : cpu_to_be16(rule->tuples_mask.dst_port); + + spec->tos = rule->tuples.ip_tos; + spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ? + 0 : rule->tuples_mask.ip_tos; +} + +static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule, + struct ethtool_usrip4_spec *spec, + struct ethtool_usrip4_spec *spec_mask) +{ + spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]); + spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ? + 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]); + + spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]); + spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ? + 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]); + + spec->tos = rule->tuples.ip_tos; + spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ? + 0 : rule->tuples_mask.ip_tos; + + spec->proto = rule->tuples.ip_proto; + spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ? + 0 : rule->tuples_mask.ip_proto; + + spec->ip_ver = ETH_RX_NFC_IP4; +} + +static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule, + struct ethtool_tcpip6_spec *spec, + struct ethtool_tcpip6_spec *spec_mask) +{ + cpu_to_be32_array(spec->ip6src, + rule->tuples.src_ip, IPV6_SIZE); + cpu_to_be32_array(spec->ip6dst, + rule->tuples.dst_ip, IPV6_SIZE); + if (rule->unused_tuple & BIT(INNER_SRC_IP)) + memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src)); + else + cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip, + IPV6_SIZE); + + if (rule->unused_tuple & BIT(INNER_DST_IP)) + memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst)); + else + cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip, + IPV6_SIZE); + + spec->tclass = rule->tuples.ip_tos; + spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ? + 0 : rule->tuples_mask.ip_tos; + + spec->psrc = cpu_to_be16(rule->tuples.src_port); + spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ? + 0 : cpu_to_be16(rule->tuples_mask.src_port); + + spec->pdst = cpu_to_be16(rule->tuples.dst_port); + spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ? + 0 : cpu_to_be16(rule->tuples_mask.dst_port); +} + +static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule, + struct ethtool_usrip6_spec *spec, + struct ethtool_usrip6_spec *spec_mask) +{ + cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE); + cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE); + if (rule->unused_tuple & BIT(INNER_SRC_IP)) + memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src)); + else + cpu_to_be32_array(spec_mask->ip6src, + rule->tuples_mask.src_ip, IPV6_SIZE); + + if (rule->unused_tuple & BIT(INNER_DST_IP)) + memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst)); + else + cpu_to_be32_array(spec_mask->ip6dst, + rule->tuples_mask.dst_ip, IPV6_SIZE); + + spec->tclass = rule->tuples.ip_tos; + spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ? + 0 : rule->tuples_mask.ip_tos; + + spec->l4_proto = rule->tuples.ip_proto; + spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ? 
+ 0 : rule->tuples_mask.ip_proto; +} + +static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule, + struct ethhdr *spec, + struct ethhdr *spec_mask) +{ + ether_addr_copy(spec->h_source, rule->tuples.src_mac); + ether_addr_copy(spec->h_dest, rule->tuples.dst_mac); + + if (rule->unused_tuple & BIT(INNER_SRC_MAC)) + eth_zero_addr(spec_mask->h_source); + else + ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac); + + if (rule->unused_tuple & BIT(INNER_DST_MAC)) + eth_zero_addr(spec_mask->h_dest); + else + ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac); + + spec->h_proto = cpu_to_be16(rule->tuples.ether_proto); + spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ? + 0 : cpu_to_be16(rule->tuples_mask.ether_proto); +} + +static void hclge_fd_get_user_def_info(struct ethtool_rx_flow_spec *fs, + struct hclge_fd_rule *rule) +{ + if ((rule->unused_tuple & HCLGE_FD_TUPLE_USER_DEF_TUPLES) == + HCLGE_FD_TUPLE_USER_DEF_TUPLES) { + fs->h_ext.data[0] = 0; + fs->h_ext.data[1] = 0; + fs->m_ext.data[0] = 0; + fs->m_ext.data[1] = 0; + } else { + fs->h_ext.data[0] = cpu_to_be32(rule->ep.user_def.offset); + fs->h_ext.data[1] = cpu_to_be32(rule->ep.user_def.data); + fs->m_ext.data[0] = + cpu_to_be32(HCLGE_FD_USER_DEF_OFFSET_UNMASK); + fs->m_ext.data[1] = cpu_to_be32(rule->ep.user_def.data_mask); + } +} + +static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs, + struct hclge_fd_rule *rule) +{ + if (fs->flow_type & FLOW_EXT) { + fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1); + fs->m_ext.vlan_tci = + rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ? + 0 : cpu_to_be16(rule->tuples_mask.vlan_tag1); + + hclge_fd_get_user_def_info(fs, rule); + } + + if (fs->flow_type & FLOW_MAC_EXT) { + ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac); + if (rule->unused_tuple & BIT(INNER_DST_MAC)) + eth_zero_addr(fs->m_u.ether_spec.h_dest); + else + ether_addr_copy(fs->m_u.ether_spec.h_dest, + rule->tuples_mask.dst_mac); + } +} + +static struct hclge_fd_rule *hclge_get_fd_rule(struct hclge_dev *hdev, + u16 location) +{ + struct hclge_fd_rule *rule = NULL; + struct hlist_node *node2; + + hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) { + if (rule->location == location) + return rule; + else if (rule->location > location) + return NULL; + } + + return NULL; +} + +static void hclge_fd_get_ring_cookie(struct ethtool_rx_flow_spec *fs, + struct hclge_fd_rule *rule) +{ + if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) { + fs->ring_cookie = RX_CLS_FLOW_DISC; + } else { + u64 vf_id; + + fs->ring_cookie = rule->queue_id; + vf_id = rule->vf_id; + vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF; + fs->ring_cookie |= vf_id; + } +} + +static int hclge_get_fd_rule_info(struct hnae3_handle *handle, + struct ethtool_rxnfc *cmd) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_fd_rule *rule = NULL; + struct hclge_dev *hdev = vport->back; + struct ethtool_rx_flow_spec *fs; + + if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) + return -EOPNOTSUPP; + + fs = (struct ethtool_rx_flow_spec *)&cmd->fs; + + spin_lock_bh(&hdev->fd_rule_lock); + + rule = hclge_get_fd_rule(hdev, fs->location); + if (!rule) { + spin_unlock_bh(&hdev->fd_rule_lock); + return -ENOENT; + } + + fs->flow_type = rule->flow_type; + switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { + case SCTP_V4_FLOW: + case TCP_V4_FLOW: + case UDP_V4_FLOW: + hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec, + &fs->m_u.tcp_ip4_spec); + break; + case IP_USER_FLOW: + 
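+	/* IP_USER_FLOW covers IPv4 rules other than TCP/UDP/SCTP; they are
+	 * reported through the usr_ip4_spec layout (addresses, tos and
+	 * l4 proto) filled in below.
+	 */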
hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec, + &fs->m_u.usr_ip4_spec); + break; + case SCTP_V6_FLOW: + case TCP_V6_FLOW: + case UDP_V6_FLOW: + hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec, + &fs->m_u.tcp_ip6_spec); + break; + case IPV6_USER_FLOW: + hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec, + &fs->m_u.usr_ip6_spec); + break; + /* The flow type of fd rule has been checked before adding in to rule + * list. As other flow types have been handled, it must be ETHER_FLOW + * for the default case + */ + default: + hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec, + &fs->m_u.ether_spec); + break; + } + + hclge_fd_get_ext_info(fs, rule); + + hclge_fd_get_ring_cookie(fs, rule); + + spin_unlock_bh(&hdev->fd_rule_lock); + + return 0; +} + +static int hclge_get_all_rules(struct hnae3_handle *handle, + struct ethtool_rxnfc *cmd, u32 *rule_locs) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct hclge_fd_rule *rule; + struct hlist_node *node2; + int cnt = 0; + + if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) + return -EOPNOTSUPP; + + cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]; + + spin_lock_bh(&hdev->fd_rule_lock); + hlist_for_each_entry_safe(rule, node2, + &hdev->fd_rule_list, rule_node) { + if (cnt == cmd->rule_cnt) { + spin_unlock_bh(&hdev->fd_rule_lock); + return -EMSGSIZE; + } + + if (rule->state == HCLGE_FD_TO_DEL) + continue; + + rule_locs[cnt] = rule->location; + cnt++; + } + + spin_unlock_bh(&hdev->fd_rule_lock); + + cmd->rule_cnt = cnt; + + return 0; +} + +static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys, + struct hclge_fd_rule_tuples *tuples) +{ +#define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32 +#define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32 + + tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto); + tuples->ip_proto = fkeys->basic.ip_proto; + tuples->dst_port = be16_to_cpu(fkeys->ports.dst); + + if (fkeys->basic.n_proto == htons(ETH_P_IP)) { + tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src); + tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst); + } else { + int i; + + for (i = 0; i < IPV6_SIZE; i++) { + tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]); + tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]); + } + } +} + +/* traverse all rules, check whether an existed rule has the same tuples */ +static struct hclge_fd_rule * +hclge_fd_search_flow_keys(struct hclge_dev *hdev, + const struct hclge_fd_rule_tuples *tuples) +{ + struct hclge_fd_rule *rule = NULL; + struct hlist_node *node; + + hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { + if (!memcmp(tuples, &rule->tuples, sizeof(*tuples))) + return rule; + } + + return NULL; +} + +static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples, + struct hclge_fd_rule *rule) +{ + rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) | + BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) | + BIT(INNER_SRC_PORT); + rule->action = 0; + rule->vf_id = 0; + rule->rule_type = HCLGE_FD_ARFS_ACTIVE; + rule->state = HCLGE_FD_TO_ADD; + if (tuples->ether_proto == ETH_P_IP) { + if (tuples->ip_proto == IPPROTO_TCP) + rule->flow_type = TCP_V4_FLOW; + else + rule->flow_type = UDP_V4_FLOW; + } else { + if (tuples->ip_proto == IPPROTO_TCP) + rule->flow_type = TCP_V6_FLOW; + else + rule->flow_type = UDP_V6_FLOW; + } + memcpy(&rule->tuples, tuples, sizeof(rule->tuples)); + memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask)); +} + +static int 
hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id, + u16 flow_id, struct flow_keys *fkeys) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_fd_rule_tuples new_tuples = {}; + struct hclge_dev *hdev = vport->back; + struct hclge_fd_rule *rule; + u16 bit_id; + + if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) + return -EOPNOTSUPP; + + /* when there is already fd rule existed add by user, + * arfs should not work + */ + spin_lock_bh(&hdev->fd_rule_lock); + if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE && + hdev->fd_active_type != HCLGE_FD_RULE_NONE) { + spin_unlock_bh(&hdev->fd_rule_lock); + return -EOPNOTSUPP; + } + + hclge_fd_get_flow_tuples(fkeys, &new_tuples); + + /* check is there flow director filter existed for this flow, + * if not, create a new filter for it; + * if filter exist with different queue id, modify the filter; + * if filter exist with same queue id, do nothing + */ + rule = hclge_fd_search_flow_keys(hdev, &new_tuples); + if (!rule) { + bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM); + if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) { + spin_unlock_bh(&hdev->fd_rule_lock); + return -ENOSPC; + } + + rule = kzalloc(sizeof(*rule), GFP_ATOMIC); + if (!rule) { + spin_unlock_bh(&hdev->fd_rule_lock); + return -ENOMEM; + } + + rule->location = bit_id; + rule->arfs.flow_id = flow_id; + rule->queue_id = queue_id; + hclge_fd_build_arfs_rule(&new_tuples, rule); + hclge_update_fd_list(hdev, rule->state, rule->location, rule); + hdev->fd_active_type = HCLGE_FD_ARFS_ACTIVE; + } else if (rule->queue_id != queue_id) { + rule->queue_id = queue_id; + rule->state = HCLGE_FD_TO_ADD; + set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state); + hclge_task_schedule(hdev, 0); + } + spin_unlock_bh(&hdev->fd_rule_lock); + return rule->location; +} + +static void hclge_rfs_filter_expire(struct hclge_dev *hdev) +{ +#ifdef CONFIG_RFS_ACCEL + struct hnae3_handle *handle = &hdev->vport[0].nic; + struct hclge_fd_rule *rule; + struct hlist_node *node; + + spin_lock_bh(&hdev->fd_rule_lock); + if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) { + spin_unlock_bh(&hdev->fd_rule_lock); + return; + } + hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { + if (rule->state != HCLGE_FD_ACTIVE) + continue; + if (rps_may_expire_flow(handle->netdev, rule->queue_id, + rule->arfs.flow_id, rule->location)) { + rule->state = HCLGE_FD_TO_DEL; + set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state); + } + } + spin_unlock_bh(&hdev->fd_rule_lock); +#endif +} + +/* make sure being called after lock up with fd_rule_lock */ +static int hclge_clear_arfs_rules(struct hclge_dev *hdev) +{ +#ifdef CONFIG_RFS_ACCEL + struct hclge_fd_rule *rule; + struct hlist_node *node; + int ret; + + if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) + return 0; + + hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { + switch (rule->state) { + case HCLGE_FD_TO_DEL: + case HCLGE_FD_ACTIVE: + ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, + rule->location, NULL, false); + if (ret) + return ret; + fallthrough; + case HCLGE_FD_TO_ADD: + hclge_fd_dec_rule_cnt(hdev, rule->location); + hlist_del(&rule->rule_node); + kfree(rule); + break; + default: + break; + } + } + hclge_sync_fd_state(hdev); + +#endif + return 0; +} + +static void hclge_get_cls_key_basic(const struct flow_rule *flow, + struct hclge_fd_rule *rule) +{ + if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_match_basic match; + u16 ethtype_key, ethtype_mask; + + 
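+		/* The flower basic key supplies n_proto and ip_proto; an
+		 * ETH_P_ALL ethertype acts as a wildcard, so both key and
+		 * mask are cleared before being copied into the rule tuples.
+		 */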
flow_rule_match_basic(flow, &match); + ethtype_key = ntohs(match.key->n_proto); + ethtype_mask = ntohs(match.mask->n_proto); + + if (ethtype_key == ETH_P_ALL) { + ethtype_key = 0; + ethtype_mask = 0; + } + rule->tuples.ether_proto = ethtype_key; + rule->tuples_mask.ether_proto = ethtype_mask; + rule->tuples.ip_proto = match.key->ip_proto; + rule->tuples_mask.ip_proto = match.mask->ip_proto; + } else { + rule->unused_tuple |= BIT(INNER_IP_PROTO); + rule->unused_tuple |= BIT(INNER_ETH_TYPE); + } +} + +static void hclge_get_cls_key_mac(const struct flow_rule *flow, + struct hclge_fd_rule *rule) +{ + if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { + struct flow_match_eth_addrs match; + + flow_rule_match_eth_addrs(flow, &match); + ether_addr_copy(rule->tuples.dst_mac, match.key->dst); + ether_addr_copy(rule->tuples_mask.dst_mac, match.mask->dst); + ether_addr_copy(rule->tuples.src_mac, match.key->src); + ether_addr_copy(rule->tuples_mask.src_mac, match.mask->src); + } else { + rule->unused_tuple |= BIT(INNER_DST_MAC); + rule->unused_tuple |= BIT(INNER_SRC_MAC); + } +} + +static void hclge_get_cls_key_vlan(const struct flow_rule *flow, + struct hclge_fd_rule *rule) +{ + if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) { + struct flow_match_vlan match; + + flow_rule_match_vlan(flow, &match); + rule->tuples.vlan_tag1 = match.key->vlan_id | + (match.key->vlan_priority << VLAN_PRIO_SHIFT); + rule->tuples_mask.vlan_tag1 = match.mask->vlan_id | + (match.mask->vlan_priority << VLAN_PRIO_SHIFT); + } else { + rule->unused_tuple |= BIT(INNER_VLAN_TAG_FST); + } +} + +static void hclge_get_cls_key_ip(const struct flow_rule *flow, + struct hclge_fd_rule *rule) +{ + u16 addr_type = 0; + + if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_CONTROL)) { + struct flow_match_control match; + + flow_rule_match_control(flow, &match); + addr_type = match.key->addr_type; + } + + if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { + struct flow_match_ipv4_addrs match; + + flow_rule_match_ipv4_addrs(flow, &match); + rule->tuples.src_ip[IPV4_INDEX] = be32_to_cpu(match.key->src); + rule->tuples_mask.src_ip[IPV4_INDEX] = + be32_to_cpu(match.mask->src); + rule->tuples.dst_ip[IPV4_INDEX] = be32_to_cpu(match.key->dst); + rule->tuples_mask.dst_ip[IPV4_INDEX] = + be32_to_cpu(match.mask->dst); + } else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { + struct flow_match_ipv6_addrs match; + + flow_rule_match_ipv6_addrs(flow, &match); + be32_to_cpu_array(rule->tuples.src_ip, match.key->src.s6_addr32, + IPV6_SIZE); + be32_to_cpu_array(rule->tuples_mask.src_ip, + match.mask->src.s6_addr32, IPV6_SIZE); + be32_to_cpu_array(rule->tuples.dst_ip, match.key->dst.s6_addr32, + IPV6_SIZE); + be32_to_cpu_array(rule->tuples_mask.dst_ip, + match.mask->dst.s6_addr32, IPV6_SIZE); + } else { + rule->unused_tuple |= BIT(INNER_SRC_IP); + rule->unused_tuple |= BIT(INNER_DST_IP); + } +} + +static void hclge_get_cls_key_port(const struct flow_rule *flow, + struct hclge_fd_rule *rule) +{ + if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) { + struct flow_match_ports match; + + flow_rule_match_ports(flow, &match); + + rule->tuples.src_port = be16_to_cpu(match.key->src); + rule->tuples_mask.src_port = be16_to_cpu(match.mask->src); + rule->tuples.dst_port = be16_to_cpu(match.key->dst); + rule->tuples_mask.dst_port = be16_to_cpu(match.mask->dst); + } else { + rule->unused_tuple |= BIT(INNER_SRC_PORT); + rule->unused_tuple |= BIT(INNER_DST_PORT); + } +} + +static int hclge_parse_cls_flower(struct hclge_dev *hdev, + struct 
flow_cls_offload *cls_flower, + struct hclge_fd_rule *rule) +{ + struct flow_rule *flow = flow_cls_offload_flow_rule(cls_flower); + struct flow_dissector *dissector = flow->match.dissector; + + if (dissector->used_keys & + ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | + BIT(FLOW_DISSECTOR_KEY_BASIC) | + BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_VLAN) | + BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_PORTS))) { + dev_err(&hdev->pdev->dev, "unsupported key set: %#x\n", + dissector->used_keys); + return -EOPNOTSUPP; + } + + hclge_get_cls_key_basic(flow, rule); + hclge_get_cls_key_mac(flow, rule); + hclge_get_cls_key_vlan(flow, rule); + hclge_get_cls_key_ip(flow, rule); + hclge_get_cls_key_port(flow, rule); + + return 0; +} + +static int hclge_check_cls_flower(struct hclge_dev *hdev, + struct flow_cls_offload *cls_flower, int tc) +{ + u32 prio = cls_flower->common.prio; + + if (tc < 0 || tc > hdev->tc_max) { + dev_err(&hdev->pdev->dev, "invalid traffic class\n"); + return -EINVAL; + } + + if (prio == 0 || + prio > hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) { + dev_err(&hdev->pdev->dev, + "prio %u should be in range[1, %u]\n", + prio, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]); + return -EINVAL; + } + + if (test_bit(prio - 1, hdev->fd_bmap)) { + dev_err(&hdev->pdev->dev, "prio %u is already used\n", prio); + return -EINVAL; + } + return 0; +} + +static int hclge_add_cls_flower(struct hnae3_handle *handle, + struct flow_cls_offload *cls_flower, + int tc) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct hclge_fd_rule *rule; + int ret; + + if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) { + dev_err(&hdev->pdev->dev, + "cls flower is not supported\n"); + return -EOPNOTSUPP; + } + + ret = hclge_check_cls_flower(hdev, cls_flower, tc); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to check cls flower params, ret = %d\n", ret); + return ret; + } + + rule = kzalloc(sizeof(*rule), GFP_KERNEL); + if (!rule) + return -ENOMEM; + + ret = hclge_parse_cls_flower(hdev, cls_flower, rule); + if (ret) { + kfree(rule); + return ret; + } + + rule->action = HCLGE_FD_ACTION_SELECT_TC; + rule->cls_flower.tc = tc; + rule->location = cls_flower->common.prio - 1; + rule->vf_id = 0; + rule->cls_flower.cookie = cls_flower->cookie; + rule->rule_type = HCLGE_FD_TC_FLOWER_ACTIVE; + + ret = hclge_add_fd_entry_common(hdev, rule); + if (ret) + kfree(rule); + + return ret; +} + +static struct hclge_fd_rule *hclge_find_cls_flower(struct hclge_dev *hdev, + unsigned long cookie) +{ + struct hclge_fd_rule *rule; + struct hlist_node *node; + + hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { + if (rule->cls_flower.cookie == cookie) + return rule; + } + + return NULL; +} + +static int hclge_del_cls_flower(struct hnae3_handle *handle, + struct flow_cls_offload *cls_flower) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct hclge_fd_rule *rule; + int ret; + + if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) + return -EOPNOTSUPP; + + spin_lock_bh(&hdev->fd_rule_lock); + + rule = hclge_find_cls_flower(hdev, cls_flower->cookie); + if (!rule) { + spin_unlock_bh(&hdev->fd_rule_lock); + return -EINVAL; + } + + ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location, + NULL, false); + if (ret) { + /* if tcam config fail, set rule state to TO_DEL, + * so the rule will be deleted when periodic + * task being scheduled. 
+ */ + hclge_update_fd_list(hdev, HCLGE_FD_TO_DEL, rule->location, NULL); + set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state); + spin_unlock_bh(&hdev->fd_rule_lock); + return ret; + } + + hclge_update_fd_list(hdev, HCLGE_FD_DELETED, rule->location, NULL); + spin_unlock_bh(&hdev->fd_rule_lock); + + return 0; +} + +static void hclge_sync_fd_list(struct hclge_dev *hdev, struct hlist_head *hlist) +{ + struct hclge_fd_rule *rule; + struct hlist_node *node; + int ret = 0; + + if (!test_and_clear_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state)) + return; + + spin_lock_bh(&hdev->fd_rule_lock); + + hlist_for_each_entry_safe(rule, node, hlist, rule_node) { + switch (rule->state) { + case HCLGE_FD_TO_ADD: + ret = hclge_fd_config_rule(hdev, rule); + if (ret) + goto out; + rule->state = HCLGE_FD_ACTIVE; + break; + case HCLGE_FD_TO_DEL: + ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, + rule->location, NULL, false); + if (ret) + goto out; + hclge_fd_dec_rule_cnt(hdev, rule->location); + hclge_fd_free_node(hdev, rule); + break; + default: + break; + } + } + +out: + if (ret) + set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state); + + spin_unlock_bh(&hdev->fd_rule_lock); +} + +static void hclge_sync_fd_table(struct hclge_dev *hdev) +{ + if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) + return; + + if (test_and_clear_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state)) { + bool clear_list = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE; + + hclge_clear_fd_rules_in_list(hdev, clear_list); + } + + hclge_sync_fd_user_def_cfg(hdev, false); + + hclge_sync_fd_list(hdev, &hdev->fd_rule_list); +} + +static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) || + hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING); +} + +static bool hclge_get_cmdq_stat(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + return test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); +} + +static bool hclge_ae_dev_resetting(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); +} + +static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + return hdev->rst_stats.hw_reset_done_cnt; +} + +static void hclge_enable_fd(struct hnae3_handle *handle, bool enable) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + hdev->fd_en = enable; + + if (!enable) + set_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state); + else + hclge_restore_fd_entries(handle); + + hclge_task_schedule(hdev, 0); +} + +static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable) +{ +#define HCLGE_LINK_STATUS_WAIT_CNT 3 + + struct hclge_desc desc; + struct hclge_config_mac_mode_cmd *req = + (struct hclge_config_mac_mode_cmd *)desc.data; + u32 loop_en = 0; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false); + + if (enable) { + hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U); + hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U); + hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U); + hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U); + hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U); + hnae3_set_bit(loop_en, 
HCLGE_MAC_RX_FCS_B, 1U); + hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U); + hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U); + hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U); + hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U); + } + + req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "mac enable fail, ret =%d.\n", ret); + return; + } + + if (!enable) + hclge_mac_link_status_wait(hdev, HCLGE_LINK_STATUS_DOWN, + HCLGE_LINK_STATUS_WAIT_CNT); +} + +static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid, + u8 switch_param, u8 param_mask) +{ + struct hclge_mac_vlan_switch_cmd *req; + struct hclge_desc desc; + u32 func_id; + int ret; + + func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0); + req = (struct hclge_mac_vlan_switch_cmd *)desc.data; + + /* read current config parameter */ + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM, + true); + req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL; + req->func_id = cpu_to_le32(func_id); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "read mac vlan switch parameter fail, ret = %d\n", ret); + return ret; + } + + /* modify and write new config parameter */ + hclge_comm_cmd_reuse_desc(&desc, false); + req->switch_param = (req->switch_param & param_mask) | switch_param; + req->param_mask = param_mask; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "set mac vlan switch parameter fail, ret = %d\n", ret); + return ret; +} + +static void hclge_phy_link_status_wait(struct hclge_dev *hdev, + int link_ret) +{ +#define HCLGE_PHY_LINK_STATUS_NUM 200 + + struct phy_device *phydev = hdev->hw.mac.phydev; + int i = 0; + int ret; + + do { + ret = phy_read_status(phydev); + if (ret) { + dev_err(&hdev->pdev->dev, + "phy update link status fail, ret = %d\n", ret); + return; + } + + if (phydev->link == link_ret) + break; + + msleep(HCLGE_LINK_STATUS_MS); + } while (++i < HCLGE_PHY_LINK_STATUS_NUM); +} + +static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret, + int wait_cnt) +{ + int link_status; + int i = 0; + int ret; + + do { + ret = hclge_get_mac_link_status(hdev, &link_status); + if (ret) + return ret; + if (link_status == link_ret) + return 0; + + msleep(HCLGE_LINK_STATUS_MS); + } while (++i < wait_cnt); + return -EBUSY; +} + +static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en, + bool is_phy) +{ +#define HCLGE_MAC_LINK_STATUS_NUM 100 + + int link_ret; + + link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN; + + if (is_phy) + hclge_phy_link_status_wait(hdev, link_ret); + + return hclge_mac_link_status_wait(hdev, link_ret, + HCLGE_MAC_LINK_STATUS_NUM); +} + +static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en) +{ + struct hclge_config_mac_mode_cmd *req; + struct hclge_desc desc; + u32 loop_en; + int ret; + + req = (struct hclge_config_mac_mode_cmd *)&desc.data[0]; + /* 1 Read out the MAC mode config at first */ + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "mac loopback get fail, ret =%d.\n", ret); + return ret; + } + + /* 2 Then setup the loopback flag */ + loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en); + hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 
1 : 0); + + req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en); + + /* 3 Config mac work mode with loopback flag + * and its original configure parameters + */ + hclge_comm_cmd_reuse_desc(&desc, false); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "mac loopback set fail, ret =%d.\n", ret); + return ret; +} + +static int hclge_cfg_common_loopback_cmd_send(struct hclge_dev *hdev, bool en, + enum hnae3_loop loop_mode) +{ + struct hclge_common_lb_cmd *req; + struct hclge_desc desc; + u8 loop_mode_b; + int ret; + + req = (struct hclge_common_lb_cmd *)desc.data; + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, false); + + switch (loop_mode) { + case HNAE3_LOOP_SERIAL_SERDES: + loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B; + break; + case HNAE3_LOOP_PARALLEL_SERDES: + loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B; + break; + case HNAE3_LOOP_PHY: + loop_mode_b = HCLGE_CMD_GE_PHY_INNER_LOOP_B; + break; + default: + dev_err(&hdev->pdev->dev, + "unsupported loopback mode %d\n", loop_mode); + return -ENOTSUPP; + } + + req->mask = loop_mode_b; + if (en) + req->enable = loop_mode_b; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "failed to send loopback cmd, loop_mode = %d, ret = %d\n", + loop_mode, ret); + + return ret; +} + +static int hclge_cfg_common_loopback_wait(struct hclge_dev *hdev) +{ +#define HCLGE_COMMON_LB_RETRY_MS 10 +#define HCLGE_COMMON_LB_RETRY_NUM 100 + + struct hclge_common_lb_cmd *req; + struct hclge_desc desc; + u32 i = 0; + int ret; + + req = (struct hclge_common_lb_cmd *)desc.data; + + do { + msleep(HCLGE_COMMON_LB_RETRY_MS); + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, + true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get loopback done status, ret = %d\n", + ret); + return ret; + } + } while (++i < HCLGE_COMMON_LB_RETRY_NUM && + !(req->result & HCLGE_CMD_COMMON_LB_DONE_B)); + + if (!(req->result & HCLGE_CMD_COMMON_LB_DONE_B)) { + dev_err(&hdev->pdev->dev, "wait loopback timeout\n"); + return -EBUSY; + } else if (!(req->result & HCLGE_CMD_COMMON_LB_SUCCESS_B)) { + dev_err(&hdev->pdev->dev, "failed to do loopback test\n"); + return -EIO; + } + + return 0; +} + +static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en, + enum hnae3_loop loop_mode) +{ + int ret; + + ret = hclge_cfg_common_loopback_cmd_send(hdev, en, loop_mode); + if (ret) + return ret; + + return hclge_cfg_common_loopback_wait(hdev); +} + +static int hclge_set_common_loopback(struct hclge_dev *hdev, bool en, + enum hnae3_loop loop_mode) +{ + int ret; + + ret = hclge_cfg_common_loopback(hdev, en, loop_mode); + if (ret) + return ret; + + hclge_cfg_mac_mode(hdev, en); + + ret = hclge_mac_phy_link_status_wait(hdev, en, false); + if (ret) + dev_err(&hdev->pdev->dev, + "serdes loopback config mac mode timeout\n"); + + return ret; +} + +static int hclge_enable_phy_loopback(struct hclge_dev *hdev, + struct phy_device *phydev) +{ + int ret; + + if (!phydev->suspended) { + ret = phy_suspend(phydev); + if (ret) + return ret; + } + + ret = phy_resume(phydev); + if (ret) + return ret; + + return phy_loopback(phydev, true); +} + +static int hclge_disable_phy_loopback(struct hclge_dev *hdev, + struct phy_device *phydev) +{ + int ret; + + ret = phy_loopback(phydev, false); + if (ret) + return ret; + + return phy_suspend(phydev); +} + +static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en) +{ + struct phy_device *phydev = 
hdev->hw.mac.phydev; + int ret; + + if (!phydev) { + if (hnae3_dev_phy_imp_supported(hdev)) + return hclge_set_common_loopback(hdev, en, + HNAE3_LOOP_PHY); + return -ENOTSUPP; + } + + if (en) + ret = hclge_enable_phy_loopback(hdev, phydev); + else + ret = hclge_disable_phy_loopback(hdev, phydev); + if (ret) { + dev_err(&hdev->pdev->dev, + "set phy loopback fail, ret = %d\n", ret); + return ret; + } + + hclge_cfg_mac_mode(hdev, en); + + ret = hclge_mac_phy_link_status_wait(hdev, en, true); + if (ret) + dev_err(&hdev->pdev->dev, + "phy loopback config mac mode timeout\n"); + + return ret; +} + +static int hclge_tqp_enable_cmd_send(struct hclge_dev *hdev, u16 tqp_id, + u16 stream_id, bool enable) +{ + struct hclge_desc desc; + struct hclge_cfg_com_tqp_queue_cmd *req = + (struct hclge_cfg_com_tqp_queue_cmd *)desc.data; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false); + req->tqp_id = cpu_to_le16(tqp_id); + req->stream_id = cpu_to_le16(stream_id); + if (enable) + req->enable |= 1U << HCLGE_TQP_ENABLE_B; + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int hclge_tqp_enable(struct hnae3_handle *handle, bool enable) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + int ret; + u16 i; + + for (i = 0; i < handle->kinfo.num_tqps; i++) { + ret = hclge_tqp_enable_cmd_send(hdev, i, 0, enable); + if (ret) + return ret; + } + return 0; +} + +static int hclge_set_loopback(struct hnae3_handle *handle, + enum hnae3_loop loop_mode, bool en) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + int ret = 0; + + /* Loopback can be enabled in three places: SSU, MAC, and serdes. By + * default, SSU loopback is enabled, so if the SMAC and the DMAC are + * the same, the packets are looped back in the SSU. If SSU loopback + * is disabled, packets can reach MAC even if SMAC is the same as DMAC. + */ + if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { + u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B); + + ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param, + HCLGE_SWITCH_ALW_LPBK_MASK); + if (ret) + return ret; + } + + switch (loop_mode) { + case HNAE3_LOOP_APP: + ret = hclge_set_app_loopback(hdev, en); + break; + case HNAE3_LOOP_SERIAL_SERDES: + case HNAE3_LOOP_PARALLEL_SERDES: + ret = hclge_set_common_loopback(hdev, en, loop_mode); + break; + case HNAE3_LOOP_PHY: + ret = hclge_set_phy_loopback(hdev, en); + break; + case HNAE3_LOOP_EXTERNAL: + break; + default: + ret = -ENOTSUPP; + dev_err(&hdev->pdev->dev, + "loop_mode %d is not supported\n", loop_mode); + break; + } + + if (ret) + return ret; + + ret = hclge_tqp_enable(handle, en); + if (ret) + dev_err(&hdev->pdev->dev, "failed to %s tqp in loopback, ret = %d\n", + en ? 
"enable" : "disable", ret); + + return ret; +} + +static int hclge_set_default_loopback(struct hclge_dev *hdev) +{ + int ret; + + ret = hclge_set_app_loopback(hdev, false); + if (ret) + return ret; + + ret = hclge_cfg_common_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES); + if (ret) + return ret; + + return hclge_cfg_common_loopback(hdev, false, + HNAE3_LOOP_PARALLEL_SERDES); +} + +static void hclge_flush_link_update(struct hclge_dev *hdev) +{ +#define HCLGE_FLUSH_LINK_TIMEOUT 100000 + + unsigned long last = hdev->serv_processed_cnt; + int i = 0; + + while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) && + i++ < HCLGE_FLUSH_LINK_TIMEOUT && + last == hdev->serv_processed_cnt) + usleep_range(1, 1); +} + +static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + if (enable) { + hclge_task_schedule(hdev, 0); + } else { + /* Set the DOWN flag here to disable link updating */ + set_bit(HCLGE_STATE_DOWN, &hdev->state); + + /* flush memory to make sure DOWN is seen by service task */ + smp_mb__before_atomic(); + hclge_flush_link_update(hdev); + } +} + +static int hclge_ae_start(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + /* mac enable */ + hclge_cfg_mac_mode(hdev, true); + clear_bit(HCLGE_STATE_DOWN, &hdev->state); + hdev->hw.mac.link = 0; + + /* reset tqp stats */ + hclge_comm_reset_tqp_stats(handle); + + hclge_mac_start_phy(hdev); + + return 0; +} + +static void hclge_ae_stop(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + set_bit(HCLGE_STATE_DOWN, &hdev->state); + spin_lock_bh(&hdev->fd_rule_lock); + hclge_clear_arfs_rules(hdev); + spin_unlock_bh(&hdev->fd_rule_lock); + + /* If it is not PF reset or FLR, the firmware will disable the MAC, + * so it only need to stop phy here. 
+ */ + if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) { + hclge_pfc_pause_en_cfg(hdev, HCLGE_PFC_TX_RX_DISABLE, + HCLGE_PFC_DISABLE); + if (hdev->reset_type != HNAE3_FUNC_RESET && + hdev->reset_type != HNAE3_FLR_RESET) { + hclge_mac_stop_phy(hdev); + hclge_update_link_status(hdev); + return; + } + } + + hclge_reset_tqp(handle); + + hclge_config_mac_tnl_int(hdev, false); + + /* Mac disable */ + hclge_cfg_mac_mode(hdev, false); + + hclge_mac_stop_phy(hdev); + + /* reset tqp stats */ + hclge_comm_reset_tqp_stats(handle); + hclge_update_link_status(hdev); +} + +int hclge_vport_start(struct hclge_vport *vport) +{ + struct hclge_dev *hdev = vport->back; + + set_bit(HCLGE_VPORT_STATE_INITED, &vport->state); + set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); + set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state); + vport->last_active_jiffies = jiffies; + vport->need_notify = 0; + + if (test_bit(vport->vport_id, hdev->vport_config_block)) { + if (vport->vport_id) { + hclge_restore_mac_table_common(vport); + hclge_restore_vport_vlan_table(vport); + } else { + hclge_restore_hw_table(hdev); + } + } + + clear_bit(vport->vport_id, hdev->vport_config_block); + + return 0; +} + +void hclge_vport_stop(struct hclge_vport *vport) +{ + clear_bit(HCLGE_VPORT_STATE_INITED, &vport->state); + clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); + vport->need_notify = 0; +} + +static int hclge_client_start(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + + return hclge_vport_start(vport); +} + +static void hclge_client_stop(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + + hclge_vport_stop(vport); +} + +static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport, + u16 cmdq_resp, u8 resp_code, + enum hclge_mac_vlan_tbl_opcode op) +{ + struct hclge_dev *hdev = vport->back; + + if (cmdq_resp) { + dev_err(&hdev->pdev->dev, + "cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n", + cmdq_resp); + return -EIO; + } + + if (op == HCLGE_MAC_VLAN_ADD) { + if (!resp_code || resp_code == 1) + return 0; + else if (resp_code == HCLGE_ADD_UC_OVERFLOW || + resp_code == HCLGE_ADD_MC_OVERFLOW) + return -ENOSPC; + + dev_err(&hdev->pdev->dev, + "add mac addr failed for undefined, code=%u.\n", + resp_code); + return -EIO; + } else if (op == HCLGE_MAC_VLAN_REMOVE) { + if (!resp_code) { + return 0; + } else if (resp_code == 1) { + dev_dbg(&hdev->pdev->dev, + "remove mac addr failed for miss.\n"); + return -ENOENT; + } + + dev_err(&hdev->pdev->dev, + "remove mac addr failed for undefined, code=%u.\n", + resp_code); + return -EIO; + } else if (op == HCLGE_MAC_VLAN_LKUP) { + if (!resp_code) { + return 0; + } else if (resp_code == 1) { + dev_dbg(&hdev->pdev->dev, + "lookup mac addr failed for miss.\n"); + return -ENOENT; + } + + dev_err(&hdev->pdev->dev, + "lookup mac addr failed for undefined, code=%u.\n", + resp_code); + return -EIO; + } + + dev_err(&hdev->pdev->dev, + "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op); + + return -EINVAL; +} + +static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr) +{ +#define HCLGE_VF_NUM_IN_FIRST_DESC 192 + + unsigned int word_num; + unsigned int bit_num; + + if (vfid > 255 || vfid < 0) + return -EIO; + + if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) { + word_num = vfid / 32; + bit_num = vfid % 32; + if (clr) + desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num)); + else + desc[1].data[word_num] |= cpu_to_le32(1 << bit_num); + } else { + word_num = (vfid 
- HCLGE_VF_NUM_IN_FIRST_DESC) / 32; + bit_num = vfid % 32; + if (clr) + desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num)); + else + desc[2].data[word_num] |= cpu_to_le32(1 << bit_num); + } + + return 0; +} + +static bool hclge_is_all_function_id_zero(struct hclge_desc *desc) +{ +#define HCLGE_DESC_NUMBER 3 +#define HCLGE_FUNC_NUMBER_PER_DESC 6 + int i, j; + + for (i = 1; i < HCLGE_DESC_NUMBER; i++) + for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++) + if (desc[i].data[j]) + return false; + + return true; +} + +static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req, + const u8 *addr, bool is_mc) +{ + const unsigned char *mac_addr = addr; + u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) | + (mac_addr[0]) | (mac_addr[1] << 8); + u32 low_val = mac_addr[4] | (mac_addr[5] << 8); + + hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); + if (is_mc) { + hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); + hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1); + } + + new_req->mac_addr_hi32 = cpu_to_le32(high_val); + new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff); +} + +static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport, + struct hclge_mac_vlan_tbl_entry_cmd *req) +{ + struct hclge_dev *hdev = vport->back; + struct hclge_desc desc; + u8 resp_code; + u16 retval; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false); + + memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "del mac addr failed for cmd_send, ret =%d.\n", + ret); + return ret; + } + resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff; + retval = le16_to_cpu(desc.retval); + + return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code, + HCLGE_MAC_VLAN_REMOVE); +} + +static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport, + struct hclge_mac_vlan_tbl_entry_cmd *req, + struct hclge_desc *desc, + bool is_mc) +{ + struct hclge_dev *hdev = vport->back; + u8 resp_code; + u16 retval; + int ret; + + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true); + if (is_mc) { + desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); + memcpy(desc[0].data, + req, + sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); + hclge_cmd_setup_basic_desc(&desc[1], + HCLGE_OPC_MAC_VLAN_ADD, + true); + desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[2], + HCLGE_OPC_MAC_VLAN_ADD, + true); + ret = hclge_cmd_send(&hdev->hw, desc, 3); + } else { + memcpy(desc[0].data, + req, + sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); + ret = hclge_cmd_send(&hdev->hw, desc, 1); + } + if (ret) { + dev_err(&hdev->pdev->dev, + "lookup mac addr failed for cmd_send, ret =%d.\n", + ret); + return ret; + } + resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff; + retval = le16_to_cpu(desc[0].retval); + + return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code, + HCLGE_MAC_VLAN_LKUP); +} + +static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport, + struct hclge_mac_vlan_tbl_entry_cmd *req, + struct hclge_desc *mc_desc) +{ + struct hclge_dev *hdev = vport->back; + int cfg_status; + u8 resp_code; + u16 retval; + int ret; + + if (!mc_desc) { + struct hclge_desc desc; + + hclge_cmd_setup_basic_desc(&desc, + HCLGE_OPC_MAC_VLAN_ADD, + false); + memcpy(desc.data, req, + sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 
0xff; + retval = le16_to_cpu(desc.retval); + + cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval, + resp_code, + HCLGE_MAC_VLAN_ADD); + } else { + hclge_comm_cmd_reuse_desc(&mc_desc[0], false); + mc_desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); + hclge_comm_cmd_reuse_desc(&mc_desc[1], false); + mc_desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); + hclge_comm_cmd_reuse_desc(&mc_desc[2], false); + mc_desc[2].flag &= cpu_to_le16(~HCLGE_COMM_CMD_FLAG_NEXT); + memcpy(mc_desc[0].data, req, + sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); + ret = hclge_cmd_send(&hdev->hw, mc_desc, 3); + resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff; + retval = le16_to_cpu(mc_desc[0].retval); + + cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval, + resp_code, + HCLGE_MAC_VLAN_ADD); + } + + if (ret) { + dev_err(&hdev->pdev->dev, + "add mac addr failed for cmd_send, ret =%d.\n", + ret); + return ret; + } + + return cfg_status; +} + +static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size, + u16 *allocated_size) +{ + struct hclge_umv_spc_alc_cmd *req; + struct hclge_desc desc; + int ret; + + req = (struct hclge_umv_spc_alc_cmd *)desc.data; + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false); + + req->space_size = cpu_to_le32(space_size); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n", + ret); + return ret; + } + + *allocated_size = le32_to_cpu(desc.data[1]); + + return 0; +} + +static int hclge_init_umv_space(struct hclge_dev *hdev) +{ + u16 allocated_size = 0; + int ret; + + ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size); + if (ret) + return ret; + + if (allocated_size < hdev->wanted_umv_size) + dev_warn(&hdev->pdev->dev, + "failed to alloc umv space, want %u, get %u\n", + hdev->wanted_umv_size, allocated_size); + + hdev->max_umv_size = allocated_size; + hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1); + hdev->share_umv_size = hdev->priv_umv_size + + hdev->max_umv_size % (hdev->num_alloc_vport + 1); + + if (hdev->ae_dev->dev_specs.mc_mac_size) + set_bit(HNAE3_DEV_SUPPORT_MC_MAC_MNG_B, hdev->ae_dev->caps); + + return 0; +} + +static void hclge_reset_umv_space(struct hclge_dev *hdev) +{ + struct hclge_vport *vport; + int i; + + for (i = 0; i < hdev->num_alloc_vport; i++) { + vport = &hdev->vport[i]; + vport->used_umv_num = 0; + } + + mutex_lock(&hdev->vport_lock); + hdev->share_umv_size = hdev->priv_umv_size + + hdev->max_umv_size % (hdev->num_alloc_vport + 1); + mutex_unlock(&hdev->vport_lock); + + hdev->used_mc_mac_num = 0; +} + +static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock) +{ + struct hclge_dev *hdev = vport->back; + bool is_full; + + if (need_lock) + mutex_lock(&hdev->vport_lock); + + is_full = (vport->used_umv_num >= hdev->priv_umv_size && + hdev->share_umv_size == 0); + + if (need_lock) + mutex_unlock(&hdev->vport_lock); + + return is_full; +} + +static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free) +{ + struct hclge_dev *hdev = vport->back; + + if (is_free) { + if (vport->used_umv_num > hdev->priv_umv_size) + hdev->share_umv_size++; + + if (vport->used_umv_num > 0) + vport->used_umv_num--; + } else { + if (vport->used_umv_num >= hdev->priv_umv_size && + hdev->share_umv_size > 0) + hdev->share_umv_size--; + vport->used_umv_num++; + } +} + +static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list, + const u8 *mac_addr) +{ + struct hclge_mac_node 
*mac_node, *tmp; + + list_for_each_entry_safe(mac_node, tmp, list, node) + if (ether_addr_equal(mac_addr, mac_node->mac_addr)) + return mac_node; + + return NULL; +} + +static void hclge_update_mac_node(struct hclge_mac_node *mac_node, + enum HCLGE_MAC_NODE_STATE state) +{ + switch (state) { + /* from set_rx_mode or tmp_add_list */ + case HCLGE_MAC_TO_ADD: + if (mac_node->state == HCLGE_MAC_TO_DEL) + mac_node->state = HCLGE_MAC_ACTIVE; + break; + /* only from set_rx_mode */ + case HCLGE_MAC_TO_DEL: + if (mac_node->state == HCLGE_MAC_TO_ADD) { + list_del(&mac_node->node); + kfree(mac_node); + } else { + mac_node->state = HCLGE_MAC_TO_DEL; + } + break; + /* only from tmp_add_list, the mac_node->state won't be + * ACTIVE. + */ + case HCLGE_MAC_ACTIVE: + if (mac_node->state == HCLGE_MAC_TO_ADD) + mac_node->state = HCLGE_MAC_ACTIVE; + + break; + } +} + +int hclge_update_mac_list(struct hclge_vport *vport, + enum HCLGE_MAC_NODE_STATE state, + enum HCLGE_MAC_ADDR_TYPE mac_type, + const unsigned char *addr) +{ + char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; + struct hclge_dev *hdev = vport->back; + struct hclge_mac_node *mac_node; + struct list_head *list; + + list = (mac_type == HCLGE_MAC_ADDR_UC) ? + &vport->uc_mac_list : &vport->mc_mac_list; + + spin_lock_bh(&vport->mac_list_lock); + + /* if the mac addr is already in the mac list, no need to add a new + * one into it, just check the mac addr state, convert it to a new + * state, or just remove it, or do nothing. + */ + mac_node = hclge_find_mac_node(list, addr); + if (mac_node) { + hclge_update_mac_node(mac_node, state); + spin_unlock_bh(&vport->mac_list_lock); + set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state); + return 0; + } + + /* if this address is never added, unnecessary to delete */ + if (state == HCLGE_MAC_TO_DEL) { + spin_unlock_bh(&vport->mac_list_lock); + hnae3_format_mac_addr(format_mac_addr, addr); + dev_err(&hdev->pdev->dev, + "failed to delete address %s from mac list\n", + format_mac_addr); + return -ENOENT; + } + + mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC); + if (!mac_node) { + spin_unlock_bh(&vport->mac_list_lock); + return -ENOMEM; + } + + set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state); + + mac_node->state = state; + ether_addr_copy(mac_node->mac_addr, addr); + list_add_tail(&mac_node->node, list); + + spin_unlock_bh(&vport->mac_list_lock); + + return 0; +} + +static int hclge_add_uc_addr(struct hnae3_handle *handle, + const unsigned char *addr) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + + return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC, + addr); +} + +int hclge_add_uc_addr_common(struct hclge_vport *vport, + const unsigned char *addr) +{ + char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; + struct hclge_dev *hdev = vport->back; + struct hclge_mac_vlan_tbl_entry_cmd req; + struct hclge_desc desc; + u16 egress_port = 0; + int ret; + + /* mac addr check */ + if (is_zero_ether_addr(addr) || + is_broadcast_ether_addr(addr) || + is_multicast_ether_addr(addr)) { + hnae3_format_mac_addr(format_mac_addr, addr); + dev_err(&hdev->pdev->dev, + "Set_uc mac err! invalid mac:%s. 
is_zero:%d,is_br=%d,is_mul=%d\n", + format_mac_addr, is_zero_ether_addr(addr), + is_broadcast_ether_addr(addr), + is_multicast_ether_addr(addr)); + return -EINVAL; + } + + memset(&req, 0, sizeof(req)); + + hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M, + HCLGE_MAC_EPORT_VFID_S, vport->vport_id); + + req.egress_port = cpu_to_le16(egress_port); + + hclge_prepare_mac_addr(&req, addr, false); + + /* Lookup the mac address in the mac_vlan table, and add + * it if the entry is inexistent. Repeated unicast entry + * is not allowed in the mac vlan table. + */ + ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false); + if (ret == -ENOENT) { + mutex_lock(&hdev->vport_lock); + if (!hclge_is_umv_space_full(vport, false)) { + ret = hclge_add_mac_vlan_tbl(vport, &req, NULL); + if (!ret) + hclge_update_umv_space(vport, false); + mutex_unlock(&hdev->vport_lock); + return ret; + } + mutex_unlock(&hdev->vport_lock); + + if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE)) + dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n", + hdev->priv_umv_size); + + return -ENOSPC; + } + + /* check if we just hit the duplicate */ + if (!ret) + return -EEXIST; + + return ret; +} + +static int hclge_rm_uc_addr(struct hnae3_handle *handle, + const unsigned char *addr) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + + return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC, + addr); +} + +int hclge_rm_uc_addr_common(struct hclge_vport *vport, + const unsigned char *addr) +{ + char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; + struct hclge_dev *hdev = vport->back; + struct hclge_mac_vlan_tbl_entry_cmd req; + int ret; + + /* mac addr check */ + if (is_zero_ether_addr(addr) || + is_broadcast_ether_addr(addr) || + is_multicast_ether_addr(addr)) { + hnae3_format_mac_addr(format_mac_addr, addr); + dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%s.\n", + format_mac_addr); + return -EINVAL; + } + + memset(&req, 0, sizeof(req)); + hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); + hclge_prepare_mac_addr(&req, addr, false); + ret = hclge_remove_mac_vlan_tbl(vport, &req); + if (!ret || ret == -ENOENT) { + mutex_lock(&hdev->vport_lock); + hclge_update_umv_space(vport, true); + mutex_unlock(&hdev->vport_lock); + return 0; + } + + return ret; +} + +static int hclge_add_mc_addr(struct hnae3_handle *handle, + const unsigned char *addr) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + + return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC, + addr); +} + +int hclge_add_mc_addr_common(struct hclge_vport *vport, + const unsigned char *addr) +{ + char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; + struct hclge_dev *hdev = vport->back; + struct hclge_mac_vlan_tbl_entry_cmd req; + struct hclge_desc desc[3]; + bool is_new_addr = false; + int status; + + /* mac addr check */ + if (!is_multicast_ether_addr(addr)) { + hnae3_format_mac_addr(format_mac_addr, addr); + dev_err(&hdev->pdev->dev, + "Add mc mac err! 
invalid mac:%s.\n", + format_mac_addr); + return -EINVAL; + } + memset(&req, 0, sizeof(req)); + hclge_prepare_mac_addr(&req, addr, true); + status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); + if (status) { + if (hnae3_ae_dev_mc_mac_mng_supported(hdev->ae_dev) && + hdev->used_mc_mac_num >= + hdev->ae_dev->dev_specs.mc_mac_size) + goto err_no_space; + + is_new_addr = true; + + /* This mac addr do not exist, add new entry for it */ + memset(desc[0].data, 0, sizeof(desc[0].data)); + memset(desc[1].data, 0, sizeof(desc[0].data)); + memset(desc[2].data, 0, sizeof(desc[0].data)); + } + status = hclge_update_desc_vfid(desc, vport->vport_id, false); + if (status) + return status; + status = hclge_add_mac_vlan_tbl(vport, &req, desc); + if (status == -ENOSPC) + goto err_no_space; + else if (!status && is_new_addr) + hdev->used_mc_mac_num++; + + return status; + +err_no_space: + /* if already overflow, not to print each time */ + if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE)) { + vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE; + dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n"); + } + + return -ENOSPC; +} + +static int hclge_rm_mc_addr(struct hnae3_handle *handle, + const unsigned char *addr) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + + return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC, + addr); +} + +int hclge_rm_mc_addr_common(struct hclge_vport *vport, + const unsigned char *addr) +{ + char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; + struct hclge_dev *hdev = vport->back; + struct hclge_mac_vlan_tbl_entry_cmd req; + enum hclge_comm_cmd_status status; + struct hclge_desc desc[3]; + + /* mac addr check */ + if (!is_multicast_ether_addr(addr)) { + hnae3_format_mac_addr(format_mac_addr, addr); + dev_dbg(&hdev->pdev->dev, + "Remove mc mac err! invalid mac:%s.\n", + format_mac_addr); + return -EINVAL; + } + + memset(&req, 0, sizeof(req)); + hclge_prepare_mac_addr(&req, addr, true); + status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); + if (!status) { + /* This mac addr exist, remove this handle's VFID for it */ + status = hclge_update_desc_vfid(desc, vport->vport_id, true); + if (status) + return status; + + if (hclge_is_all_function_id_zero(desc)) { + /* All the vfid is zero, so need to delete this entry */ + status = hclge_remove_mac_vlan_tbl(vport, &req); + if (!status) + hdev->used_mc_mac_num--; + } else { + /* Not all the vfid is zero, update the vfid */ + status = hclge_add_mac_vlan_tbl(vport, &req, desc); + } + } else if (status == -ENOENT) { + status = 0; + } + + return status; +} + +static void hclge_sync_vport_mac_list(struct hclge_vport *vport, + struct list_head *list, + enum HCLGE_MAC_ADDR_TYPE mac_type) +{ + int (*sync)(struct hclge_vport *vport, const unsigned char *addr); + struct hclge_mac_node *mac_node, *tmp; + int ret; + + if (mac_type == HCLGE_MAC_ADDR_UC) + sync = hclge_add_uc_addr_common; + else + sync = hclge_add_mc_addr_common; + + list_for_each_entry_safe(mac_node, tmp, list, node) { + ret = sync(vport, mac_node->mac_addr); + if (!ret) { + mac_node->state = HCLGE_MAC_ACTIVE; + } else { + set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, + &vport->state); + + /* If one unicast mac address is existing in hardware, + * we need to try whether other unicast mac addresses + * are new addresses that can be added. + * Multicast mac address can be reusable, even though + * there is no space to add new multicast mac address, + * we should check whether other mac addresses are + * existing in hardware for reuse. 
+ */ + if ((mac_type == HCLGE_MAC_ADDR_UC && ret != -EEXIST) || + (mac_type == HCLGE_MAC_ADDR_MC && ret != -ENOSPC)) + break; + } + } +} + +static void hclge_unsync_vport_mac_list(struct hclge_vport *vport, + struct list_head *list, + enum HCLGE_MAC_ADDR_TYPE mac_type) +{ + int (*unsync)(struct hclge_vport *vport, const unsigned char *addr); + struct hclge_mac_node *mac_node, *tmp; + int ret; + + if (mac_type == HCLGE_MAC_ADDR_UC) + unsync = hclge_rm_uc_addr_common; + else + unsync = hclge_rm_mc_addr_common; + + list_for_each_entry_safe(mac_node, tmp, list, node) { + ret = unsync(vport, mac_node->mac_addr); + if (!ret || ret == -ENOENT) { + list_del(&mac_node->node); + kfree(mac_node); + } else { + set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, + &vport->state); + break; + } + } +} + +static bool hclge_sync_from_add_list(struct list_head *add_list, + struct list_head *mac_list) +{ + struct hclge_mac_node *mac_node, *tmp, *new_node; + bool all_added = true; + + list_for_each_entry_safe(mac_node, tmp, add_list, node) { + if (mac_node->state == HCLGE_MAC_TO_ADD) + all_added = false; + + /* if the mac address from tmp_add_list is not in the + * uc/mc_mac_list, it means have received a TO_DEL request + * during the time window of adding the mac address into mac + * table. if mac_node state is ACTIVE, then change it to TO_DEL, + * then it will be removed at next time. else it must be TO_ADD, + * this address hasn't been added into mac table, + * so just remove the mac node. + */ + new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr); + if (new_node) { + hclge_update_mac_node(new_node, mac_node->state); + list_del(&mac_node->node); + kfree(mac_node); + } else if (mac_node->state == HCLGE_MAC_ACTIVE) { + mac_node->state = HCLGE_MAC_TO_DEL; + list_move_tail(&mac_node->node, mac_list); + } else { + list_del(&mac_node->node); + kfree(mac_node); + } + } + + return all_added; +} + +static void hclge_sync_from_del_list(struct list_head *del_list, + struct list_head *mac_list) +{ + struct hclge_mac_node *mac_node, *tmp, *new_node; + + list_for_each_entry_safe(mac_node, tmp, del_list, node) { + new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr); + if (new_node) { + /* If the mac addr exists in the mac list, it means + * received a new TO_ADD request during the time window + * of configuring the mac address. For the mac node + * state is TO_ADD, and the address is already in the + * in the hardware(due to delete fail), so we just need + * to change the mac node state to ACTIVE. 
+ */ + new_node->state = HCLGE_MAC_ACTIVE; + list_del(&mac_node->node); + kfree(mac_node); + } else { + list_move_tail(&mac_node->node, mac_list); + } + } +} + +static void hclge_update_overflow_flags(struct hclge_vport *vport, + enum HCLGE_MAC_ADDR_TYPE mac_type, + bool is_all_added) +{ + if (mac_type == HCLGE_MAC_ADDR_UC) { + if (is_all_added) + vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE; + else if (hclge_is_umv_space_full(vport, true)) + vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE; + } else { + if (is_all_added) + vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE; + else + vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE; + } +} + +static void hclge_sync_vport_mac_table(struct hclge_vport *vport, + enum HCLGE_MAC_ADDR_TYPE mac_type) +{ + struct hclge_mac_node *mac_node, *tmp, *new_node; + struct list_head tmp_add_list, tmp_del_list; + struct list_head *list; + bool all_added; + + INIT_LIST_HEAD(&tmp_add_list); + INIT_LIST_HEAD(&tmp_del_list); + + /* move the mac addr to the tmp_add_list and tmp_del_list, then + * we can add/delete these mac addr outside the spin lock + */ + list = (mac_type == HCLGE_MAC_ADDR_UC) ? + &vport->uc_mac_list : &vport->mc_mac_list; + + spin_lock_bh(&vport->mac_list_lock); + + list_for_each_entry_safe(mac_node, tmp, list, node) { + switch (mac_node->state) { + case HCLGE_MAC_TO_DEL: + list_move_tail(&mac_node->node, &tmp_del_list); + break; + case HCLGE_MAC_TO_ADD: + new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC); + if (!new_node) + goto stop_traverse; + ether_addr_copy(new_node->mac_addr, mac_node->mac_addr); + new_node->state = mac_node->state; + list_add_tail(&new_node->node, &tmp_add_list); + break; + default: + break; + } + } + +stop_traverse: + spin_unlock_bh(&vport->mac_list_lock); + + /* delete first, in order to get max mac table space for adding */ + hclge_unsync_vport_mac_list(vport, &tmp_del_list, mac_type); + hclge_sync_vport_mac_list(vport, &tmp_add_list, mac_type); + + /* if some mac addresses were added/deleted fail, move back to the + * mac_list, and retry at next time. 
+ */ + spin_lock_bh(&vport->mac_list_lock); + + hclge_sync_from_del_list(&tmp_del_list, list); + all_added = hclge_sync_from_add_list(&tmp_add_list, list); + + spin_unlock_bh(&vport->mac_list_lock); + + hclge_update_overflow_flags(vport, mac_type, all_added); +} + +static bool hclge_need_sync_mac_table(struct hclge_vport *vport) +{ + struct hclge_dev *hdev = vport->back; + + if (test_bit(vport->vport_id, hdev->vport_config_block)) + return false; + + if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state)) + return true; + + return false; +} + +static void hclge_sync_mac_table(struct hclge_dev *hdev) +{ + int i; + + for (i = 0; i < hdev->num_alloc_vport; i++) { + struct hclge_vport *vport = &hdev->vport[i]; + + if (!hclge_need_sync_mac_table(vport)) + continue; + + hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC); + hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC); + } +} + +static void hclge_build_del_list(struct list_head *list, + bool is_del_list, + struct list_head *tmp_del_list) +{ + struct hclge_mac_node *mac_cfg, *tmp; + + list_for_each_entry_safe(mac_cfg, tmp, list, node) { + switch (mac_cfg->state) { + case HCLGE_MAC_TO_DEL: + case HCLGE_MAC_ACTIVE: + list_move_tail(&mac_cfg->node, tmp_del_list); + break; + case HCLGE_MAC_TO_ADD: + if (is_del_list) { + list_del(&mac_cfg->node); + kfree(mac_cfg); + } + break; + } + } +} + +static void hclge_unsync_del_list(struct hclge_vport *vport, + int (*unsync)(struct hclge_vport *vport, + const unsigned char *addr), + bool is_del_list, + struct list_head *tmp_del_list) +{ + struct hclge_mac_node *mac_cfg, *tmp; + int ret; + + list_for_each_entry_safe(mac_cfg, tmp, tmp_del_list, node) { + ret = unsync(vport, mac_cfg->mac_addr); + if (!ret || ret == -ENOENT) { + /* clear all mac addr from hardware, but remain these + * mac addr in the mac list, and restore them after + * vf reset finished. + */ + if (!is_del_list && + mac_cfg->state == HCLGE_MAC_ACTIVE) { + mac_cfg->state = HCLGE_MAC_TO_ADD; + } else { + list_del(&mac_cfg->node); + kfree(mac_cfg); + } + } else if (is_del_list) { + mac_cfg->state = HCLGE_MAC_TO_DEL; + } + } +} + +void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list, + enum HCLGE_MAC_ADDR_TYPE mac_type) +{ + int (*unsync)(struct hclge_vport *vport, const unsigned char *addr); + struct hclge_dev *hdev = vport->back; + struct list_head tmp_del_list, *list; + + if (mac_type == HCLGE_MAC_ADDR_UC) { + list = &vport->uc_mac_list; + unsync = hclge_rm_uc_addr_common; + } else { + list = &vport->mc_mac_list; + unsync = hclge_rm_mc_addr_common; + } + + INIT_LIST_HEAD(&tmp_del_list); + + if (!is_del_list) + set_bit(vport->vport_id, hdev->vport_config_block); + + spin_lock_bh(&vport->mac_list_lock); + + hclge_build_del_list(list, is_del_list, &tmp_del_list); + + spin_unlock_bh(&vport->mac_list_lock); + + hclge_unsync_del_list(vport, unsync, is_del_list, &tmp_del_list); + + spin_lock_bh(&vport->mac_list_lock); + + hclge_sync_from_del_list(&tmp_del_list, list); + + spin_unlock_bh(&vport->mac_list_lock); +} + +/* remove all mac address when uninitailize */ +static void hclge_uninit_vport_mac_list(struct hclge_vport *vport, + enum HCLGE_MAC_ADDR_TYPE mac_type) +{ + struct hclge_mac_node *mac_node, *tmp; + struct hclge_dev *hdev = vport->back; + struct list_head tmp_del_list, *list; + + INIT_LIST_HEAD(&tmp_del_list); + + list = (mac_type == HCLGE_MAC_ADDR_UC) ? 
+ &vport->uc_mac_list : &vport->mc_mac_list; + + spin_lock_bh(&vport->mac_list_lock); + + list_for_each_entry_safe(mac_node, tmp, list, node) { + switch (mac_node->state) { + case HCLGE_MAC_TO_DEL: + case HCLGE_MAC_ACTIVE: + list_move_tail(&mac_node->node, &tmp_del_list); + break; + case HCLGE_MAC_TO_ADD: + list_del(&mac_node->node); + kfree(mac_node); + break; + } + } + + spin_unlock_bh(&vport->mac_list_lock); + + hclge_unsync_vport_mac_list(vport, &tmp_del_list, mac_type); + + if (!list_empty(&tmp_del_list)) + dev_warn(&hdev->pdev->dev, + "uninit %s mac list for vport %u not completely.\n", + mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc", + vport->vport_id); + + list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) { + list_del(&mac_node->node); + kfree(mac_node); + } +} + +static void hclge_uninit_mac_table(struct hclge_dev *hdev) +{ + struct hclge_vport *vport; + int i; + + for (i = 0; i < hdev->num_alloc_vport; i++) { + vport = &hdev->vport[i]; + hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC); + hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC); + } +} + +static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev, + u16 cmdq_resp, u8 resp_code) +{ +#define HCLGE_ETHERTYPE_SUCCESS_ADD 0 +#define HCLGE_ETHERTYPE_ALREADY_ADD 1 +#define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2 +#define HCLGE_ETHERTYPE_KEY_CONFLICT 3 + + int return_status; + + if (cmdq_resp) { + dev_err(&hdev->pdev->dev, + "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n", + cmdq_resp); + return -EIO; + } + + switch (resp_code) { + case HCLGE_ETHERTYPE_SUCCESS_ADD: + case HCLGE_ETHERTYPE_ALREADY_ADD: + return_status = 0; + break; + case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW: + dev_err(&hdev->pdev->dev, + "add mac ethertype failed for manager table overflow.\n"); + return_status = -EIO; + break; + case HCLGE_ETHERTYPE_KEY_CONFLICT: + dev_err(&hdev->pdev->dev, + "add mac ethertype failed for key conflict.\n"); + return_status = -EIO; + break; + default: + dev_err(&hdev->pdev->dev, + "add mac ethertype failed for undefined, code=%u.\n", + resp_code); + return_status = -EIO; + } + + return return_status; +} + +static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf, + u8 *mac_addr) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; + struct hclge_dev *hdev = vport->back; + + vport = hclge_get_vf_vport(hdev, vf); + if (!vport) + return -EINVAL; + + hnae3_format_mac_addr(format_mac_addr, mac_addr); + if (ether_addr_equal(mac_addr, vport->vf_info.mac)) { + dev_info(&hdev->pdev->dev, + "Specified MAC(=%s) is same as before, no change committed!\n", + format_mac_addr); + return 0; + } + + ether_addr_copy(vport->vf_info.mac, mac_addr); + + /* there is a timewindow for PF to know VF unalive, it may + * cause send mailbox fail, but it doesn't matter, VF will + * query it when reinit. 
+ */ + if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) { + dev_info(&hdev->pdev->dev, + "MAC of VF %d has been set to %s, and it will be reinitialized!\n", + vf, format_mac_addr); + (void)hclge_inform_reset_assert_to_vf(vport); + return 0; + } + + dev_info(&hdev->pdev->dev, + "MAC of VF %d has been set to %s, will be active after VF reset\n", + vf, format_mac_addr); + return 0; +} + +static int hclge_add_mgr_tbl(struct hclge_dev *hdev, + const struct hclge_mac_mgr_tbl_entry_cmd *req) +{ + struct hclge_desc desc; + u8 resp_code; + u16 retval; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false); + memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd)); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "add mac ethertype failed for cmd_send, ret =%d.\n", + ret); + return ret; + } + + resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff; + retval = le16_to_cpu(desc.retval); + + return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code); +} + +static int init_mgr_tbl(struct hclge_dev *hdev) +{ + int ret; + int i; + + for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) { + ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]); + if (ret) { + dev_err(&hdev->pdev->dev, + "add mac ethertype failed, ret =%d.\n", + ret); + return ret; + } + } + + return 0; +} + +static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + ether_addr_copy(p, hdev->hw.mac.mac_addr); +} + +int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport, + const u8 *old_addr, const u8 *new_addr) +{ + struct list_head *list = &vport->uc_mac_list; + struct hclge_mac_node *old_node, *new_node; + + new_node = hclge_find_mac_node(list, new_addr); + if (!new_node) { + new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC); + if (!new_node) + return -ENOMEM; + + new_node->state = HCLGE_MAC_TO_ADD; + ether_addr_copy(new_node->mac_addr, new_addr); + list_add(&new_node->node, list); + } else { + if (new_node->state == HCLGE_MAC_TO_DEL) + new_node->state = HCLGE_MAC_ACTIVE; + + /* make sure the new addr is in the list head, avoid dev + * addr may be not re-added into mac table for the umv space + * limitation after global/imp reset which will clear mac + * table by hardware. + */ + list_move(&new_node->node, list); + } + + if (old_addr && !ether_addr_equal(old_addr, new_addr)) { + old_node = hclge_find_mac_node(list, old_addr); + if (old_node) { + if (old_node->state == HCLGE_MAC_TO_ADD) { + list_del(&old_node->node); + kfree(old_node); + } else { + old_node->state = HCLGE_MAC_TO_DEL; + } + } + } + + set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state); + + return 0; +} + +static int hclge_set_mac_addr(struct hnae3_handle *handle, const void *p, + bool is_first) +{ + const unsigned char *new_addr = (const unsigned char *)p; + struct hclge_vport *vport = hclge_get_vport(handle); + char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; + struct hclge_dev *hdev = vport->back; + unsigned char *old_addr = NULL; + int ret; + + /* mac addr check */ + if (is_zero_ether_addr(new_addr) || + is_broadcast_ether_addr(new_addr) || + is_multicast_ether_addr(new_addr)) { + hnae3_format_mac_addr(format_mac_addr, new_addr); + dev_err(&hdev->pdev->dev, + "change uc mac err! 
invalid mac: %s.\n", + format_mac_addr); + return -EINVAL; + } + + ret = hclge_pause_addr_cfg(hdev, new_addr); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to configure mac pause address, ret = %d\n", + ret); + return ret; + } + + if (!is_first) + old_addr = hdev->hw.mac.mac_addr; + + spin_lock_bh(&vport->mac_list_lock); + ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr); + if (ret) { + hnae3_format_mac_addr(format_mac_addr, new_addr); + dev_err(&hdev->pdev->dev, + "failed to change the mac addr:%s, ret = %d\n", + format_mac_addr, ret); + spin_unlock_bh(&vport->mac_list_lock); + + if (!is_first) + hclge_pause_addr_cfg(hdev, old_addr); + + return ret; + } + /* we must update dev addr with spin lock protect, preventing dev addr + * being removed by set_rx_mode path. + */ + ether_addr_copy(hdev->hw.mac.mac_addr, new_addr); + spin_unlock_bh(&vport->mac_list_lock); + + hclge_task_schedule(hdev, 0); + + return 0; +} + +static int hclge_mii_ioctl(struct hclge_dev *hdev, struct ifreq *ifr, int cmd) +{ + struct mii_ioctl_data *data = if_mii(ifr); + + if (!hnae3_dev_phy_imp_supported(hdev)) + return -EOPNOTSUPP; + + switch (cmd) { + case SIOCGMIIPHY: + data->phy_id = hdev->hw.mac.phy_addr; + /* this command reads phy id and register at the same time */ + fallthrough; + case SIOCGMIIREG: + data->val_out = hclge_read_phy_reg(hdev, data->reg_num); + return 0; + + case SIOCSMIIREG: + return hclge_write_phy_reg(hdev, data->reg_num, data->val_in); + default: + return -EOPNOTSUPP; + } +} + +static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr, + int cmd) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + switch (cmd) { + case SIOCGHWTSTAMP: + return hclge_ptp_get_cfg(hdev, ifr); + case SIOCSHWTSTAMP: + return hclge_ptp_set_cfg(hdev, ifr); + default: + if (!hdev->hw.mac.phydev) + return hclge_mii_ioctl(hdev, ifr, cmd); + } + + return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd); +} + +static int hclge_set_port_vlan_filter_bypass(struct hclge_dev *hdev, u8 vf_id, + bool bypass_en) +{ + struct hclge_port_vlan_filter_bypass_cmd *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PORT_VLAN_BYPASS, false); + req = (struct hclge_port_vlan_filter_bypass_cmd *)desc.data; + req->vf_id = vf_id; + hnae3_set_bit(req->bypass_state, HCLGE_INGRESS_BYPASS_B, + bypass_en ? 1 : 0); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "failed to set vport%u port vlan filter bypass state, ret = %d.\n", + vf_id, ret); + + return ret; +} + +static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type, + u8 fe_type, bool filter_en, u8 vf_id) +{ + struct hclge_vlan_filter_ctrl_cmd *req; + struct hclge_desc desc; + int ret; + + /* read current vlan filter parameter */ + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true); + req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data; + req->vlan_type = vlan_type; + req->vf_id = vf_id; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, "failed to get vport%u vlan filter config, ret = %d.\n", + vf_id, ret); + return ret; + } + + /* modify and write new config parameter */ + hclge_comm_cmd_reuse_desc(&desc, false); + req->vlan_fe = filter_en ? 
+ (req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, "failed to set vport%u vlan filter, ret = %d.\n", + vf_id, ret); + + return ret; +} + +static int hclge_set_vport_vlan_filter(struct hclge_vport *vport, bool enable) +{ + struct hclge_dev *hdev = vport->back; + struct hnae3_ae_dev *ae_dev = hdev->ae_dev; + int ret; + + if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) + return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, + HCLGE_FILTER_FE_EGRESS_V1_B, + enable, vport->vport_id); + + ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, + HCLGE_FILTER_FE_EGRESS, enable, + vport->vport_id); + if (ret) + return ret; + + if (test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, ae_dev->caps)) { + ret = hclge_set_port_vlan_filter_bypass(hdev, vport->vport_id, + !enable); + } else if (!vport->vport_id) { + if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps)) + enable = false; + + ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, + HCLGE_FILTER_FE_INGRESS, + enable, 0); + } + + return ret; +} + +static bool hclge_need_enable_vport_vlan_filter(struct hclge_vport *vport) +{ + struct hnae3_handle *handle = &vport->nic; + struct hclge_vport_vlan_cfg *vlan, *tmp; + struct hclge_dev *hdev = vport->back; + + if (vport->vport_id) { + if (vport->port_base_vlan_cfg.state != + HNAE3_PORT_BASE_VLAN_DISABLE) + return true; + + if (vport->vf_info.trusted && vport->vf_info.request_uc_en) + return false; + } else if (handle->netdev_flags & HNAE3_USER_UPE) { + return false; + } + + if (!vport->req_vlan_fltr_en) + return false; + + /* compatible with former device, always enable vlan filter */ + if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps)) + return true; + + list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) + if (vlan->vlan_id != 0) + return true; + + return false; +} + +int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en) +{ + struct hclge_dev *hdev = vport->back; + bool need_en; + int ret; + + mutex_lock(&hdev->vport_lock); + + vport->req_vlan_fltr_en = request_en; + + need_en = hclge_need_enable_vport_vlan_filter(vport); + if (need_en == vport->cur_vlan_fltr_en) { + mutex_unlock(&hdev->vport_lock); + return 0; + } + + ret = hclge_set_vport_vlan_filter(vport, need_en); + if (ret) { + mutex_unlock(&hdev->vport_lock); + return ret; + } + + vport->cur_vlan_fltr_en = need_en; + + mutex_unlock(&hdev->vport_lock); + + return 0; +} + +static int hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + + return hclge_enable_vport_vlan_filter(vport, enable); +} + +static int hclge_set_vf_vlan_filter_cmd(struct hclge_dev *hdev, u16 vfid, + bool is_kill, u16 vlan, + struct hclge_desc *desc) +{ + struct hclge_vlan_filter_vf_cfg_cmd *req0; + struct hclge_vlan_filter_vf_cfg_cmd *req1; + u8 vf_byte_val; + u8 vf_byte_off; + int ret; + + hclge_cmd_setup_basic_desc(&desc[0], + HCLGE_OPC_VLAN_FILTER_VF_CFG, false); + hclge_cmd_setup_basic_desc(&desc[1], + HCLGE_OPC_VLAN_FILTER_VF_CFG, false); + + desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); + + vf_byte_off = vfid / 8; + vf_byte_val = 1 << (vfid % 8); + + req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data; + req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data; + + req0->vlan_id = cpu_to_le16(vlan); + req0->vlan_cfg = is_kill; + + if (vf_byte_off < HCLGE_MAX_VF_BYTES) + req0->vf_bitmap[vf_byte_off] = 
vf_byte_val; + else + req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val; + + ret = hclge_cmd_send(&hdev->hw, desc, 2); + if (ret) { + dev_err(&hdev->pdev->dev, + "Send vf vlan command fail, ret =%d.\n", + ret); + return ret; + } + + return 0; +} + +static int hclge_check_vf_vlan_cmd_status(struct hclge_dev *hdev, u16 vfid, + bool is_kill, struct hclge_desc *desc) +{ + struct hclge_vlan_filter_vf_cfg_cmd *req; + + req = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data; + + if (!is_kill) { +#define HCLGE_VF_VLAN_NO_ENTRY 2 + if (!req->resp_code || req->resp_code == 1) + return 0; + + if (req->resp_code == HCLGE_VF_VLAN_NO_ENTRY) { + set_bit(vfid, hdev->vf_vlan_full); + dev_warn(&hdev->pdev->dev, + "vf vlan table is full, vf vlan filter is disabled\n"); + return 0; + } + + dev_err(&hdev->pdev->dev, + "Add vf vlan filter fail, ret =%u.\n", + req->resp_code); + } else { +#define HCLGE_VF_VLAN_DEL_NO_FOUND 1 + if (!req->resp_code) + return 0; + + /* vf vlan filter is disabled when vf vlan table is full, + * then new vlan id will not be added into vf vlan table. + * Just return 0 without warning, avoid massive verbose + * print logs when unload. + */ + if (req->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) + return 0; + + dev_err(&hdev->pdev->dev, + "Kill vf vlan filter fail, ret =%u.\n", + req->resp_code); + } + + return -EIO; +} + +static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid, + bool is_kill, u16 vlan) +{ + struct hclge_vport *vport = &hdev->vport[vfid]; + struct hclge_desc desc[2]; + int ret; + + /* if vf vlan table is full, firmware will close vf vlan filter, it + * is unable and unnecessary to add new vlan id to vf vlan filter. + * If spoof check is enable, and vf vlan is full, it shouldn't add + * new vlan, because tx packets with these vlan id will be dropped. 
+ */ + if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) { + if (vport->vf_info.spoofchk && vlan) { + dev_err(&hdev->pdev->dev, + "Can't add vlan due to spoof check is on and vf vlan table is full\n"); + return -EPERM; + } + return 0; + } + + ret = hclge_set_vf_vlan_filter_cmd(hdev, vfid, is_kill, vlan, desc); + if (ret) + return ret; + + return hclge_check_vf_vlan_cmd_status(hdev, vfid, is_kill, desc); +} + +static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto, + u16 vlan_id, bool is_kill) +{ + struct hclge_vlan_filter_pf_cfg_cmd *req; + struct hclge_desc desc; + u8 vlan_offset_byte_val; + u8 vlan_offset_byte; + u8 vlan_offset_160; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false); + + vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP; + vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) / + HCLGE_VLAN_BYTE_SIZE; + vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE); + + req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data; + req->vlan_offset = vlan_offset_160; + req->vlan_cfg = is_kill; + req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "port vlan command, send fail, ret =%d.\n", ret); + return ret; +} + +static bool hclge_need_update_port_vlan(struct hclge_dev *hdev, u16 vport_id, + u16 vlan_id, bool is_kill) +{ + /* vlan 0 may be added twice when 8021q module is enabled */ + if (!is_kill && !vlan_id && + test_bit(vport_id, hdev->vlan_table[vlan_id])) + return false; + + if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) { + dev_warn(&hdev->pdev->dev, + "Add port vlan failed, vport %u is already in vlan %u\n", + vport_id, vlan_id); + return false; + } + + if (is_kill && + !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) { + dev_warn(&hdev->pdev->dev, + "Delete port vlan failed, vport %u is not in vlan %u\n", + vport_id, vlan_id); + return false; + } + + return true; +} + +static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto, + u16 vport_id, u16 vlan_id, + bool is_kill) +{ + u16 vport_idx, vport_num = 0; + int ret; + + if (is_kill && !vlan_id) + return 0; + + if (vlan_id >= VLAN_N_VID) + return -EINVAL; + + ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id); + if (ret) { + dev_err(&hdev->pdev->dev, + "Set %u vport vlan filter config fail, ret =%d.\n", + vport_id, ret); + return ret; + } + + if (!hclge_need_update_port_vlan(hdev, vport_id, vlan_id, is_kill)) + return 0; + + for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM) + vport_num++; + + if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1)) + ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id, + is_kill); + + return ret; +} + +static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport) +{ + struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg; + struct hclge_vport_vtag_tx_cfg_cmd *req; + struct hclge_dev *hdev = vport->back; + struct hclge_desc desc; + u16 bmap_index; + int status; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false); + + req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data; + req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1); + req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B, + vcfg->accept_tag1 ? 1 : 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B, + vcfg->accept_untag1 ? 
1 : 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B, + vcfg->accept_tag2 ? 1 : 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B, + vcfg->accept_untag2 ? 1 : 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B, + vcfg->insert_tag1_en ? 1 : 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B, + vcfg->insert_tag2_en ? 1 : 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_TAG_SHIFT_MODE_EN_B, + vcfg->tag_shift_mode_en ? 1 : 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0); + + req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD; + bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD / + HCLGE_VF_NUM_PER_BYTE; + req->vf_bitmap[bmap_index] = + 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE); + + status = hclge_cmd_send(&hdev->hw, &desc, 1); + if (status) + dev_err(&hdev->pdev->dev, + "Send port txvlan cfg command fail, ret =%d\n", + status); + + return status; +} + +static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport) +{ + struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg; + struct hclge_vport_vtag_rx_cfg_cmd *req; + struct hclge_dev *hdev = vport->back; + struct hclge_desc desc; + u16 bmap_index; + int status; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false); + + req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data; + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B, + vcfg->strip_tag1_en ? 1 : 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B, + vcfg->strip_tag2_en ? 1 : 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B, + vcfg->vlan1_vlan_prionly ? 1 : 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B, + vcfg->vlan2_vlan_prionly ? 1 : 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG1_EN_B, + vcfg->strip_tag1_discard_en ? 1 : 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG2_EN_B, + vcfg->strip_tag2_discard_en ? 1 : 0); + + req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD; + bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD / + HCLGE_VF_NUM_PER_BYTE; + req->vf_bitmap[bmap_index] = + 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE); + + status = hclge_cmd_send(&hdev->hw, &desc, 1); + if (status) + dev_err(&hdev->pdev->dev, + "Send port rxvlan cfg command fail, ret =%d\n", + status); + + return status; +} + +static int hclge_vlan_offload_cfg(struct hclge_vport *vport, + u16 port_base_vlan_state, + u16 vlan_tag, u8 qos) +{ + int ret; + + if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) { + vport->txvlan_cfg.accept_tag1 = true; + vport->txvlan_cfg.insert_tag1_en = false; + vport->txvlan_cfg.default_tag1 = 0; + } else { + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(vport->nic.pdev); + + vport->txvlan_cfg.accept_tag1 = + ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3; + vport->txvlan_cfg.insert_tag1_en = true; + vport->txvlan_cfg.default_tag1 = (qos << VLAN_PRIO_SHIFT) | + vlan_tag; + } + + vport->txvlan_cfg.accept_untag1 = true; + + /* accept_tag2 and accept_untag2 are not supported on + * pdev revision(0x20), new revision support them, + * this two fields can not be configured by user. 
+ */ + vport->txvlan_cfg.accept_tag2 = true; + vport->txvlan_cfg.accept_untag2 = true; + vport->txvlan_cfg.insert_tag2_en = false; + vport->txvlan_cfg.default_tag2 = 0; + vport->txvlan_cfg.tag_shift_mode_en = true; + + if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) { + vport->rxvlan_cfg.strip_tag1_en = false; + vport->rxvlan_cfg.strip_tag2_en = + vport->rxvlan_cfg.rx_vlan_offload_en; + vport->rxvlan_cfg.strip_tag2_discard_en = false; + } else { + vport->rxvlan_cfg.strip_tag1_en = + vport->rxvlan_cfg.rx_vlan_offload_en; + vport->rxvlan_cfg.strip_tag2_en = true; + vport->rxvlan_cfg.strip_tag2_discard_en = true; + } + + vport->rxvlan_cfg.strip_tag1_discard_en = false; + vport->rxvlan_cfg.vlan1_vlan_prionly = false; + vport->rxvlan_cfg.vlan2_vlan_prionly = false; + + ret = hclge_set_vlan_tx_offload_cfg(vport); + if (ret) + return ret; + + return hclge_set_vlan_rx_offload_cfg(vport); +} + +static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev) +{ + struct hclge_rx_vlan_type_cfg_cmd *rx_req; + struct hclge_tx_vlan_type_cfg_cmd *tx_req; + struct hclge_desc desc; + int status; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false); + rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data; + rx_req->ot_fst_vlan_type = + cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type); + rx_req->ot_sec_vlan_type = + cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type); + rx_req->in_fst_vlan_type = + cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type); + rx_req->in_sec_vlan_type = + cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type); + + status = hclge_cmd_send(&hdev->hw, &desc, 1); + if (status) { + dev_err(&hdev->pdev->dev, + "Send rxvlan protocol type command fail, ret =%d\n", + status); + return status; + } + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false); + + tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data; + tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type); + tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type); + + status = hclge_cmd_send(&hdev->hw, &desc, 1); + if (status) + dev_err(&hdev->pdev->dev, + "Send txvlan protocol type command fail, ret =%d\n", + status); + + return status; +} + +static int hclge_init_vlan_filter(struct hclge_dev *hdev) +{ + struct hclge_vport *vport; + int ret; + int i; + + if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) + return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, + HCLGE_FILTER_FE_EGRESS_V1_B, + true, 0); + + /* for revision 0x21, vf vlan filter is per function */ + for (i = 0; i < hdev->num_alloc_vport; i++) { + vport = &hdev->vport[i]; + ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, + HCLGE_FILTER_FE_EGRESS, true, + vport->vport_id); + if (ret) + return ret; + vport->cur_vlan_fltr_en = true; + } + + return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, + HCLGE_FILTER_FE_INGRESS, true, 0); +} + +static int hclge_init_vlan_type(struct hclge_dev *hdev) +{ + hdev->vlan_type_cfg.rx_in_fst_vlan_type = ETH_P_8021Q; + hdev->vlan_type_cfg.rx_in_sec_vlan_type = ETH_P_8021Q; + hdev->vlan_type_cfg.rx_ot_fst_vlan_type = ETH_P_8021Q; + hdev->vlan_type_cfg.rx_ot_sec_vlan_type = ETH_P_8021Q; + hdev->vlan_type_cfg.tx_ot_vlan_type = ETH_P_8021Q; + hdev->vlan_type_cfg.tx_in_vlan_type = ETH_P_8021Q; + + return hclge_set_vlan_protocol_type(hdev); +} + +static int hclge_init_vport_vlan_offload(struct hclge_dev *hdev) +{ + struct hclge_port_base_vlan_config *cfg; + struct hclge_vport *vport; + int ret; + int i; + + for (i = 0; i < 
hdev->num_alloc_vport; i++) { + vport = &hdev->vport[i]; + cfg = &vport->port_base_vlan_cfg; + + ret = hclge_vlan_offload_cfg(vport, cfg->state, + cfg->vlan_info.vlan_tag, + cfg->vlan_info.qos); + if (ret) + return ret; + } + return 0; +} + +static int hclge_init_vlan_config(struct hclge_dev *hdev) +{ + struct hnae3_handle *handle = &hdev->vport[0].nic; + int ret; + + ret = hclge_init_vlan_filter(hdev); + if (ret) + return ret; + + ret = hclge_init_vlan_type(hdev); + if (ret) + return ret; + + ret = hclge_init_vport_vlan_offload(hdev); + if (ret) + return ret; + + return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false); +} + +static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id, + bool writen_to_tbl) +{ + struct hclge_vport_vlan_cfg *vlan, *tmp; + struct hclge_dev *hdev = vport->back; + + mutex_lock(&hdev->vport_lock); + + list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { + if (vlan->vlan_id == vlan_id) { + mutex_unlock(&hdev->vport_lock); + return; + } + } + + vlan = kzalloc(sizeof(*vlan), GFP_KERNEL); + if (!vlan) { + mutex_unlock(&hdev->vport_lock); + return; + } + + vlan->hd_tbl_status = writen_to_tbl; + vlan->vlan_id = vlan_id; + + list_add_tail(&vlan->node, &vport->vlan_list); + mutex_unlock(&hdev->vport_lock); +} + +static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport) +{ + struct hclge_vport_vlan_cfg *vlan, *tmp; + struct hclge_dev *hdev = vport->back; + int ret; + + mutex_lock(&hdev->vport_lock); + + list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { + if (!vlan->hd_tbl_status) { + ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), + vport->vport_id, + vlan->vlan_id, false); + if (ret) { + dev_err(&hdev->pdev->dev, + "restore vport vlan list failed, ret=%d\n", + ret); + + mutex_unlock(&hdev->vport_lock); + return ret; + } + } + vlan->hd_tbl_status = true; + } + + mutex_unlock(&hdev->vport_lock); + + return 0; +} + +static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id, + bool is_write_tbl) +{ + struct hclge_vport_vlan_cfg *vlan, *tmp; + struct hclge_dev *hdev = vport->back; + + list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { + if (vlan->vlan_id == vlan_id) { + if (is_write_tbl && vlan->hd_tbl_status) + hclge_set_vlan_filter_hw(hdev, + htons(ETH_P_8021Q), + vport->vport_id, + vlan_id, + true); + + list_del(&vlan->node); + kfree(vlan); + break; + } + } +} + +void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list) +{ + struct hclge_vport_vlan_cfg *vlan, *tmp; + struct hclge_dev *hdev = vport->back; + + mutex_lock(&hdev->vport_lock); + + list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { + if (vlan->hd_tbl_status) + hclge_set_vlan_filter_hw(hdev, + htons(ETH_P_8021Q), + vport->vport_id, + vlan->vlan_id, + true); + + vlan->hd_tbl_status = false; + if (is_del_list) { + list_del(&vlan->node); + kfree(vlan); + } + } + clear_bit(vport->vport_id, hdev->vf_vlan_full); + mutex_unlock(&hdev->vport_lock); +} + +void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev) +{ + struct hclge_vport_vlan_cfg *vlan, *tmp; + struct hclge_vport *vport; + int i; + + mutex_lock(&hdev->vport_lock); + + for (i = 0; i < hdev->num_alloc_vport; i++) { + vport = &hdev->vport[i]; + list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { + list_del(&vlan->node); + kfree(vlan); + } + } + + mutex_unlock(&hdev->vport_lock); +} + +void hclge_restore_vport_port_base_vlan_config(struct hclge_dev *hdev) +{ + struct hclge_vlan_info *vlan_info; + struct 
hclge_vport *vport; + u16 vlan_proto; + u16 vlan_id; + u16 state; + int vf_id; + int ret; + + /* PF should restore all vfs port base vlan */ + for (vf_id = 0; vf_id < hdev->num_alloc_vfs; vf_id++) { + vport = &hdev->vport[vf_id + HCLGE_VF_VPORT_START_NUM]; + vlan_info = vport->port_base_vlan_cfg.tbl_sta ? + &vport->port_base_vlan_cfg.vlan_info : + &vport->port_base_vlan_cfg.old_vlan_info; + + vlan_id = vlan_info->vlan_tag; + vlan_proto = vlan_info->vlan_proto; + state = vport->port_base_vlan_cfg.state; + + if (state != HNAE3_PORT_BASE_VLAN_DISABLE) { + clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]); + ret = hclge_set_vlan_filter_hw(hdev, htons(vlan_proto), + vport->vport_id, + vlan_id, false); + vport->port_base_vlan_cfg.tbl_sta = ret == 0; + } + } +} + +void hclge_restore_vport_vlan_table(struct hclge_vport *vport) +{ + struct hclge_vport_vlan_cfg *vlan, *tmp; + struct hclge_dev *hdev = vport->back; + int ret; + + mutex_lock(&hdev->vport_lock); + + if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) { + list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { + ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), + vport->vport_id, + vlan->vlan_id, false); + if (ret) + break; + vlan->hd_tbl_status = true; + } + } + + mutex_unlock(&hdev->vport_lock); +} + +/* For global reset and imp reset, hardware will clear the mac table, + * so we change the mac address state from ACTIVE to TO_ADD, then they + * can be restored in the service task after reset complete. Furtherly, + * the mac addresses with state TO_DEL or DEL_FAIL are unnecessary to + * be restored after reset, so just remove these mac nodes from mac_list. + */ +static void hclge_mac_node_convert_for_reset(struct list_head *list) +{ + struct hclge_mac_node *mac_node, *tmp; + + list_for_each_entry_safe(mac_node, tmp, list, node) { + if (mac_node->state == HCLGE_MAC_ACTIVE) { + mac_node->state = HCLGE_MAC_TO_ADD; + } else if (mac_node->state == HCLGE_MAC_TO_DEL) { + list_del(&mac_node->node); + kfree(mac_node); + } + } +} + +void hclge_restore_mac_table_common(struct hclge_vport *vport) +{ + spin_lock_bh(&vport->mac_list_lock); + + hclge_mac_node_convert_for_reset(&vport->uc_mac_list); + hclge_mac_node_convert_for_reset(&vport->mc_mac_list); + set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state); + + spin_unlock_bh(&vport->mac_list_lock); +} + +static void hclge_restore_hw_table(struct hclge_dev *hdev) +{ + struct hclge_vport *vport = &hdev->vport[0]; + struct hnae3_handle *handle = &vport->nic; + + hclge_restore_mac_table_common(vport); + hclge_restore_vport_port_base_vlan_config(hdev); + hclge_restore_vport_vlan_table(vport); + set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state); + hclge_restore_fd_entries(handle); +} + +int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + + if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) { + vport->rxvlan_cfg.strip_tag1_en = false; + vport->rxvlan_cfg.strip_tag2_en = enable; + vport->rxvlan_cfg.strip_tag2_discard_en = false; + } else { + vport->rxvlan_cfg.strip_tag1_en = enable; + vport->rxvlan_cfg.strip_tag2_en = true; + vport->rxvlan_cfg.strip_tag2_discard_en = true; + } + + vport->rxvlan_cfg.strip_tag1_discard_en = false; + vport->rxvlan_cfg.vlan1_vlan_prionly = false; + vport->rxvlan_cfg.vlan2_vlan_prionly = false; + vport->rxvlan_cfg.rx_vlan_offload_en = enable; + + return hclge_set_vlan_rx_offload_cfg(vport); +} + +static void 
hclge_set_vport_vlan_fltr_change(struct hclge_vport *vport) +{ + struct hclge_dev *hdev = vport->back; + + if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps)) + set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE, &vport->state); +} + +static int hclge_update_vlan_filter_entries(struct hclge_vport *vport, + u16 port_base_vlan_state, + struct hclge_vlan_info *new_info, + struct hclge_vlan_info *old_info) +{ + struct hclge_dev *hdev = vport->back; + int ret; + + if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) { + hclge_rm_vport_all_vlan_table(vport, false); + /* force clear VLAN 0 */ + ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, true, 0); + if (ret) + return ret; + return hclge_set_vlan_filter_hw(hdev, + htons(new_info->vlan_proto), + vport->vport_id, + new_info->vlan_tag, + false); + } + + vport->port_base_vlan_cfg.tbl_sta = false; + + /* force add VLAN 0 */ + ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, false, 0); + if (ret) + return ret; + + ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto), + vport->vport_id, old_info->vlan_tag, + true); + if (ret) + return ret; + + return hclge_add_vport_all_vlan_table(vport); +} + +static bool hclge_need_update_vlan_filter(const struct hclge_vlan_info *new_cfg, + const struct hclge_vlan_info *old_cfg) +{ + if (new_cfg->vlan_tag != old_cfg->vlan_tag) + return true; + + if (new_cfg->vlan_tag == 0 && (new_cfg->qos == 0 || old_cfg->qos == 0)) + return true; + + return false; +} + +static int hclge_modify_port_base_vlan_tag(struct hclge_vport *vport, + struct hclge_vlan_info *new_info, + struct hclge_vlan_info *old_info) +{ + struct hclge_dev *hdev = vport->back; + int ret; + + /* add new VLAN tag */ + ret = hclge_set_vlan_filter_hw(hdev, htons(new_info->vlan_proto), + vport->vport_id, new_info->vlan_tag, + false); + if (ret) + return ret; + + vport->port_base_vlan_cfg.tbl_sta = false; + /* remove old VLAN tag */ + if (old_info->vlan_tag == 0) + ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, + true, 0); + else + ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), + vport->vport_id, + old_info->vlan_tag, true); + if (ret) + dev_err(&hdev->pdev->dev, + "failed to clear vport%u port base vlan %u, ret = %d.\n", + vport->vport_id, old_info->vlan_tag, ret); + + return ret; +} + +int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state, + struct hclge_vlan_info *vlan_info) +{ + struct hnae3_handle *nic = &vport->nic; + struct hclge_vlan_info *old_vlan_info; + int ret; + + old_vlan_info = &vport->port_base_vlan_cfg.vlan_info; + + ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag, + vlan_info->qos); + if (ret) + return ret; + + if (!hclge_need_update_vlan_filter(vlan_info, old_vlan_info)) + goto out; + + if (state == HNAE3_PORT_BASE_VLAN_MODIFY) + ret = hclge_modify_port_base_vlan_tag(vport, vlan_info, + old_vlan_info); + else + ret = hclge_update_vlan_filter_entries(vport, state, vlan_info, + old_vlan_info); + if (ret) + return ret; + +out: + vport->port_base_vlan_cfg.state = state; + if (state == HNAE3_PORT_BASE_VLAN_DISABLE) + nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE; + else + nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE; + + vport->port_base_vlan_cfg.old_vlan_info = *old_vlan_info; + vport->port_base_vlan_cfg.vlan_info = *vlan_info; + vport->port_base_vlan_cfg.tbl_sta = true; + hclge_set_vport_vlan_fltr_change(vport); + + return 0; +} + +static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport, + enum hnae3_port_base_vlan_state state, 
+ u16 vlan, u8 qos) +{ + if (state == HNAE3_PORT_BASE_VLAN_DISABLE) { + if (!vlan && !qos) + return HNAE3_PORT_BASE_VLAN_NOCHANGE; + + return HNAE3_PORT_BASE_VLAN_ENABLE; + } + + if (!vlan && !qos) + return HNAE3_PORT_BASE_VLAN_DISABLE; + + if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan && + vport->port_base_vlan_cfg.vlan_info.qos == qos) + return HNAE3_PORT_BASE_VLAN_NOCHANGE; + + return HNAE3_PORT_BASE_VLAN_MODIFY; +} + +static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid, + u16 vlan, u8 qos, __be16 proto) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev); + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct hclge_vlan_info vlan_info; + u16 state; + int ret; + + if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) + return -EOPNOTSUPP; + + vport = hclge_get_vf_vport(hdev, vfid); + if (!vport) + return -EINVAL; + + /* qos is a 3 bits value, so can not be bigger than 7 */ + if (vlan > VLAN_N_VID - 1 || qos > 7) + return -EINVAL; + if (proto != htons(ETH_P_8021Q)) + return -EPROTONOSUPPORT; + + state = hclge_get_port_base_vlan_state(vport, + vport->port_base_vlan_cfg.state, + vlan, qos); + if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE) + return 0; + + vlan_info.vlan_tag = vlan; + vlan_info.qos = qos; + vlan_info.vlan_proto = ntohs(proto); + + ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to update port base vlan for vf %d, ret = %d\n", + vfid, ret); + return ret; + } + + /* there is a timewindow for PF to know VF unalive, it may + * cause send mailbox fail, but it doesn't matter, VF will + * query it when reinit. + * for DEVICE_VERSION_V3, vf doesn't need to know about the port based + * VLAN state. + */ + if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) { + if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) + (void)hclge_push_vf_port_base_vlan_info(&hdev->vport[0], + vport->vport_id, + state, + &vlan_info); + else + set_bit(HCLGE_VPORT_NEED_NOTIFY_VF_VLAN, + &vport->need_notify); + } + return 0; +} + +static void hclge_clear_vf_vlan(struct hclge_dev *hdev) +{ + struct hclge_vlan_info *vlan_info; + struct hclge_vport *vport; + int ret; + int vf; + + /* clear port base vlan for all vf */ + for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) { + vport = &hdev->vport[vf]; + vlan_info = &vport->port_base_vlan_cfg.vlan_info; + + ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), + vport->vport_id, + vlan_info->vlan_tag, true); + if (ret) + dev_err(&hdev->pdev->dev, + "failed to clear vf vlan for vf%d, ret = %d\n", + vf - HCLGE_VF_VPORT_START_NUM, ret); + } +} + +int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto, + u16 vlan_id, bool is_kill) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + bool writen_to_tbl = false; + int ret = 0; + + /* When device is resetting or reset failed, firmware is unable to + * handle mailbox. Just record the vlan id, and remove it after + * reset finished. 
+ */ + mutex_lock(&hdev->vport_lock); + if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) || + test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) { + set_bit(vlan_id, vport->vlan_del_fail_bmap); + mutex_unlock(&hdev->vport_lock); + return -EBUSY; + } else if (!is_kill && test_bit(vlan_id, vport->vlan_del_fail_bmap)) { + clear_bit(vlan_id, vport->vlan_del_fail_bmap); + } + mutex_unlock(&hdev->vport_lock); + + /* when port base vlan enabled, we use port base vlan as the vlan + * filter entry. In this case, we don't update vlan filter table + * when user add new vlan or remove exist vlan, just update the vport + * vlan list. The vlan id in vlan list will be writen in vlan filter + * table until port base vlan disabled + */ + if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) { + ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id, + vlan_id, is_kill); + writen_to_tbl = true; + } + + if (!ret) { + if (!is_kill) { + hclge_add_vport_vlan_table(vport, vlan_id, + writen_to_tbl); + } else if (is_kill && vlan_id != 0) { + mutex_lock(&hdev->vport_lock); + hclge_rm_vport_vlan_table(vport, vlan_id, false); + mutex_unlock(&hdev->vport_lock); + } + } else if (is_kill) { + /* when remove hw vlan filter failed, record the vlan id, + * and try to remove it from hw later, to be consistence + * with stack + */ + mutex_lock(&hdev->vport_lock); + set_bit(vlan_id, vport->vlan_del_fail_bmap); + mutex_unlock(&hdev->vport_lock); + } + + hclge_set_vport_vlan_fltr_change(vport); + + return ret; +} + +static void hclge_sync_vlan_fltr_state(struct hclge_dev *hdev) +{ + struct hclge_vport *vport; + int ret; + u16 i; + + for (i = 0; i < hdev->num_alloc_vport; i++) { + vport = &hdev->vport[i]; + if (!test_and_clear_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE, + &vport->state)) + continue; + + ret = hclge_enable_vport_vlan_filter(vport, + vport->req_vlan_fltr_en); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to sync vlan filter state for vport%u, ret = %d\n", + vport->vport_id, ret); + set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE, + &vport->state); + return; + } + } +} + +static void hclge_sync_vlan_filter(struct hclge_dev *hdev) +{ +#define HCLGE_MAX_SYNC_COUNT 60 + + int i, ret, sync_cnt = 0; + u16 vlan_id; + + mutex_lock(&hdev->vport_lock); + /* start from vport 1 for PF is always alive */ + for (i = 0; i < hdev->num_alloc_vport; i++) { + struct hclge_vport *vport = &hdev->vport[i]; + + vlan_id = find_first_bit(vport->vlan_del_fail_bmap, + VLAN_N_VID); + while (vlan_id != VLAN_N_VID) { + ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), + vport->vport_id, vlan_id, + true); + if (ret && ret != -EINVAL) { + mutex_unlock(&hdev->vport_lock); + return; + } + + clear_bit(vlan_id, vport->vlan_del_fail_bmap); + hclge_rm_vport_vlan_table(vport, vlan_id, false); + hclge_set_vport_vlan_fltr_change(vport); + + sync_cnt++; + if (sync_cnt >= HCLGE_MAX_SYNC_COUNT) { + mutex_unlock(&hdev->vport_lock); + return; + } + + vlan_id = find_first_bit(vport->vlan_del_fail_bmap, + VLAN_N_VID); + } + } + mutex_unlock(&hdev->vport_lock); + + hclge_sync_vlan_fltr_state(hdev); +} + +static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps) +{ + struct hclge_config_max_frm_size_cmd *req; + struct hclge_desc desc; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false); + + req = (struct hclge_config_max_frm_size_cmd *)desc.data; + req->max_frm_size = cpu_to_le16(new_mps); + req->min_frm_size = HCLGE_MAC_MIN_FRAME; + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int 
hclge_set_mtu(struct hnae3_handle *handle, int new_mtu) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + + return hclge_set_vport_mtu(vport, new_mtu); +} + +int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu) +{ + struct hclge_dev *hdev = vport->back; + int i, max_frm_size, ret; + + /* HW supprt 2 layer vlan */ + max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN; + if (max_frm_size < HCLGE_MAC_MIN_FRAME || + max_frm_size > hdev->ae_dev->dev_specs.max_frm_size) + return -EINVAL; + + max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME); + mutex_lock(&hdev->vport_lock); + /* VF's mps must fit within hdev->mps */ + if (vport->vport_id && max_frm_size > hdev->mps) { + mutex_unlock(&hdev->vport_lock); + return -EINVAL; + } else if (vport->vport_id) { + vport->mps = max_frm_size; + mutex_unlock(&hdev->vport_lock); + return 0; + } + + /* PF's mps must be greater then VF's mps */ + for (i = 1; i < hdev->num_alloc_vport; i++) + if (max_frm_size < hdev->vport[i].mps) { + dev_err(&hdev->pdev->dev, + "failed to set pf mtu for less than vport %d, mps = %u.\n", + i, hdev->vport[i].mps); + mutex_unlock(&hdev->vport_lock); + return -EINVAL; + } + + hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); + + ret = hclge_set_mac_mtu(hdev, max_frm_size); + if (ret) { + dev_err(&hdev->pdev->dev, + "Change mtu fail, ret =%d\n", ret); + goto out; + } + + hdev->mps = max_frm_size; + vport->mps = max_frm_size; + + ret = hclge_buffer_alloc(hdev); + if (ret) + dev_err(&hdev->pdev->dev, + "Allocate buffer fail, ret =%d\n", ret); + +out: + hclge_notify_client(hdev, HNAE3_UP_CLIENT); + mutex_unlock(&hdev->vport_lock); + return ret; +} + +static int hclge_reset_tqp_cmd_send(struct hclge_dev *hdev, u16 queue_id, + bool enable) +{ + struct hclge_reset_tqp_queue_cmd *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false); + + req = (struct hclge_reset_tqp_queue_cmd *)desc.data; + req->tqp_id = cpu_to_le16(queue_id); + if (enable) + hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "Send tqp reset cmd error, status =%d\n", ret); + return ret; + } + + return 0; +} + +static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id, + u8 *reset_status) +{ + struct hclge_reset_tqp_queue_cmd *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true); + + req = (struct hclge_reset_tqp_queue_cmd *)desc.data; + req->tqp_id = cpu_to_le16(queue_id); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "Get reset status error, status =%d\n", ret); + return ret; + } + + *reset_status = hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B); + + return 0; +} + +u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id) +{ + struct hclge_comm_tqp *tqp; + struct hnae3_queue *queue; + + queue = handle->kinfo.tqp[queue_id]; + tqp = container_of(queue, struct hclge_comm_tqp, q); + + return tqp->index; +} + +static int hclge_reset_tqp_cmd(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + u16 reset_try_times = 0; + u8 reset_status; + u16 queue_gid; + int ret; + u16 i; + + for (i = 0; i < handle->kinfo.num_tqps; i++) { + queue_gid = hclge_covert_handle_qid_global(handle, i); + ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, true); + if (ret) { + dev_err(&hdev->pdev->dev, 
+ "failed to send reset tqp cmd, ret = %d\n", + ret); + return ret; + } + + while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) { + ret = hclge_get_reset_status(hdev, queue_gid, + &reset_status); + if (ret) + return ret; + + if (reset_status) + break; + + /* Wait for tqp hw reset */ + usleep_range(1000, 1200); + } + + if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) { + dev_err(&hdev->pdev->dev, + "wait for tqp hw reset timeout\n"); + return -ETIME; + } + + ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, false); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to deassert soft reset, ret = %d\n", + ret); + return ret; + } + reset_try_times = 0; + } + return 0; +} + +static int hclge_reset_rcb(struct hnae3_handle *handle) +{ +#define HCLGE_RESET_RCB_NOT_SUPPORT 0U +#define HCLGE_RESET_RCB_SUCCESS 1U + + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct hclge_reset_cmd *req; + struct hclge_desc desc; + u8 return_status; + u16 queue_gid; + int ret; + + queue_gid = hclge_covert_handle_qid_global(handle, 0); + + req = (struct hclge_reset_cmd *)desc.data; + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false); + hnae3_set_bit(req->fun_reset_rcb, HCLGE_CFG_RESET_RCB_B, 1); + req->fun_reset_rcb_vqid_start = cpu_to_le16(queue_gid); + req->fun_reset_rcb_vqid_num = cpu_to_le16(handle->kinfo.num_tqps); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to send rcb reset cmd, ret = %d\n", ret); + return ret; + } + + return_status = req->fun_reset_rcb_return_status; + if (return_status == HCLGE_RESET_RCB_SUCCESS) + return 0; + + if (return_status != HCLGE_RESET_RCB_NOT_SUPPORT) { + dev_err(&hdev->pdev->dev, "failed to reset rcb, ret = %u\n", + return_status); + return -EIO; + } + + /* if reset rcb cmd is unsupported, we need to send reset tqp cmd + * again to reset all tqps + */ + return hclge_reset_tqp_cmd(handle); +} + +int hclge_reset_tqp(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + int ret; + + /* only need to disable PF's tqp */ + if (!vport->vport_id) { + ret = hclge_tqp_enable(handle, false); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to disable tqp, ret = %d\n", ret); + return ret; + } + } + + return hclge_reset_rcb(handle); +} + +static u32 hclge_get_fw_version(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + return hdev->fw_version; +} + +static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en) +{ + struct phy_device *phydev = hdev->hw.mac.phydev; + + if (!phydev) + return; + + phy_set_asym_pause(phydev, rx_en, tx_en); +} + +static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en) +{ + int ret; + + if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) + return 0; + + ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en); + if (ret) + dev_err(&hdev->pdev->dev, + "configure pauseparam error, ret = %d.\n", ret); + + return ret; +} + +int hclge_cfg_flowctrl(struct hclge_dev *hdev) +{ + struct phy_device *phydev = hdev->hw.mac.phydev; + u16 remote_advertising = 0; + u16 local_advertising; + u32 rx_pause, tx_pause; + u8 flowctl; + + if (!phydev->link) + return 0; + + if (!phydev->autoneg) + return hclge_mac_pause_setup_hw(hdev); + + local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising); + + if (phydev->pause) + remote_advertising = LPA_PAUSE_CAP; + + if 
(phydev->asym_pause) + remote_advertising |= LPA_PAUSE_ASYM; + + flowctl = mii_resolve_flowctrl_fdx(local_advertising, + remote_advertising); + tx_pause = flowctl & FLOW_CTRL_TX; + rx_pause = flowctl & FLOW_CTRL_RX; + + if (phydev->duplex == HCLGE_MAC_HALF) { + tx_pause = 0; + rx_pause = 0; + } + + return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause); +} + +static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg, + u32 *rx_en, u32 *tx_en) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + u8 media_type = hdev->hw.mac.media_type; + + *auto_neg = (media_type == HNAE3_MEDIA_TYPE_COPPER) ? + hclge_get_autoneg(handle) : 0; + + if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) { + *rx_en = 0; + *tx_en = 0; + return; + } + + if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) { + *rx_en = 1; + *tx_en = 0; + } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) { + *tx_en = 1; + *rx_en = 0; + } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) { + *rx_en = 1; + *tx_en = 1; + } else { + *rx_en = 0; + *tx_en = 0; + } +} + +static void hclge_record_user_pauseparam(struct hclge_dev *hdev, + u32 rx_en, u32 tx_en) +{ + if (rx_en && tx_en) + hdev->fc_mode_last_time = HCLGE_FC_FULL; + else if (rx_en && !tx_en) + hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE; + else if (!rx_en && tx_en) + hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE; + else + hdev->fc_mode_last_time = HCLGE_FC_NONE; + + hdev->tm_info.fc_mode = hdev->fc_mode_last_time; +} + +static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg, + u32 rx_en, u32 tx_en) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct phy_device *phydev = hdev->hw.mac.phydev; + u32 fc_autoneg; + + if (phydev || hnae3_dev_phy_imp_supported(hdev)) { + fc_autoneg = hclge_get_autoneg(handle); + if (auto_neg != fc_autoneg) { + dev_info(&hdev->pdev->dev, + "To change autoneg please use: ethtool -s autoneg \n"); + return -EOPNOTSUPP; + } + } + + if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) { + dev_info(&hdev->pdev->dev, + "Priority flow control enabled. Cannot set link flow control.\n"); + return -EOPNOTSUPP; + } + + hclge_set_flowctrl_adv(hdev, rx_en, tx_en); + + hclge_record_user_pauseparam(hdev, rx_en, tx_en); + + if (!auto_neg || hnae3_dev_phy_imp_supported(hdev)) + return hclge_cfg_pauseparam(hdev, rx_en, tx_en); + + if (phydev) + return phy_start_aneg(phydev); + + return -EOPNOTSUPP; +} + +static void hclge_get_ksettings_an_result(struct hnae3_handle *handle, + u8 *auto_neg, u32 *speed, u8 *duplex, u32 *lane_num) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + if (speed) + *speed = hdev->hw.mac.speed; + if (duplex) + *duplex = hdev->hw.mac.duplex; + if (auto_neg) + *auto_neg = hdev->hw.mac.autoneg; + if (lane_num) + *lane_num = hdev->hw.mac.lane_num; +} + +static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type, + u8 *module_type) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + /* When nic is down, the service task is not running, doesn't update + * the port information per second. Query the port information before + * return the media type, ensure getting the correct media information. 
+ */ + hclge_update_port_info(hdev); + + if (media_type) + *media_type = hdev->hw.mac.media_type; + + if (module_type) + *module_type = hdev->hw.mac.module_type; +} + +static void hclge_get_mdix_mode(struct hnae3_handle *handle, + u8 *tp_mdix_ctrl, u8 *tp_mdix) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct phy_device *phydev = hdev->hw.mac.phydev; + int mdix_ctrl, mdix, is_resolved; + unsigned int retval; + + if (!phydev) { + *tp_mdix_ctrl = ETH_TP_MDI_INVALID; + *tp_mdix = ETH_TP_MDI_INVALID; + return; + } + + phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX); + + retval = phy_read(phydev, HCLGE_PHY_CSC_REG); + mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M, + HCLGE_PHY_MDIX_CTRL_S); + + retval = phy_read(phydev, HCLGE_PHY_CSS_REG); + mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B); + is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B); + + phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER); + + switch (mdix_ctrl) { + case 0x0: + *tp_mdix_ctrl = ETH_TP_MDI; + break; + case 0x1: + *tp_mdix_ctrl = ETH_TP_MDI_X; + break; + case 0x3: + *tp_mdix_ctrl = ETH_TP_MDI_AUTO; + break; + default: + *tp_mdix_ctrl = ETH_TP_MDI_INVALID; + break; + } + + if (!is_resolved) + *tp_mdix = ETH_TP_MDI_INVALID; + else if (mdix) + *tp_mdix = ETH_TP_MDI_X; + else + *tp_mdix = ETH_TP_MDI; +} + +static void hclge_info_show(struct hclge_dev *hdev) +{ + struct hnae3_handle *handle = &hdev->vport->nic; + struct device *dev = &hdev->pdev->dev; + + dev_info(dev, "PF info begin:\n"); + + dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps); + dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc); + dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc); + dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport); + dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs); + dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map); + dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size); + dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size); + dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size); + dev_info(dev, "This is %s PF\n", + hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main"); + dev_info(dev, "DCB %s\n", + handle->kinfo.tc_info.dcb_ets_active ? "enable" : "disable"); + dev_info(dev, "MQPRIO %s\n", + handle->kinfo.tc_info.mqprio_active ? 
"enable" : "disable"); + dev_info(dev, "Default tx spare buffer size: %u\n", + hdev->tx_spare_buf_size); + + dev_info(dev, "PF info end.\n"); +} + +static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev, + struct hclge_vport *vport) +{ + struct hnae3_client *client = vport->nic.client; + struct hclge_dev *hdev = ae_dev->priv; + int rst_cnt = hdev->rst_stats.reset_cnt; + int ret; + + ret = client->ops->init_instance(&vport->nic); + if (ret) + return ret; + + set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state); + if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) || + rst_cnt != hdev->rst_stats.reset_cnt) { + ret = -EBUSY; + goto init_nic_err; + } + + /* Enable nic hw error interrupts */ + ret = hclge_config_nic_hw_error(hdev, true); + if (ret) { + dev_err(&ae_dev->pdev->dev, + "fail(%d) to enable hw error interrupts\n", ret); + goto init_nic_err; + } + + hnae3_set_client_init_flag(client, ae_dev, 1); + + if (netif_msg_drv(&hdev->vport->nic)) + hclge_info_show(hdev); + + return ret; + +init_nic_err: + clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state); + while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) + msleep(HCLGE_WAIT_RESET_DONE); + + client->ops->uninit_instance(&vport->nic, 0); + + return ret; +} + +static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev, + struct hclge_vport *vport) +{ + struct hclge_dev *hdev = ae_dev->priv; + struct hnae3_client *client; + int rst_cnt; + int ret; + + if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client || + !hdev->nic_client) + return 0; + + client = hdev->roce_client; + ret = hclge_init_roce_base_info(vport); + if (ret) + return ret; + + rst_cnt = hdev->rst_stats.reset_cnt; + ret = client->ops->init_instance(&vport->roce); + if (ret) + return ret; + + set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state); + if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) || + rst_cnt != hdev->rst_stats.reset_cnt) { + ret = -EBUSY; + goto init_roce_err; + } + + /* Enable roce ras interrupts */ + ret = hclge_config_rocee_ras_interrupt(hdev, true); + if (ret) { + dev_err(&ae_dev->pdev->dev, + "fail(%d) to enable roce ras interrupts\n", ret); + goto init_roce_err; + } + + hnae3_set_client_init_flag(client, ae_dev, 1); + + return 0; + +init_roce_err: + clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state); + while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) + msleep(HCLGE_WAIT_RESET_DONE); + + hdev->roce_client->ops->uninit_instance(&vport->roce, 0); + + return ret; +} + +static int hclge_init_client_instance(struct hnae3_client *client, + struct hnae3_ae_dev *ae_dev) +{ + struct hclge_dev *hdev = ae_dev->priv; + struct hclge_vport *vport = &hdev->vport[0]; + int ret; + + switch (client->type) { + case HNAE3_CLIENT_KNIC: + hdev->nic_client = client; + vport->nic.client = client; + ret = hclge_init_nic_client_instance(ae_dev, vport); + if (ret) + goto clear_nic; + + ret = hclge_init_roce_client_instance(ae_dev, vport); + if (ret) + goto clear_roce; + + break; + case HNAE3_CLIENT_ROCE: + if (hnae3_dev_roce_supported(hdev)) { + hdev->roce_client = client; + vport->roce.client = client; + } + + ret = hclge_init_roce_client_instance(ae_dev, vport); + if (ret) + goto clear_roce; + + break; + default: + return -EINVAL; + } + + return 0; + +clear_nic: + hdev->nic_client = NULL; + vport->nic.client = NULL; + return ret; +clear_roce: + hdev->roce_client = NULL; + vport->roce.client = NULL; + return ret; +} + +static void hclge_uninit_client_instance(struct hnae3_client *client, + struct hnae3_ae_dev *ae_dev) +{ + struct 
hclge_dev *hdev = ae_dev->priv; + struct hclge_vport *vport = &hdev->vport[0]; + + if (hdev->roce_client) { + clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state); + while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) + msleep(HCLGE_WAIT_RESET_DONE); + + hdev->roce_client->ops->uninit_instance(&vport->roce, 0); + hdev->roce_client = NULL; + vport->roce.client = NULL; + } + if (client->type == HNAE3_CLIENT_ROCE) + return; + if (hdev->nic_client && client->ops->uninit_instance) { + clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state); + while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) + msleep(HCLGE_WAIT_RESET_DONE); + + client->ops->uninit_instance(&vport->nic, 0); + hdev->nic_client = NULL; + vport->nic.client = NULL; + } +} + +static int hclge_dev_mem_map(struct hclge_dev *hdev) +{ + struct pci_dev *pdev = hdev->pdev; + struct hclge_hw *hw = &hdev->hw; + + /* for device does not have device memory, return directly */ + if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGE_MEM_BAR))) + return 0; + + hw->hw.mem_base = + devm_ioremap_wc(&pdev->dev, + pci_resource_start(pdev, HCLGE_MEM_BAR), + pci_resource_len(pdev, HCLGE_MEM_BAR)); + if (!hw->hw.mem_base) { + dev_err(&pdev->dev, "failed to map device memory\n"); + return -EFAULT; + } + + return 0; +} + +static int hclge_pci_init(struct hclge_dev *hdev) +{ + struct pci_dev *pdev = hdev->pdev; + struct hclge_hw *hw; + int ret; + + ret = pci_enable_device(pdev); + if (ret) { + dev_err(&pdev->dev, "failed to enable PCI device\n"); + return ret; + } + + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (ret) { + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (ret) { + dev_err(&pdev->dev, + "can't set consistent PCI DMA"); + goto err_disable_device; + } + dev_warn(&pdev->dev, "set DMA mask to 32 bits\n"); + } + + ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME); + if (ret) { + dev_err(&pdev->dev, "PCI request regions failed %d\n", ret); + goto err_disable_device; + } + + pci_set_master(pdev); + hw = &hdev->hw; + hw->hw.io_base = pcim_iomap(pdev, 2, 0); + if (!hw->hw.io_base) { + dev_err(&pdev->dev, "Can't map configuration register space\n"); + ret = -ENOMEM; + goto err_clr_master; + } + + ret = hclge_dev_mem_map(hdev); + if (ret) + goto err_unmap_io_base; + + hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev); + + return 0; + +err_unmap_io_base: + pcim_iounmap(pdev, hdev->hw.hw.io_base); +err_clr_master: + pci_clear_master(pdev); + pci_release_regions(pdev); +err_disable_device: + pci_disable_device(pdev); + + return ret; +} + +static void hclge_pci_uninit(struct hclge_dev *hdev) +{ + struct pci_dev *pdev = hdev->pdev; + + if (hdev->hw.hw.mem_base) + devm_iounmap(&pdev->dev, hdev->hw.hw.mem_base); + + pcim_iounmap(pdev, hdev->hw.hw.io_base); + pci_free_irq_vectors(pdev); + pci_clear_master(pdev); + pci_release_mem_regions(pdev); + pci_disable_device(pdev); +} + +static void hclge_state_init(struct hclge_dev *hdev) +{ + set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state); + set_bit(HCLGE_STATE_DOWN, &hdev->state); + clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state); + clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); + clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state); + clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state); + clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state); +} + +static void hclge_state_uninit(struct hclge_dev *hdev) +{ + set_bit(HCLGE_STATE_DOWN, &hdev->state); + set_bit(HCLGE_STATE_REMOVING, &hdev->state); + + if (hdev->reset_timer.function) + 
del_timer_sync(&hdev->reset_timer); + if (hdev->service_task.work.func) + cancel_delayed_work_sync(&hdev->service_task); +} + +static void hclge_reset_prepare_general(struct hnae3_ae_dev *ae_dev, + enum hnae3_reset_type rst_type) +{ +#define HCLGE_RESET_RETRY_WAIT_MS 500 +#define HCLGE_RESET_RETRY_CNT 5 + + struct hclge_dev *hdev = ae_dev->priv; + int retry_cnt = 0; + int ret; + + while (retry_cnt++ < HCLGE_RESET_RETRY_CNT) { + down(&hdev->reset_sem); + set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); + hdev->reset_type = rst_type; + ret = hclge_reset_prepare(hdev); + if (!ret && !hdev->reset_pending) + break; + + dev_err(&hdev->pdev->dev, + "failed to prepare to reset, ret=%d, reset_pending:0x%lx, retry_cnt:%d\n", + ret, hdev->reset_pending, retry_cnt); + clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); + up(&hdev->reset_sem); + msleep(HCLGE_RESET_RETRY_WAIT_MS); + } + + /* disable misc vector before reset done */ + hclge_enable_vector(&hdev->misc_vector, false); + set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); + + if (hdev->reset_type == HNAE3_FLR_RESET) + hdev->rst_stats.flr_rst_cnt++; +} + +static void hclge_reset_done(struct hnae3_ae_dev *ae_dev) +{ + struct hclge_dev *hdev = ae_dev->priv; + int ret; + + hclge_enable_vector(&hdev->misc_vector, true); + + ret = hclge_reset_rebuild(hdev); + if (ret) + dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret); + + hdev->reset_type = HNAE3_NONE_RESET; + clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); + up(&hdev->reset_sem); +} + +static void hclge_clear_resetting_state(struct hclge_dev *hdev) +{ + u16 i; + + for (i = 0; i < hdev->num_alloc_vport; i++) { + struct hclge_vport *vport = &hdev->vport[i]; + int ret; + + /* Send cmd to clear vport's FUNC_RST_ING */ + ret = hclge_set_vf_rst(hdev, vport->vport_id, false); + if (ret) + dev_warn(&hdev->pdev->dev, + "clear vport(%u) rst failed %d!\n", + vport->vport_id, ret); + } +} + +static int hclge_clear_hw_resource(struct hclge_dev *hdev) +{ + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CLEAR_HW_RESOURCE, false); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + /* This new command is only supported by new firmware, it will + * fail with older firmware. Error value -EOPNOTSUPP can only be + * returned by older firmware running this command, to keep code + * backward compatible we will override this value and return + * success.
+ */ + if (ret && ret != -EOPNOTSUPP) { + dev_err(&hdev->pdev->dev, + "failed to clear hw resource, ret = %d\n", ret); + return ret; + } + return 0; +} + +static void hclge_init_rxd_adv_layout(struct hclge_dev *hdev) +{ + if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev)) + hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 1); +} + +static void hclge_uninit_rxd_adv_layout(struct hclge_dev *hdev) +{ + if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev)) + hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 0); +} + +static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) +{ + struct pci_dev *pdev = ae_dev->pdev; + struct hclge_dev *hdev; + int ret; + + hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL); + if (!hdev) + return -ENOMEM; + + hdev->pdev = pdev; + hdev->ae_dev = ae_dev; + hdev->reset_type = HNAE3_NONE_RESET; + hdev->reset_level = HNAE3_FUNC_RESET; + ae_dev->priv = hdev; + + /* HW supprt 2 layer vlan */ + hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN; + + mutex_init(&hdev->vport_lock); + spin_lock_init(&hdev->fd_rule_lock); + sema_init(&hdev->reset_sem, 1); + + ret = hclge_pci_init(hdev); + if (ret) + goto out; + + ret = hclge_devlink_init(hdev); + if (ret) + goto err_pci_uninit; + + /* Firmware command queue initialize */ + ret = hclge_comm_cmd_queue_init(hdev->pdev, &hdev->hw.hw); + if (ret) + goto err_devlink_uninit; + + /* Firmware command initialize */ + ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, &hdev->fw_version, + true, hdev->reset_pending); + if (ret) + goto err_cmd_uninit; + + ret = hclge_clear_hw_resource(hdev); + if (ret) + goto err_cmd_uninit; + + ret = hclge_get_cap(hdev); + if (ret) + goto err_cmd_uninit; + + ret = hclge_query_dev_specs(hdev); + if (ret) { + dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n", + ret); + goto err_cmd_uninit; + } + + ret = hclge_configure(hdev); + if (ret) { + dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret); + goto err_cmd_uninit; + } + + ret = hclge_init_msi(hdev); + if (ret) { + dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret); + goto err_cmd_uninit; + } + + ret = hclge_misc_irq_init(hdev); + if (ret) + goto err_msi_uninit; + + ret = hclge_alloc_tqps(hdev); + if (ret) { + dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret); + goto err_msi_irq_uninit; + } + + ret = hclge_alloc_vport(hdev); + if (ret) + goto err_msi_irq_uninit; + + ret = hclge_map_tqp(hdev); + if (ret) + goto err_msi_irq_uninit; + + if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) { + clear_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps); + if (hnae3_dev_phy_imp_supported(hdev)) + ret = hclge_update_tp_port_info(hdev); + else + ret = hclge_mac_mdio_config(hdev); + + if (ret) + goto err_msi_irq_uninit; + } + + ret = hclge_init_umv_space(hdev); + if (ret) + goto err_mdiobus_unreg; + + ret = hclge_mac_init(hdev); + if (ret) { + dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret); + goto err_mdiobus_unreg; + } + + ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX); + if (ret) { + dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret); + goto err_mdiobus_unreg; + } + + ret = hclge_config_gro(hdev); + if (ret) + goto err_mdiobus_unreg; + + ret = hclge_init_vlan_config(hdev); + if (ret) { + dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret); + goto err_mdiobus_unreg; + } + + ret = hclge_tm_schd_init(hdev); + if (ret) { + dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret); + goto err_mdiobus_unreg; + } + + ret = 
hclge_comm_rss_init_cfg(&hdev->vport->nic, hdev->ae_dev, + &hdev->rss_cfg); + if (ret) { + dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret); + goto err_mdiobus_unreg; + } + + ret = hclge_rss_init_hw(hdev); + if (ret) { + dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret); + goto err_mdiobus_unreg; + } + + ret = init_mgr_tbl(hdev); + if (ret) { + dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret); + goto err_mdiobus_unreg; + } + + ret = hclge_init_fd_config(hdev); + if (ret) { + dev_err(&pdev->dev, + "fd table init fail, ret=%d\n", ret); + goto err_mdiobus_unreg; + } + + ret = hclge_ptp_init(hdev); + if (ret) + goto err_mdiobus_unreg; + + ret = hclge_update_port_info(hdev); + if (ret) + goto err_mdiobus_unreg; + + INIT_KFIFO(hdev->mac_tnl_log); + + hclge_dcb_ops_set(hdev); + + timer_setup(&hdev->reset_timer, hclge_reset_timer, 0); + INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task); + + hclge_clear_all_event_cause(hdev); + hclge_clear_resetting_state(hdev); + + /* Log and clear the hw errors those already occurred */ + if (hnae3_dev_ras_imp_supported(hdev)) + hclge_handle_occurred_error(hdev); + else + hclge_handle_all_hns_hw_errors(ae_dev); + + /* request delayed reset for the error recovery because an immediate + * global reset on a PF affecting pending initialization of other PFs + */ + if (ae_dev->hw_err_reset_req) { + enum hnae3_reset_type reset_level; + + reset_level = hclge_get_reset_level(ae_dev, + &ae_dev->hw_err_reset_req); + hclge_set_def_reset_request(ae_dev, reset_level); + mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL); + } + + hclge_init_rxd_adv_layout(hdev); + + /* Enable MISC vector(vector0) */ + hclge_enable_vector(&hdev->misc_vector, true); + + hclge_state_init(hdev); + hdev->last_reset_time = jiffies; + + dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n", + HCLGE_DRIVER_NAME); + + hclge_task_schedule(hdev, round_jiffies_relative(HZ)); + + return 0; + +err_mdiobus_unreg: + if (hdev->hw.mac.phydev) + mdiobus_unregister(hdev->hw.mac.mdio_bus); +err_msi_irq_uninit: + hclge_misc_irq_uninit(hdev); +err_msi_uninit: + pci_free_irq_vectors(pdev); +err_cmd_uninit: + hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw); +err_devlink_uninit: + hclge_devlink_uninit(hdev); +err_pci_uninit: + pcim_iounmap(pdev, hdev->hw.hw.io_base); + pci_clear_master(pdev); + pci_release_regions(pdev); + pci_disable_device(pdev); +out: + mutex_destroy(&hdev->vport_lock); + return ret; +} + +static void hclge_stats_clear(struct hclge_dev *hdev) +{ + memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats)); + memset(&hdev->fec_stats, 0, sizeof(hdev->fec_stats)); +} + +static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable) +{ + return hclge_config_switch_param(hdev, vf, enable, + HCLGE_SWITCH_ANTI_SPOOF_MASK); +} + +static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable) +{ + return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, + HCLGE_FILTER_FE_NIC_INGRESS_B, + enable, vf); +} + +static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable) +{ + int ret; + + ret = hclge_set_mac_spoofchk(hdev, vf, enable); + if (ret) { + dev_err(&hdev->pdev->dev, + "Set vf %d mac spoof check %s failed, ret=%d\n", + vf, enable ? "on" : "off", ret); + return ret; + } + + ret = hclge_set_vlan_spoofchk(hdev, vf, enable); + if (ret) + dev_err(&hdev->pdev->dev, + "Set vf %d vlan spoof check %s failed, ret=%d\n", + vf, enable ? 
"on" : "off", ret); + + return ret; +} + +static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf, + bool enable) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + u32 new_spoofchk = enable ? 1 : 0; + int ret; + + if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) + return -EOPNOTSUPP; + + vport = hclge_get_vf_vport(hdev, vf); + if (!vport) + return -EINVAL; + + if (vport->vf_info.spoofchk == new_spoofchk) + return 0; + + if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full)) + dev_warn(&hdev->pdev->dev, + "vf %d vlan table is full, enable spoof check may cause its packet send fail\n", + vf); + else if (enable && hclge_is_umv_space_full(vport, true)) + dev_warn(&hdev->pdev->dev, + "vf %d mac table is full, enable spoof check may cause its packet send fail\n", + vf); + + ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable); + if (ret) + return ret; + + vport->vf_info.spoofchk = new_spoofchk; + return 0; +} + +static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev) +{ + struct hclge_vport *vport = hdev->vport; + int ret; + int i; + + if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) + return 0; + + /* resume the vf spoof check state after reset */ + for (i = 0; i < hdev->num_alloc_vport; i++) { + ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, + vport->vf_info.spoofchk); + if (ret) + return ret; + + vport++; + } + + return 0; +} + +static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + u32 new_trusted = enable ? 1 : 0; + + vport = hclge_get_vf_vport(hdev, vf); + if (!vport) + return -EINVAL; + + if (vport->vf_info.trusted == new_trusted) + return 0; + + vport->vf_info.trusted = new_trusted; + set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state); + hclge_task_schedule(hdev, 0); + + return 0; +} + +static void hclge_reset_vf_rate(struct hclge_dev *hdev) +{ + int ret; + int vf; + + /* reset vf rate to default value */ + for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) { + struct hclge_vport *vport = &hdev->vport[vf]; + + vport->vf_info.max_tx_rate = 0; + ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate); + if (ret) + dev_err(&hdev->pdev->dev, + "vf%d failed to reset to default, ret=%d\n", + vf - HCLGE_VF_VPORT_START_NUM, ret); + } +} + +static int hclge_vf_rate_param_check(struct hclge_dev *hdev, + int min_tx_rate, int max_tx_rate) +{ + if (min_tx_rate != 0 || + max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) { + dev_err(&hdev->pdev->dev, + "min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n", + min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed); + return -EINVAL; + } + + return 0; +} + +static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf, + int min_tx_rate, int max_tx_rate, bool force) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + int ret; + + ret = hclge_vf_rate_param_check(hdev, min_tx_rate, max_tx_rate); + if (ret) + return ret; + + vport = hclge_get_vf_vport(hdev, vf); + if (!vport) + return -EINVAL; + + if (!force && max_tx_rate == vport->vf_info.max_tx_rate) + return 0; + + ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate); + if (ret) + return ret; + + vport->vf_info.max_tx_rate = max_tx_rate; + + return 0; +} + +static int hclge_resume_vf_rate(struct hclge_dev *hdev) +{ + struct hnae3_handle *handle = &hdev->vport->nic; + struct hclge_vport *vport; + int 
ret; + int vf; + + /* resume the vf max_tx_rate after reset */ + for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) { + vport = hclge_get_vf_vport(hdev, vf); + if (!vport) + return -EINVAL; + + /* zero means max rate, after reset, firmware already set it to + * max rate, so just continue. + */ + if (!vport->vf_info.max_tx_rate) + continue; + + ret = hclge_set_vf_rate(handle, vf, 0, + vport->vf_info.max_tx_rate, true); + if (ret) { + dev_err(&hdev->pdev->dev, + "vf%d failed to resume tx_rate:%u, ret=%d\n", + vf, vport->vf_info.max_tx_rate, ret); + return ret; + } + } + + return 0; +} + +static void hclge_reset_vport_state(struct hclge_dev *hdev) +{ + struct hclge_vport *vport = hdev->vport; + int i; + + for (i = 0; i < hdev->num_alloc_vport; i++) { + clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); + vport++; + } +} + +static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev) +{ + struct hclge_dev *hdev = ae_dev->priv; + struct pci_dev *pdev = ae_dev->pdev; + int ret; + + set_bit(HCLGE_STATE_DOWN, &hdev->state); + + hclge_stats_clear(hdev); + /* NOTE: pf reset needn't to clear or restore pf and vf table entry. + * so here should not clean table in memory. + */ + if (hdev->reset_type == HNAE3_IMP_RESET || + hdev->reset_type == HNAE3_GLOBAL_RESET) { + memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table)); + memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full)); + bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport); + hclge_reset_umv_space(hdev); + } + + ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, &hdev->fw_version, + true, hdev->reset_pending); + if (ret) { + dev_err(&pdev->dev, "Cmd queue init failed\n"); + return ret; + } + + ret = hclge_map_tqp(hdev); + if (ret) { + dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret); + return ret; + } + + ret = hclge_mac_init(hdev); + if (ret) { + dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret); + return ret; + } + + ret = hclge_tp_port_init(hdev); + if (ret) { + dev_err(&pdev->dev, "failed to init tp port, ret = %d\n", + ret); + return ret; + } + + ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX); + if (ret) { + dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret); + return ret; + } + + ret = hclge_config_gro(hdev); + if (ret) + return ret; + + ret = hclge_init_vlan_config(hdev); + if (ret) { + dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret); + return ret; + } + + ret = hclge_tm_init_hw(hdev, true); + if (ret) { + dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret); + return ret; + } + + ret = hclge_rss_init_hw(hdev); + if (ret) { + dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret); + return ret; + } + + ret = init_mgr_tbl(hdev); + if (ret) { + dev_err(&pdev->dev, + "failed to reinit manager table, ret = %d\n", ret); + return ret; + } + + ret = hclge_init_fd_config(hdev); + if (ret) { + dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret); + return ret; + } + + ret = hclge_ptp_init(hdev); + if (ret) + return ret; + + /* Log and clear the hw errors those already occurred */ + if (hnae3_dev_ras_imp_supported(hdev)) + hclge_handle_occurred_error(hdev); + else + hclge_handle_all_hns_hw_errors(ae_dev); + + /* Re-enable the hw error interrupts because + * the interrupts get disabled on global reset. 
+ */ + ret = hclge_config_nic_hw_error(hdev, true); + if (ret) { + dev_err(&pdev->dev, + "fail(%d) to re-enable NIC hw error interrupts\n", + ret); + return ret; + } + + if (hdev->roce_client) { + ret = hclge_config_rocee_ras_interrupt(hdev, true); + if (ret) { + dev_err(&pdev->dev, + "fail(%d) to re-enable roce ras interrupts\n", + ret); + return ret; + } + } + + hclge_reset_vport_state(hdev); + ret = hclge_reset_vport_spoofchk(hdev); + if (ret) + return ret; + + ret = hclge_resume_vf_rate(hdev); + if (ret) + return ret; + + hclge_init_rxd_adv_layout(hdev); + + dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n", + HCLGE_DRIVER_NAME); + + return 0; +} + +static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) +{ + struct hclge_dev *hdev = ae_dev->priv; + struct hclge_mac *mac = &hdev->hw.mac; + + hclge_reset_vf_rate(hdev); + hclge_clear_vf_vlan(hdev); + hclge_state_uninit(hdev); + hclge_ptp_uninit(hdev); + hclge_uninit_rxd_adv_layout(hdev); + hclge_uninit_mac_table(hdev); + hclge_del_all_fd_entries(hdev); + + if (mac->phydev) + mdiobus_unregister(mac->mdio_bus); + + /* Disable MISC vector(vector0) */ + hclge_enable_vector(&hdev->misc_vector, false); + synchronize_irq(hdev->misc_vector.vector_irq); + + /* Disable all hw interrupts */ + hclge_config_mac_tnl_int(hdev, false); + hclge_config_nic_hw_error(hdev, false); + hclge_config_rocee_ras_interrupt(hdev, false); + + hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw); + hclge_misc_irq_uninit(hdev); + hclge_devlink_uninit(hdev); + hclge_pci_uninit(hdev); + hclge_uninit_vport_vlan_table(hdev); + mutex_destroy(&hdev->vport_lock); + ae_dev->priv = NULL; +} + +static u32 hclge_get_max_channels(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + return min_t(u32, hdev->pf_rss_size_max, vport->alloc_tqps); +} + +static void hclge_get_channels(struct hnae3_handle *handle, + struct ethtool_channels *ch) +{ + ch->max_combined = hclge_get_max_channels(handle); + ch->other_count = 1; + ch->max_other = 1; + ch->combined_count = handle->kinfo.rss_size; +} + +static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle, + u16 *alloc_tqps, u16 *max_rss_size) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + *alloc_tqps = vport->alloc_tqps; + *max_rss_size = hdev->pf_rss_size_max; +} + +static int hclge_set_rss_tc_mode_cfg(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + u16 tc_offset[HCLGE_MAX_TC_NUM] = {0}; + struct hclge_dev *hdev = vport->back; + u16 tc_size[HCLGE_MAX_TC_NUM] = {0}; + u16 tc_valid[HCLGE_MAX_TC_NUM]; + u16 roundup_size; + unsigned int i; + + roundup_size = roundup_pow_of_two(vport->nic.kinfo.rss_size); + roundup_size = ilog2(roundup_size); + /* Set the RSS TC mode according to the new RSS size */ + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { + tc_valid[i] = 0; + + if (!(hdev->hw_tc_map & BIT(i))) + continue; + + tc_valid[i] = 1; + tc_size[i] = roundup_size; + tc_offset[i] = vport->nic.kinfo.rss_size * i; + } + + return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset, tc_valid, + tc_size); +} + +static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num, + bool rxfh_configured) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev); + struct hclge_vport *vport = hclge_get_vport(handle); + struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; + struct hclge_dev *hdev = vport->back; + u16 cur_rss_size = 
kinfo->rss_size; + u16 cur_tqps = kinfo->num_tqps; + u32 *rss_indir; + unsigned int i; + int ret; + + kinfo->req_rss_size = new_tqps_num; + + ret = hclge_tm_vport_map_update(hdev); + if (ret) { + dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret); + return ret; + } + + ret = hclge_set_rss_tc_mode_cfg(handle); + if (ret) + return ret; + + /* RSS indirection table has been configured by user */ + if (rxfh_configured) + goto out; + + /* Reinitializes the rss indirect table according to the new RSS size */ + rss_indir = kcalloc(ae_dev->dev_specs.rss_ind_tbl_size, sizeof(u32), + GFP_KERNEL); + if (!rss_indir) + return -ENOMEM; + + for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++) + rss_indir[i] = i % kinfo->rss_size; + + ret = hclge_set_rss(handle, rss_indir, NULL, 0); + if (ret) + dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n", + ret); + + kfree(rss_indir); + +out: + if (!ret) + dev_info(&hdev->pdev->dev, + "Channels changed, rss_size from %u to %u, tqps from %u to %u", + cur_rss_size, kinfo->rss_size, + cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc); + + return ret; +} + +static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit, + u32 *regs_num_64_bit) +{ + struct hclge_desc desc; + u32 total_num; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "Query register number cmd failed, ret = %d.\n", ret); + return ret; + } + + *regs_num_32_bit = le32_to_cpu(desc.data[0]); + *regs_num_64_bit = le32_to_cpu(desc.data[1]); + + total_num = *regs_num_32_bit + *regs_num_64_bit; + if (!total_num) + return -EINVAL; + + return 0; +} + +static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num, + void *data) +{ +#define HCLGE_32_BIT_REG_RTN_DATANUM 8 +#define HCLGE_32_BIT_DESC_NODATA_LEN 2 + + struct hclge_desc *desc; + u32 *reg_val = data; + __le32 *desc_data; + int nodata_num; + int cmd_num; + int i, k, n; + int ret; + + if (regs_num == 0) + return 0; + + nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN; + cmd_num = DIV_ROUND_UP(regs_num + nodata_num, + HCLGE_32_BIT_REG_RTN_DATANUM); + desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL); + if (!desc) + return -ENOMEM; + + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true); + ret = hclge_cmd_send(&hdev->hw, desc, cmd_num); + if (ret) { + dev_err(&hdev->pdev->dev, + "Query 32 bit register cmd failed, ret = %d.\n", ret); + kfree(desc); + return ret; + } + + for (i = 0; i < cmd_num; i++) { + if (i == 0) { + desc_data = (__le32 *)(&desc[i].data[0]); + n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num; + } else { + desc_data = (__le32 *)(&desc[i]); + n = HCLGE_32_BIT_REG_RTN_DATANUM; + } + for (k = 0; k < n; k++) { + *reg_val++ = le32_to_cpu(*desc_data++); + + regs_num--; + if (!regs_num) + break; + } + } + + kfree(desc); + return 0; +} + +static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num, + void *data) +{ +#define HCLGE_64_BIT_REG_RTN_DATANUM 4 +#define HCLGE_64_BIT_DESC_NODATA_LEN 1 + + struct hclge_desc *desc; + u64 *reg_val = data; + __le64 *desc_data; + int nodata_len; + int cmd_num; + int i, k, n; + int ret; + + if (regs_num == 0) + return 0; + + nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN; + cmd_num = DIV_ROUND_UP(regs_num + nodata_len, + HCLGE_64_BIT_REG_RTN_DATANUM); + desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL); + if (!desc) + return -ENOMEM; + + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, 
true); + ret = hclge_cmd_send(&hdev->hw, desc, cmd_num); + if (ret) { + dev_err(&hdev->pdev->dev, + "Query 64 bit register cmd failed, ret = %d.\n", ret); + kfree(desc); + return ret; + } + + for (i = 0; i < cmd_num; i++) { + if (i == 0) { + desc_data = (__le64 *)(&desc[i].data[0]); + n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len; + } else { + desc_data = (__le64 *)(&desc[i]); + n = HCLGE_64_BIT_REG_RTN_DATANUM; + } + for (k = 0; k < n; k++) { + *reg_val++ = le64_to_cpu(*desc_data++); + + regs_num--; + if (!regs_num) + break; + } + } + + kfree(desc); + return 0; +} + +#define MAX_SEPARATE_NUM 4 +#define SEPARATOR_VALUE 0xFDFCFBFA +#define REG_NUM_PER_LINE 4 +#define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32)) +#define REG_SEPARATOR_LINE 1 +#define REG_NUM_REMAIN_MASK 3 + +int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc) +{ + int i; + + /* initialize command BD except the last one */ + for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) { + hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, + true); + desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); + } + + /* initialize the last command BD */ + hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true); + + return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT); +} + +static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev, + int *bd_num_list, + u32 type_num) +{ + u32 entries_per_desc, desc_index, index, offset, i; + struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT]; + int ret; + + ret = hclge_query_bd_num_cmd_send(hdev, desc); + if (ret) { + dev_err(&hdev->pdev->dev, + "Get dfx bd num fail, status is %d.\n", ret); + return ret; + } + + entries_per_desc = ARRAY_SIZE(desc[0].data); + for (i = 0; i < type_num; i++) { + offset = hclge_dfx_bd_offset_list[i]; + index = offset % entries_per_desc; + desc_index = offset / entries_per_desc; + bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]); + } + + return ret; +} + +static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev, + struct hclge_desc *desc_src, int bd_num, + enum hclge_opcode_type cmd) +{ + struct hclge_desc *desc = desc_src; + int i, ret; + + hclge_cmd_setup_basic_desc(desc, cmd, true); + for (i = 0; i < bd_num - 1; i++) { + desc->flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); + desc++; + hclge_cmd_setup_basic_desc(desc, cmd, true); + } + + desc = desc_src; + ret = hclge_cmd_send(&hdev->hw, desc, bd_num); + if (ret) + dev_err(&hdev->pdev->dev, + "Query dfx reg cmd(0x%x) send fail, status is %d.\n", + cmd, ret); + + return ret; +} + +static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num, + void *data) +{ + int entries_per_desc, reg_num, separator_num, desc_index, index, i; + struct hclge_desc *desc = desc_src; + u32 *reg = data; + + entries_per_desc = ARRAY_SIZE(desc->data); + reg_num = entries_per_desc * bd_num; + separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK); + for (i = 0; i < reg_num; i++) { + index = i % entries_per_desc; + desc_index = i / entries_per_desc; + *reg++ = le32_to_cpu(desc[desc_index].data[index]); + } + for (i = 0; i < separator_num; i++) + *reg++ = SEPARATOR_VALUE; + + return reg_num + separator_num; +} + +static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len) +{ + u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list); + int data_len_per_desc, bd_num, i; + int *bd_num_list; + u32 data_len; + int ret; + + bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL); + if (!bd_num_list) + return -ENOMEM; + + ret = 
hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num); + if (ret) { + dev_err(&hdev->pdev->dev, + "Get dfx reg bd num fail, status is %d.\n", ret); + goto out; + } + + data_len_per_desc = sizeof_field(struct hclge_desc, data); + *len = 0; + for (i = 0; i < dfx_reg_type_num; i++) { + bd_num = bd_num_list[i]; + data_len = data_len_per_desc * bd_num; + *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE; + } + +out: + kfree(bd_num_list); + return ret; +} + +static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data) +{ + u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list); + int bd_num, bd_num_max, buf_len, i; + struct hclge_desc *desc_src; + int *bd_num_list; + u32 *reg = data; + int ret; + + bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL); + if (!bd_num_list) + return -ENOMEM; + + ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num); + if (ret) { + dev_err(&hdev->pdev->dev, + "Get dfx reg bd num fail, status is %d.\n", ret); + goto out; + } + + bd_num_max = bd_num_list[0]; + for (i = 1; i < dfx_reg_type_num; i++) + bd_num_max = max_t(int, bd_num_max, bd_num_list[i]); + + buf_len = sizeof(*desc_src) * bd_num_max; + desc_src = kzalloc(buf_len, GFP_KERNEL); + if (!desc_src) { + ret = -ENOMEM; + goto out; + } + + for (i = 0; i < dfx_reg_type_num; i++) { + bd_num = bd_num_list[i]; + ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num, + hclge_dfx_reg_opcode_list[i]); + if (ret) { + dev_err(&hdev->pdev->dev, + "Get dfx reg fail, status is %d.\n", ret); + break; + } + + reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg); + } + + kfree(desc_src); +out: + kfree(bd_num_list); + return ret; +} + +static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data, + struct hnae3_knic_private_info *kinfo) +{ +#define HCLGE_RING_REG_OFFSET 0x200 +#define HCLGE_RING_INT_REG_OFFSET 0x4 + + int i, j, reg_num, separator_num; + int data_num_sum; + u32 *reg = data; + + /* fetching per-PF registers valus from PF PCIe register space */ + reg_num = ARRAY_SIZE(cmdq_reg_addr_list); + separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK); + for (i = 0; i < reg_num; i++) + *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]); + for (i = 0; i < separator_num; i++) + *reg++ = SEPARATOR_VALUE; + data_num_sum = reg_num + separator_num; + + reg_num = ARRAY_SIZE(common_reg_addr_list); + separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK); + for (i = 0; i < reg_num; i++) + *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]); + for (i = 0; i < separator_num; i++) + *reg++ = SEPARATOR_VALUE; + data_num_sum += reg_num + separator_num; + + reg_num = ARRAY_SIZE(ring_reg_addr_list); + separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK); + for (j = 0; j < kinfo->num_tqps; j++) { + for (i = 0; i < reg_num; i++) + *reg++ = hclge_read_dev(&hdev->hw, + ring_reg_addr_list[i] + + HCLGE_RING_REG_OFFSET * j); + for (i = 0; i < separator_num; i++) + *reg++ = SEPARATOR_VALUE; + } + data_num_sum += (reg_num + separator_num) * kinfo->num_tqps; + + reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list); + separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK); + for (j = 0; j < hdev->num_msi_used - 1; j++) { + for (i = 0; i < reg_num; i++) + *reg++ = hclge_read_dev(&hdev->hw, + tqp_intr_reg_addr_list[i] + + HCLGE_RING_INT_REG_OFFSET * j); + for (i = 0; i < separator_num; i++) + *reg++ = SEPARATOR_VALUE; + } + data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1); + + return data_num_sum; +} + +static int 
hclge_get_regs_len(struct hnae3_handle *handle) +{ + int cmdq_lines, common_lines, ring_lines, tqp_intr_lines; + struct hnae3_knic_private_info *kinfo = &handle->kinfo; + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + int regs_num_32_bit, regs_num_64_bit, dfx_regs_len; + int regs_lines_32_bit, regs_lines_64_bit; + int ret; + + ret = hclge_get_regs_num(hdev, ®s_num_32_bit, ®s_num_64_bit); + if (ret) { + dev_err(&hdev->pdev->dev, + "Get register number failed, ret = %d.\n", ret); + return ret; + } + + ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len); + if (ret) { + dev_err(&hdev->pdev->dev, + "Get dfx reg len failed, ret = %d.\n", ret); + return ret; + } + + cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + + REG_SEPARATOR_LINE; + common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + + REG_SEPARATOR_LINE; + ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + + REG_SEPARATOR_LINE; + tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + + REG_SEPARATOR_LINE; + regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE + + REG_SEPARATOR_LINE; + regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE + + REG_SEPARATOR_LINE; + + return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps + + tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit + + regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len; +} + +static void hclge_get_regs(struct hnae3_handle *handle, u32 *version, + void *data) +{ + struct hnae3_knic_private_info *kinfo = &handle->kinfo; + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + u32 regs_num_32_bit, regs_num_64_bit; + int i, reg_num, separator_num, ret; + u32 *reg = data; + + *version = hdev->fw_version; + + ret = hclge_get_regs_num(hdev, ®s_num_32_bit, ®s_num_64_bit); + if (ret) { + dev_err(&hdev->pdev->dev, + "Get register number failed, ret = %d.\n", ret); + return; + } + + reg += hclge_fetch_pf_reg(hdev, reg, kinfo); + + ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg); + if (ret) { + dev_err(&hdev->pdev->dev, + "Get 32 bit register failed, ret = %d.\n", ret); + return; + } + reg_num = regs_num_32_bit; + reg += reg_num; + separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK); + for (i = 0; i < separator_num; i++) + *reg++ = SEPARATOR_VALUE; + + ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg); + if (ret) { + dev_err(&hdev->pdev->dev, + "Get 64 bit register failed, ret = %d.\n", ret); + return; + } + reg_num = regs_num_64_bit * 2; + reg += reg_num; + separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK); + for (i = 0; i < separator_num; i++) + *reg++ = SEPARATOR_VALUE; + + ret = hclge_get_dfx_reg(hdev, reg); + if (ret) + dev_err(&hdev->pdev->dev, + "Get dfx register failed, ret = %d.\n", ret); +} + +static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status) +{ + struct hclge_set_led_state_cmd *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false); + + req = (struct hclge_set_led_state_cmd *)desc.data; + hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M, + HCLGE_LED_LOCATE_STATE_S, locate_led_status); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "Send set led state cmd error, ret =%d\n", ret); + + return ret; +} + +enum hclge_led_status { + HCLGE_LED_OFF, + HCLGE_LED_ON, + HCLGE_LED_NO_CHANGE = 0xFF, +}; + +static int 
hclge_set_led_id(struct hnae3_handle *handle, + enum ethtool_phys_id_state status) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + switch (status) { + case ETHTOOL_ID_ACTIVE: + return hclge_set_led_status(hdev, HCLGE_LED_ON); + case ETHTOOL_ID_INACTIVE: + return hclge_set_led_status(hdev, HCLGE_LED_OFF); + default: + return -EINVAL; + } +} + +static void hclge_get_link_mode(struct hnae3_handle *handle, + unsigned long *supported, + unsigned long *advertising) +{ + unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS); + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + unsigned int idx = 0; + + for (; idx < size; idx++) { + supported[idx] = hdev->hw.mac.supported[idx]; + advertising[idx] = hdev->hw.mac.advertising[idx]; + } +} + +static int hclge_gro_en(struct hnae3_handle *handle, bool enable) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + bool gro_en_old = hdev->gro_en; + int ret; + + hdev->gro_en = enable; + ret = hclge_config_gro(hdev); + if (ret) + hdev->gro_en = gro_en_old; + + return ret; +} + +static int hclge_sync_vport_promisc_mode(struct hclge_vport *vport) +{ + struct hnae3_handle *handle = &vport->nic; + struct hclge_dev *hdev = vport->back; + bool uc_en = false; + bool mc_en = false; + u8 tmp_flags; + bool bc_en; + int ret; + + if (vport->last_promisc_flags != vport->overflow_promisc_flags) { + set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state); + vport->last_promisc_flags = vport->overflow_promisc_flags; + } + + if (!test_and_clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, + &vport->state)) + return 0; + + /* for PF */ + if (!vport->vport_id) { + tmp_flags = handle->netdev_flags | vport->last_promisc_flags; + ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE, + tmp_flags & HNAE3_MPE); + if (!ret) + set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE, + &vport->state); + else + set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, + &vport->state); + return ret; + } + + /* for VF */ + if (vport->vf_info.trusted) { + uc_en = vport->vf_info.request_uc_en > 0 || + vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE; + mc_en = vport->vf_info.request_mc_en > 0 || + vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE; + } + bc_en = vport->vf_info.request_bc_en > 0; + + ret = hclge_cmd_set_promisc_mode(hdev, vport->vport_id, uc_en, + mc_en, bc_en); + if (ret) { + set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state); + return ret; + } + hclge_set_vport_vlan_fltr_change(vport); + + return 0; +} + +static void hclge_sync_promisc_mode(struct hclge_dev *hdev) +{ + struct hclge_vport *vport; + int ret; + u16 i; + + for (i = 0; i < hdev->num_alloc_vport; i++) { + vport = &hdev->vport[i]; + + ret = hclge_sync_vport_promisc_mode(vport); + if (ret) + return; + } +} + +static bool hclge_module_existed(struct hclge_dev *hdev) +{ + struct hclge_desc desc; + u32 existed; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get SFP exist state, ret = %d\n", ret); + return false; + } + + existed = le32_to_cpu(desc.data[0]); + + return existed != 0; +} + +/* need 6 bds(total 140 bytes) in one reading + * return the number of bytes actually read, 0 means read failed. 
+ */ +static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset, + u32 len, u8 *data) +{ + struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM]; + struct hclge_sfp_info_bd0_cmd *sfp_info_bd0; + u16 read_len; + u16 copy_len; + int ret; + int i; + + /* setup all 6 bds to read module eeprom info. */ + for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) { + hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM, + true); + + /* bd0~bd4 need next flag */ + if (i < HCLGE_SFP_INFO_CMD_NUM - 1) + desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); + } + + /* setup bd0, this bd contains offset and read length. */ + sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data; + sfp_info_bd0->offset = cpu_to_le16((u16)offset); + read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN); + sfp_info_bd0->read_len = cpu_to_le16(read_len); + + ret = hclge_cmd_send(&hdev->hw, desc, i); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get SFP eeprom info, ret = %d\n", ret); + return 0; + } + + /* copy sfp info from bd0 to out buffer. */ + copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN); + memcpy(data, sfp_info_bd0->data, copy_len); + read_len = copy_len; + + /* copy sfp info from bd1~bd5 to out buffer if needed. */ + for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) { + if (read_len >= len) + return read_len; + + copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN); + memcpy(data + read_len, desc[i].data, copy_len); + read_len += copy_len; + } + + return read_len; +} + +static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset, + u32 len, u8 *data) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + u32 read_len = 0; + u16 data_len; + + if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER) + return -EOPNOTSUPP; + + if (!hclge_module_existed(hdev)) + return -ENXIO; + + while (read_len < len) { + data_len = hclge_get_sfp_eeprom_info(hdev, + offset + read_len, + len - read_len, + data + read_len); + if (!data_len) + return -EIO; + + read_len += data_len; + } + + return 0; +} + +static int hclge_get_link_diagnosis_info(struct hnae3_handle *handle, + u32 *status_code) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct hclge_desc desc; + int ret; + + if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) + return -EOPNOTSUPP; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_DIAGNOSIS, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to query link diagnosis info, ret = %d\n", ret); + return ret; + } + + *status_code = le32_to_cpu(desc.data[0]); + return 0; +} + +/* After disabling SRIOV, the VF still has some config and info that need + * to be cleaned, which were configured by the PF.
+ */ +static void hclge_clear_vport_vf_info(struct hclge_vport *vport, int vfid) +{ + struct hclge_dev *hdev = vport->back; + struct hclge_vlan_info vlan_info; + int ret; + + clear_bit(HCLGE_VPORT_STATE_INITED, &vport->state); + clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); + vport->need_notify = 0; + vport->mps = 0; + + /* after disable sriov, clean VF rate configured by PF */ + ret = hclge_tm_qs_shaper_cfg(vport, 0); + if (ret) + dev_err(&hdev->pdev->dev, + "failed to clean vf%d rate config, ret = %d\n", + vfid, ret); + + vlan_info.vlan_tag = 0; + vlan_info.qos = 0; + vlan_info.vlan_proto = ETH_P_8021Q; + ret = hclge_update_port_base_vlan_cfg(vport, + HNAE3_PORT_BASE_VLAN_DISABLE, + &vlan_info); + if (ret) + dev_err(&hdev->pdev->dev, + "failed to clean vf%d port base vlan, ret = %d\n", + vfid, ret); + + ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, false); + if (ret) + dev_err(&hdev->pdev->dev, + "failed to clean vf%d spoof config, ret = %d\n", + vfid, ret); + + memset(&vport->vf_info, 0, sizeof(vport->vf_info)); +} + +static void hclge_clean_vport_config(struct hnae3_ae_dev *ae_dev, int num_vfs) +{ + struct hclge_dev *hdev = ae_dev->priv; + struct hclge_vport *vport; + int i; + + for (i = 0; i < num_vfs; i++) { + vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM]; + + hclge_clear_vport_vf_info(vport, i); + } +} + +static int hclge_get_dscp_prio(struct hnae3_handle *h, u8 dscp, u8 *tc_mode, + u8 *priority) +{ + struct hclge_vport *vport = hclge_get_vport(h); + + if (dscp >= HNAE3_MAX_DSCP) + return -EINVAL; + + if (tc_mode) + *tc_mode = vport->nic.kinfo.tc_map_mode; + if (priority) + *priority = vport->nic.kinfo.dscp_prio[dscp] == HNAE3_PRIO_ID_INVALID ? 0 : + vport->nic.kinfo.dscp_prio[dscp]; + + return 0; +} + +static const struct hnae3_ae_ops hclge_ops = { + .init_ae_dev = hclge_init_ae_dev, + .uninit_ae_dev = hclge_uninit_ae_dev, + .reset_prepare = hclge_reset_prepare_general, + .reset_done = hclge_reset_done, + .init_client_instance = hclge_init_client_instance, + .uninit_client_instance = hclge_uninit_client_instance, + .map_ring_to_vector = hclge_map_ring_to_vector, + .unmap_ring_from_vector = hclge_unmap_ring_frm_vector, + .get_vector = hclge_get_vector, + .put_vector = hclge_put_vector, + .set_promisc_mode = hclge_set_promisc_mode, + .request_update_promisc_mode = hclge_request_update_promisc_mode, + .set_loopback = hclge_set_loopback, + .start = hclge_ae_start, + .stop = hclge_ae_stop, + .client_start = hclge_client_start, + .client_stop = hclge_client_stop, + .get_status = hclge_get_status, + .get_ksettings_an_result = hclge_get_ksettings_an_result, + .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h, + .get_media_type = hclge_get_media_type, + .check_port_speed = hclge_check_port_speed, + .get_fec_stats = hclge_get_fec_stats, + .get_fec = hclge_get_fec, + .set_fec = hclge_set_fec, + .get_rss_key_size = hclge_comm_get_rss_key_size, + .get_rss = hclge_get_rss, + .set_rss = hclge_set_rss, + .set_rss_tuple = hclge_set_rss_tuple, + .get_rss_tuple = hclge_get_rss_tuple, + .get_tc_size = hclge_get_tc_size, + .get_mac_addr = hclge_get_mac_addr, + .set_mac_addr = hclge_set_mac_addr, + .do_ioctl = hclge_do_ioctl, + .add_uc_addr = hclge_add_uc_addr, + .rm_uc_addr = hclge_rm_uc_addr, + .add_mc_addr = hclge_add_mc_addr, + .rm_mc_addr = hclge_rm_mc_addr, + .set_autoneg = hclge_set_autoneg, + .get_autoneg = hclge_get_autoneg, + .restart_autoneg = hclge_restart_autoneg, + .halt_autoneg = hclge_halt_autoneg, + .get_pauseparam = hclge_get_pauseparam, + .set_pauseparam = 
hclge_set_pauseparam, + .set_mtu = hclge_set_mtu, + .reset_queue = hclge_reset_tqp, + .get_stats = hclge_get_stats, + .get_mac_stats = hclge_get_mac_stat, + .update_stats = hclge_update_stats, + .get_strings = hclge_get_strings, + .get_sset_count = hclge_get_sset_count, + .get_fw_version = hclge_get_fw_version, + .get_mdix_mode = hclge_get_mdix_mode, + .enable_vlan_filter = hclge_enable_vlan_filter, + .set_vlan_filter = hclge_set_vlan_filter, + .set_vf_vlan_filter = hclge_set_vf_vlan_filter, + .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag, + .reset_event = hclge_reset_event, + .get_reset_level = hclge_get_reset_level, + .set_default_reset_request = hclge_set_def_reset_request, + .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info, + .set_channels = hclge_set_channels, + .get_channels = hclge_get_channels, + .get_regs_len = hclge_get_regs_len, + .get_regs = hclge_get_regs, + .set_led_id = hclge_set_led_id, + .get_link_mode = hclge_get_link_mode, + .add_fd_entry = hclge_add_fd_entry, + .del_fd_entry = hclge_del_fd_entry, + .get_fd_rule_cnt = hclge_get_fd_rule_cnt, + .get_fd_rule_info = hclge_get_fd_rule_info, + .get_fd_all_rules = hclge_get_all_rules, + .enable_fd = hclge_enable_fd, + .add_arfs_entry = hclge_add_fd_entry_by_arfs, + .dbg_read_cmd = hclge_dbg_read_cmd, + .handle_hw_ras_error = hclge_handle_hw_ras_error, + .get_hw_reset_stat = hclge_get_hw_reset_stat, + .ae_dev_resetting = hclge_ae_dev_resetting, + .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt, + .set_gro_en = hclge_gro_en, + .get_global_queue_id = hclge_covert_handle_qid_global, + .set_timer_task = hclge_set_timer_task, + .mac_connect_phy = hclge_mac_connect_phy, + .mac_disconnect_phy = hclge_mac_disconnect_phy, + .get_vf_config = hclge_get_vf_config, + .set_vf_link_state = hclge_set_vf_link_state, + .set_vf_spoofchk = hclge_set_vf_spoofchk, + .set_vf_trust = hclge_set_vf_trust, + .set_vf_rate = hclge_set_vf_rate, + .set_vf_mac = hclge_set_vf_mac, + .get_module_eeprom = hclge_get_module_eeprom, + .get_cmdq_stat = hclge_get_cmdq_stat, + .add_cls_flower = hclge_add_cls_flower, + .del_cls_flower = hclge_del_cls_flower, + .cls_flower_active = hclge_is_cls_flower_active, + .get_phy_link_ksettings = hclge_get_phy_link_ksettings, + .set_phy_link_ksettings = hclge_set_phy_link_ksettings, + .set_tx_hwts_info = hclge_ptp_set_tx_info, + .get_rx_hwts = hclge_ptp_get_rx_hwts, + .get_ts_info = hclge_ptp_get_ts_info, + .get_link_diagnosis_info = hclge_get_link_diagnosis_info, + .clean_vf_config = hclge_clean_vport_config, + .get_dscp_prio = hclge_get_dscp_prio, +}; + +static struct hnae3_ae_algo ae_algo = { + .ops = &hclge_ops, + .pdev_id_table = ae_algo_pci_tbl, +}; + +static int __init hclge_init(void) +{ + pr_info("%s is initializing\n", HCLGE_NAME); + + hclge_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGE_NAME); + if (!hclge_wq) { + pr_err("%s: failed to create workqueue\n", HCLGE_NAME); + return -ENOMEM; + } + + hnae3_register_ae_algo(&ae_algo); + + return 0; +} + +static void __exit hclge_exit(void) +{ + hnae3_unregister_ae_algo_prepare(&ae_algo); + hnae3_unregister_ae_algo(&ae_algo); + destroy_workqueue(hclge_wq); +} +module_init(hclge_init); +module_exit(hclge_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Huawei Tech. 
Co., Ltd."); +MODULE_DESCRIPTION("HCLGE Driver"); +MODULE_VERSION(HCLGE_MOD_VERSION); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h new file mode 100644 index 000000000..f6fef790e --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -0,0 +1,1149 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +// Copyright (c) 2016-2017 Hisilicon Limited. + +#ifndef __HCLGE_MAIN_H +#define __HCLGE_MAIN_H +#include +#include +#include +#include +#include +#include + +#include "hclge_cmd.h" +#include "hclge_ptp.h" +#include "hnae3.h" +#include "hclge_comm_rss.h" +#include "hclge_comm_tqp_stats.h" + +#define HCLGE_MOD_VERSION "1.0" +#define HCLGE_DRIVER_NAME "hclge" + +#define HCLGE_MAX_PF_NUM 8 + +#define HCLGE_VF_VPORT_START_NUM 1 + +#define HCLGE_RD_FIRST_STATS_NUM 2 +#define HCLGE_RD_OTHER_STATS_NUM 4 + +#define HCLGE_INVALID_VPORT 0xffff + +#define HCLGE_PF_CFG_BLOCK_SIZE 32 +#define HCLGE_PF_CFG_DESC_NUM \ + (HCLGE_PF_CFG_BLOCK_SIZE / HCLGE_CFG_RD_LEN_BYTES) + +#define HCLGE_VECTOR_REG_BASE 0x20000 +#define HCLGE_VECTOR_EXT_REG_BASE 0x30000 +#define HCLGE_MISC_VECTOR_REG_BASE 0x20400 + +#define HCLGE_VECTOR_REG_OFFSET 0x4 +#define HCLGE_VECTOR_REG_OFFSET_H 0x1000 +#define HCLGE_VECTOR_VF_OFFSET 0x100000 + +#define HCLGE_NIC_CSQ_DEPTH_REG 0x27008 + +/* bar registers for common func */ +#define HCLGE_GRO_EN_REG 0x28000 +#define HCLGE_RXD_ADV_LAYOUT_EN_REG 0x28008 + +/* bar registers for rcb */ +#define HCLGE_RING_RX_ADDR_L_REG 0x80000 +#define HCLGE_RING_RX_ADDR_H_REG 0x80004 +#define HCLGE_RING_RX_BD_NUM_REG 0x80008 +#define HCLGE_RING_RX_BD_LENGTH_REG 0x8000C +#define HCLGE_RING_RX_MERGE_EN_REG 0x80014 +#define HCLGE_RING_RX_TAIL_REG 0x80018 +#define HCLGE_RING_RX_HEAD_REG 0x8001C +#define HCLGE_RING_RX_FBD_NUM_REG 0x80020 +#define HCLGE_RING_RX_OFFSET_REG 0x80024 +#define HCLGE_RING_RX_FBD_OFFSET_REG 0x80028 +#define HCLGE_RING_RX_STASH_REG 0x80030 +#define HCLGE_RING_RX_BD_ERR_REG 0x80034 +#define HCLGE_RING_TX_ADDR_L_REG 0x80040 +#define HCLGE_RING_TX_ADDR_H_REG 0x80044 +#define HCLGE_RING_TX_BD_NUM_REG 0x80048 +#define HCLGE_RING_TX_PRIORITY_REG 0x8004C +#define HCLGE_RING_TX_TC_REG 0x80050 +#define HCLGE_RING_TX_MERGE_EN_REG 0x80054 +#define HCLGE_RING_TX_TAIL_REG 0x80058 +#define HCLGE_RING_TX_HEAD_REG 0x8005C +#define HCLGE_RING_TX_FBD_NUM_REG 0x80060 +#define HCLGE_RING_TX_OFFSET_REG 0x80064 +#define HCLGE_RING_TX_EBD_NUM_REG 0x80068 +#define HCLGE_RING_TX_EBD_OFFSET_REG 0x80070 +#define HCLGE_RING_TX_BD_ERR_REG 0x80074 +#define HCLGE_RING_EN_REG 0x80090 + +/* bar registers for tqp interrupt */ +#define HCLGE_TQP_INTR_CTRL_REG 0x20000 +#define HCLGE_TQP_INTR_GL0_REG 0x20100 +#define HCLGE_TQP_INTR_GL1_REG 0x20200 +#define HCLGE_TQP_INTR_GL2_REG 0x20300 +#define HCLGE_TQP_INTR_RL_REG 0x20900 + +#define HCLGE_RSS_IND_TBL_SIZE 512 + +#define HCLGE_RSS_TC_SIZE_0 1 +#define HCLGE_RSS_TC_SIZE_1 2 +#define HCLGE_RSS_TC_SIZE_2 4 +#define HCLGE_RSS_TC_SIZE_3 8 +#define HCLGE_RSS_TC_SIZE_4 16 +#define HCLGE_RSS_TC_SIZE_5 32 +#define HCLGE_RSS_TC_SIZE_6 64 +#define HCLGE_RSS_TC_SIZE_7 128 + +#define HCLGE_UMV_TBL_SIZE 3072 +#define HCLGE_DEFAULT_UMV_SPACE_PER_PF \ + (HCLGE_UMV_TBL_SIZE / HCLGE_MAX_PF_NUM) + +#define HCLGE_TQP_RESET_TRY_TIMES 200 + +#define HCLGE_PHY_PAGE_MDIX 0 +#define HCLGE_PHY_PAGE_COPPER 0 + +/* Page Selection Reg. 
*/ +#define HCLGE_PHY_PAGE_REG 22 + +/* Copper Specific Control Register */ +#define HCLGE_PHY_CSC_REG 16 + +/* Copper Specific Status Register */ +#define HCLGE_PHY_CSS_REG 17 + +#define HCLGE_PHY_MDIX_CTRL_S 5 +#define HCLGE_PHY_MDIX_CTRL_M GENMASK(6, 5) + +#define HCLGE_PHY_MDIX_STATUS_B 6 +#define HCLGE_PHY_SPEED_DUP_RESOLVE_B 11 + +#define HCLGE_GET_DFX_REG_TYPE_CNT 4 + +/* Factor used to calculate offset and bitmap of VF num */ +#define HCLGE_VF_NUM_PER_CMD 64 + +#define HCLGE_MAX_QSET_NUM 1024 + +#define HCLGE_DBG_RESET_INFO_LEN 1024 + +enum HLCGE_PORT_TYPE { + HOST_PORT, + NETWORK_PORT +}; + +#define PF_VPORT_ID 0 + +#define HCLGE_PF_ID_S 0 +#define HCLGE_PF_ID_M GENMASK(2, 0) +#define HCLGE_VF_ID_S 3 +#define HCLGE_VF_ID_M GENMASK(10, 3) +#define HCLGE_PORT_TYPE_B 11 +#define HCLGE_NETWORK_PORT_ID_S 0 +#define HCLGE_NETWORK_PORT_ID_M GENMASK(3, 0) + +/* Reset related Registers */ +#define HCLGE_PF_OTHER_INT_REG 0x20600 +#define HCLGE_MISC_RESET_STS_REG 0x20700 +#define HCLGE_MISC_VECTOR_INT_STS 0x20800 +#define HCLGE_GLOBAL_RESET_REG 0x20A00 +#define HCLGE_GLOBAL_RESET_BIT 0 +#define HCLGE_CORE_RESET_BIT 1 +#define HCLGE_IMP_RESET_BIT 2 +#define HCLGE_RESET_INT_M GENMASK(7, 5) +#define HCLGE_FUN_RST_ING 0x20C00 +#define HCLGE_FUN_RST_ING_B 0 + +/* Vector0 register bits define */ +#define HCLGE_VECTOR0_REG_PTP_INT_B 0 +#define HCLGE_VECTOR0_GLOBALRESET_INT_B 5 +#define HCLGE_VECTOR0_CORERESET_INT_B 6 +#define HCLGE_VECTOR0_IMPRESET_INT_B 7 + +/* Vector0 interrupt CMDQ event source register(RW) */ +#define HCLGE_VECTOR0_CMDQ_SRC_REG 0x27100 +/* CMDQ register bits for RX event(=MBX event) */ +#define HCLGE_VECTOR0_RX_CMDQ_INT_B 1 + +#define HCLGE_VECTOR0_IMP_RESET_INT_B 1 +#define HCLGE_VECTOR0_IMP_CMDQ_ERR_B 4U +#define HCLGE_VECTOR0_IMP_RD_POISON_B 5U +#define HCLGE_VECTOR0_ALL_MSIX_ERR_B 6U +#define HCLGE_TRIGGER_IMP_RESET_B 7U + +#define HCLGE_TQP_MEM_SIZE 0x10000 +#define HCLGE_MEM_BAR 4 +/* in the bar4, the first half is for roce, and the second half is for nic */ +#define HCLGE_NIC_MEM_OFFSET(hdev) \ + (pci_resource_len((hdev)->pdev, HCLGE_MEM_BAR) >> 1) +#define HCLGE_TQP_MEM_OFFSET(hdev, i) \ + (HCLGE_NIC_MEM_OFFSET(hdev) + HCLGE_TQP_MEM_SIZE * (i)) + +#define HCLGE_MAC_DEFAULT_FRAME \ + (ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN + ETH_DATA_LEN) +#define HCLGE_MAC_MIN_FRAME 64 +#define HCLGE_MAC_MAX_FRAME 9728 + +#define HCLGE_SUPPORT_1G_BIT BIT(0) +#define HCLGE_SUPPORT_10G_BIT BIT(1) +#define HCLGE_SUPPORT_25G_BIT BIT(2) +#define HCLGE_SUPPORT_50G_BIT BIT(3) +#define HCLGE_SUPPORT_100G_BIT BIT(4) +/* to be compatible with exsit board */ +#define HCLGE_SUPPORT_40G_BIT BIT(5) +#define HCLGE_SUPPORT_100M_BIT BIT(6) +#define HCLGE_SUPPORT_10M_BIT BIT(7) +#define HCLGE_SUPPORT_200G_BIT BIT(8) +#define HCLGE_SUPPORT_GE \ + (HCLGE_SUPPORT_1G_BIT | HCLGE_SUPPORT_100M_BIT | HCLGE_SUPPORT_10M_BIT) + +enum HCLGE_DEV_STATE { + HCLGE_STATE_REINITING, + HCLGE_STATE_DOWN, + HCLGE_STATE_DISABLED, + HCLGE_STATE_REMOVING, + HCLGE_STATE_NIC_REGISTERED, + HCLGE_STATE_ROCE_REGISTERED, + HCLGE_STATE_SERVICE_INITED, + HCLGE_STATE_RST_SERVICE_SCHED, + HCLGE_STATE_RST_HANDLING, + HCLGE_STATE_MBX_SERVICE_SCHED, + HCLGE_STATE_MBX_HANDLING, + HCLGE_STATE_ERR_SERVICE_SCHED, + HCLGE_STATE_STATISTICS_UPDATING, + HCLGE_STATE_LINK_UPDATING, + HCLGE_STATE_RST_FAIL, + HCLGE_STATE_FD_TBL_CHANGED, + HCLGE_STATE_FD_CLEAR_ALL, + HCLGE_STATE_FD_USER_DEF_CHANGED, + HCLGE_STATE_PTP_EN, + HCLGE_STATE_PTP_TX_HANDLING, + HCLGE_STATE_FEC_STATS_UPDATING, + HCLGE_STATE_MAX +}; + +enum hclge_evt_cause { + 
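/* Illustrative aside, not from the upstream driver: the HCLGE_MEM_BAR defines
 * above split BAR4 in half (RoCE low half, NIC high half) and place each TQP's
 * doorbell memory HCLGE_TQP_MEM_SIZE apart inside the NIC half. A minimal
 * user-space sketch of that arithmetic, assuming a hypothetical 8 MiB BAR in
 * place of pci_resource_len():
 */
#include <stdio.h>

#define EX_BAR4_LEN	(8UL * 1024 * 1024)	/* assumed BAR4 size for the example */
#define EX_TQP_MEM_SIZE	0x10000UL		/* mirrors HCLGE_TQP_MEM_SIZE */

int main(void)
{
	unsigned long nic_off = EX_BAR4_LEN >> 1;	/* second half is for the NIC */
	unsigned int i;

	for (i = 0; i < 3; i++)				/* first three TQPs */
		printf("tqp %u memory at offset 0x%lx\n",
		       i, nic_off + EX_TQP_MEM_SIZE * i);
	return 0;
}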
HCLGE_VECTOR0_EVENT_RST, + HCLGE_VECTOR0_EVENT_MBX, + HCLGE_VECTOR0_EVENT_ERR, + HCLGE_VECTOR0_EVENT_PTP, + HCLGE_VECTOR0_EVENT_OTHER, +}; + +enum HCLGE_MAC_SPEED { + HCLGE_MAC_SPEED_UNKNOWN = 0, /* unknown */ + HCLGE_MAC_SPEED_10M = 10, /* 10 Mbps */ + HCLGE_MAC_SPEED_100M = 100, /* 100 Mbps */ + HCLGE_MAC_SPEED_1G = 1000, /* 1000 Mbps = 1 Gbps */ + HCLGE_MAC_SPEED_10G = 10000, /* 10000 Mbps = 10 Gbps */ + HCLGE_MAC_SPEED_25G = 25000, /* 25000 Mbps = 25 Gbps */ + HCLGE_MAC_SPEED_40G = 40000, /* 40000 Mbps = 40 Gbps */ + HCLGE_MAC_SPEED_50G = 50000, /* 50000 Mbps = 50 Gbps */ + HCLGE_MAC_SPEED_100G = 100000, /* 100000 Mbps = 100 Gbps */ + HCLGE_MAC_SPEED_200G = 200000 /* 200000 Mbps = 200 Gbps */ +}; + +enum HCLGE_MAC_DUPLEX { + HCLGE_MAC_HALF, + HCLGE_MAC_FULL +}; + +#define QUERY_SFP_SPEED 0 +#define QUERY_ACTIVE_SPEED 1 + +struct hclge_mac { + u8 mac_id; + u8 phy_addr; + u8 flag; + u8 media_type; /* port media type, e.g. fibre/copper/backplane */ + u8 mac_addr[ETH_ALEN]; + u8 autoneg; + u8 duplex; + u8 support_autoneg; + u8 speed_type; /* 0: sfp speed, 1: active speed */ + u8 lane_num; + u32 speed; + u32 max_speed; + u32 speed_ability; /* speed ability supported by current media */ + u32 module_type; /* sub media type, e.g. kr/cr/sr/lr */ + u32 fec_mode; /* active fec mode */ + u32 user_fec_mode; + u32 fec_ability; + int link; /* store the link status of mac & phy (if phy exists) */ + struct phy_device *phydev; + struct mii_bus *mdio_bus; + phy_interface_t phy_if; + __ETHTOOL_DECLARE_LINK_MODE_MASK(supported); + __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising); +}; + +struct hclge_hw { + struct hclge_comm_hw hw; + struct hclge_mac mac; + int num_vec; +}; + +enum hclge_fc_mode { + HCLGE_FC_NONE, + HCLGE_FC_RX_PAUSE, + HCLGE_FC_TX_PAUSE, + HCLGE_FC_FULL, + HCLGE_FC_PFC, + HCLGE_FC_DEFAULT +}; + +#define HCLGE_FILTER_TYPE_VF 0 +#define HCLGE_FILTER_TYPE_PORT 1 +#define HCLGE_FILTER_FE_EGRESS_V1_B BIT(0) +#define HCLGE_FILTER_FE_NIC_INGRESS_B BIT(0) +#define HCLGE_FILTER_FE_NIC_EGRESS_B BIT(1) +#define HCLGE_FILTER_FE_ROCE_INGRESS_B BIT(2) +#define HCLGE_FILTER_FE_ROCE_EGRESS_B BIT(3) +#define HCLGE_FILTER_FE_EGRESS (HCLGE_FILTER_FE_NIC_EGRESS_B \ + | HCLGE_FILTER_FE_ROCE_EGRESS_B) +#define HCLGE_FILTER_FE_INGRESS (HCLGE_FILTER_FE_NIC_INGRESS_B \ + | HCLGE_FILTER_FE_ROCE_INGRESS_B) + +enum hclge_vlan_fltr_cap { + HCLGE_VLAN_FLTR_DEF, + HCLGE_VLAN_FLTR_CAN_MDF, +}; +enum hclge_link_fail_code { + HCLGE_LF_NORMAL, + HCLGE_LF_REF_CLOCK_LOST, + HCLGE_LF_XSFP_TX_DISABLE, + HCLGE_LF_XSFP_ABSENT, +}; + +#define HCLGE_LINK_STATUS_DOWN 0 +#define HCLGE_LINK_STATUS_UP 1 + +#define HCLGE_PG_NUM 4 +#define HCLGE_SCH_MODE_SP 0 +#define HCLGE_SCH_MODE_DWRR 1 +struct hclge_pg_info { + u8 pg_id; + u8 pg_sch_mode; /* 0: sp; 1: dwrr */ + u8 tc_bit_map; + u32 bw_limit; + u8 tc_dwrr[HNAE3_MAX_TC]; +}; + +struct hclge_tc_info { + u8 tc_id; + u8 tc_sch_mode; /* 0: sp; 1: dwrr */ + u8 pgid; + u32 bw_limit; +}; + +struct hclge_cfg { + u8 tc_num; + u8 vlan_fliter_cap; + u16 tqp_desc_num; + u16 rx_buf_len; + u16 vf_rss_size_max; + u16 pf_rss_size_max; + u8 phy_addr; + u8 media_type; + u8 mac_addr[ETH_ALEN]; + u8 default_speed; + u32 numa_node_map; + u32 tx_spare_buf_size; + u16 speed_ability; + u16 umv_space; +}; + +struct hclge_tm_info { + u8 num_tc; + u8 num_pg; /* It must be 1 if vNET-Base schd */ + u8 pg_dwrr[HCLGE_PG_NUM]; + u8 prio_tc[HNAE3_MAX_USER_PRIO]; + struct hclge_pg_info pg_info[HCLGE_PG_NUM]; + struct hclge_tc_info tc_info[HNAE3_MAX_TC]; + enum hclge_fc_mode fc_mode; + u8 hw_pfc_map; /* Allow for packet 
drop or not on this TC */ + u8 pfc_en; /* PFC enabled or not for user priority */ +}; + +/* max number of mac statistics on each version */ +#define HCLGE_MAC_STATS_MAX_NUM_V1 87 +#define HCLGE_MAC_STATS_MAX_NUM_V2 105 + +struct hclge_comm_stats_str { + char desc[ETH_GSTRING_LEN]; + u32 stats_num; + unsigned long offset; +}; + +/* mac stats ,opcode id: 0x0032 */ +struct hclge_mac_stats { + u64 mac_tx_mac_pause_num; + u64 mac_rx_mac_pause_num; + u64 rsv0; + u64 mac_tx_pfc_pri0_pkt_num; + u64 mac_tx_pfc_pri1_pkt_num; + u64 mac_tx_pfc_pri2_pkt_num; + u64 mac_tx_pfc_pri3_pkt_num; + u64 mac_tx_pfc_pri4_pkt_num; + u64 mac_tx_pfc_pri5_pkt_num; + u64 mac_tx_pfc_pri6_pkt_num; + u64 mac_tx_pfc_pri7_pkt_num; + u64 mac_rx_pfc_pri0_pkt_num; + u64 mac_rx_pfc_pri1_pkt_num; + u64 mac_rx_pfc_pri2_pkt_num; + u64 mac_rx_pfc_pri3_pkt_num; + u64 mac_rx_pfc_pri4_pkt_num; + u64 mac_rx_pfc_pri5_pkt_num; + u64 mac_rx_pfc_pri6_pkt_num; + u64 mac_rx_pfc_pri7_pkt_num; + u64 mac_tx_total_pkt_num; + u64 mac_tx_total_oct_num; + u64 mac_tx_good_pkt_num; + u64 mac_tx_bad_pkt_num; + u64 mac_tx_good_oct_num; + u64 mac_tx_bad_oct_num; + u64 mac_tx_uni_pkt_num; + u64 mac_tx_multi_pkt_num; + u64 mac_tx_broad_pkt_num; + u64 mac_tx_undersize_pkt_num; + u64 mac_tx_oversize_pkt_num; + u64 mac_tx_64_oct_pkt_num; + u64 mac_tx_65_127_oct_pkt_num; + u64 mac_tx_128_255_oct_pkt_num; + u64 mac_tx_256_511_oct_pkt_num; + u64 mac_tx_512_1023_oct_pkt_num; + u64 mac_tx_1024_1518_oct_pkt_num; + u64 mac_tx_1519_2047_oct_pkt_num; + u64 mac_tx_2048_4095_oct_pkt_num; + u64 mac_tx_4096_8191_oct_pkt_num; + u64 rsv1; + u64 mac_tx_8192_9216_oct_pkt_num; + u64 mac_tx_9217_12287_oct_pkt_num; + u64 mac_tx_12288_16383_oct_pkt_num; + u64 mac_tx_1519_max_good_oct_pkt_num; + u64 mac_tx_1519_max_bad_oct_pkt_num; + + u64 mac_rx_total_pkt_num; + u64 mac_rx_total_oct_num; + u64 mac_rx_good_pkt_num; + u64 mac_rx_bad_pkt_num; + u64 mac_rx_good_oct_num; + u64 mac_rx_bad_oct_num; + u64 mac_rx_uni_pkt_num; + u64 mac_rx_multi_pkt_num; + u64 mac_rx_broad_pkt_num; + u64 mac_rx_undersize_pkt_num; + u64 mac_rx_oversize_pkt_num; + u64 mac_rx_64_oct_pkt_num; + u64 mac_rx_65_127_oct_pkt_num; + u64 mac_rx_128_255_oct_pkt_num; + u64 mac_rx_256_511_oct_pkt_num; + u64 mac_rx_512_1023_oct_pkt_num; + u64 mac_rx_1024_1518_oct_pkt_num; + u64 mac_rx_1519_2047_oct_pkt_num; + u64 mac_rx_2048_4095_oct_pkt_num; + u64 mac_rx_4096_8191_oct_pkt_num; + u64 rsv2; + u64 mac_rx_8192_9216_oct_pkt_num; + u64 mac_rx_9217_12287_oct_pkt_num; + u64 mac_rx_12288_16383_oct_pkt_num; + u64 mac_rx_1519_max_good_oct_pkt_num; + u64 mac_rx_1519_max_bad_oct_pkt_num; + + u64 mac_tx_fragment_pkt_num; + u64 mac_tx_undermin_pkt_num; + u64 mac_tx_jabber_pkt_num; + u64 mac_tx_err_all_pkt_num; + u64 mac_tx_from_app_good_pkt_num; + u64 mac_tx_from_app_bad_pkt_num; + u64 mac_rx_fragment_pkt_num; + u64 mac_rx_undermin_pkt_num; + u64 mac_rx_jabber_pkt_num; + u64 mac_rx_fcs_err_pkt_num; + u64 mac_rx_send_app_good_pkt_num; + u64 mac_rx_send_app_bad_pkt_num; + u64 mac_tx_pfc_pause_pkt_num; + u64 mac_rx_pfc_pause_pkt_num; + u64 mac_tx_ctrl_pkt_num; + u64 mac_rx_ctrl_pkt_num; + + /* duration of pfc */ + u64 mac_tx_pfc_pri0_xoff_time; + u64 mac_tx_pfc_pri1_xoff_time; + u64 mac_tx_pfc_pri2_xoff_time; + u64 mac_tx_pfc_pri3_xoff_time; + u64 mac_tx_pfc_pri4_xoff_time; + u64 mac_tx_pfc_pri5_xoff_time; + u64 mac_tx_pfc_pri6_xoff_time; + u64 mac_tx_pfc_pri7_xoff_time; + u64 mac_rx_pfc_pri0_xoff_time; + u64 mac_rx_pfc_pri1_xoff_time; + u64 mac_rx_pfc_pri2_xoff_time; + u64 mac_rx_pfc_pri3_xoff_time; + u64 mac_rx_pfc_pri4_xoff_time; + 
u64 mac_rx_pfc_pri5_xoff_time; + u64 mac_rx_pfc_pri6_xoff_time; + u64 mac_rx_pfc_pri7_xoff_time; + + /* duration of pause */ + u64 mac_tx_pause_xoff_time; + u64 mac_rx_pause_xoff_time; +}; + +#define HCLGE_STATS_TIMER_INTERVAL 300UL + +/* fec stats ,opcode id: 0x0316 */ +#define HCLGE_FEC_STATS_MAX_LANES 8 +struct hclge_fec_stats { + /* fec rs mode total stats */ + u64 rs_corr_blocks; + u64 rs_uncorr_blocks; + u64 rs_error_blocks; + /* fec base-r mode per lanes stats */ + u64 base_r_lane_num; + u64 base_r_corr_blocks; + u64 base_r_uncorr_blocks; + union { + struct { + u64 base_r_corr_per_lanes[HCLGE_FEC_STATS_MAX_LANES]; + u64 base_r_uncorr_per_lanes[HCLGE_FEC_STATS_MAX_LANES]; + }; + u64 per_lanes[HCLGE_FEC_STATS_MAX_LANES * 2]; + }; +}; + +struct hclge_vlan_type_cfg { + u16 rx_ot_fst_vlan_type; + u16 rx_ot_sec_vlan_type; + u16 rx_in_fst_vlan_type; + u16 rx_in_sec_vlan_type; + u16 tx_ot_vlan_type; + u16 tx_in_vlan_type; +}; + +enum HCLGE_FD_MODE { + HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1, + HCLGE_FD_MODE_DEPTH_1K_WIDTH_400B_STAGE_2, + HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1, + HCLGE_FD_MODE_DEPTH_2K_WIDTH_200B_STAGE_2, +}; + +enum HCLGE_FD_KEY_TYPE { + HCLGE_FD_KEY_BASE_ON_PTYPE, + HCLGE_FD_KEY_BASE_ON_TUPLE, +}; + +enum HCLGE_FD_STAGE { + HCLGE_FD_STAGE_1, + HCLGE_FD_STAGE_2, + MAX_STAGE_NUM, +}; + +/* OUTER_XXX indicates tuples in tunnel header of tunnel packet + * INNER_XXX indicate tuples in tunneled header of tunnel packet or + * tuples of non-tunnel packet + */ +enum HCLGE_FD_TUPLE { + OUTER_DST_MAC, + OUTER_SRC_MAC, + OUTER_VLAN_TAG_FST, + OUTER_VLAN_TAG_SEC, + OUTER_ETH_TYPE, + OUTER_L2_RSV, + OUTER_IP_TOS, + OUTER_IP_PROTO, + OUTER_SRC_IP, + OUTER_DST_IP, + OUTER_L3_RSV, + OUTER_SRC_PORT, + OUTER_DST_PORT, + OUTER_L4_RSV, + OUTER_TUN_VNI, + OUTER_TUN_FLOW_ID, + INNER_DST_MAC, + INNER_SRC_MAC, + INNER_VLAN_TAG_FST, + INNER_VLAN_TAG_SEC, + INNER_ETH_TYPE, + INNER_L2_RSV, + INNER_IP_TOS, + INNER_IP_PROTO, + INNER_SRC_IP, + INNER_DST_IP, + INNER_L3_RSV, + INNER_SRC_PORT, + INNER_DST_PORT, + INNER_L4_RSV, + MAX_TUPLE, +}; + +#define HCLGE_FD_TUPLE_USER_DEF_TUPLES \ + (BIT(INNER_L2_RSV) | BIT(INNER_L3_RSV) | BIT(INNER_L4_RSV)) + +enum HCLGE_FD_META_DATA { + PACKET_TYPE_ID, + IP_FRAGEMENT, + ROCE_TYPE, + NEXT_KEY, + VLAN_NUMBER, + SRC_VPORT, + DST_VPORT, + TUNNEL_PACKET, + MAX_META_DATA, +}; + +enum HCLGE_FD_KEY_OPT { + KEY_OPT_U8, + KEY_OPT_LE16, + KEY_OPT_LE32, + KEY_OPT_MAC, + KEY_OPT_IP, + KEY_OPT_VNI, +}; + +struct key_info { + u8 key_type; + u8 key_length; /* use bit as unit */ + enum HCLGE_FD_KEY_OPT key_opt; + int offset; + int moffset; +}; + +#define MAX_KEY_LENGTH 400 +#define MAX_KEY_DWORDS DIV_ROUND_UP(MAX_KEY_LENGTH / 8, 4) +#define MAX_KEY_BYTES (MAX_KEY_DWORDS * 4) +#define MAX_META_DATA_LENGTH 32 + +#define HCLGE_FD_MAX_USER_DEF_OFFSET 9000 +#define HCLGE_FD_USER_DEF_DATA GENMASK(15, 0) +#define HCLGE_FD_USER_DEF_OFFSET GENMASK(15, 0) +#define HCLGE_FD_USER_DEF_OFFSET_UNMASK GENMASK(15, 0) + +/* assigned by firmware, the real filter number for each pf may be less */ +#define MAX_FD_FILTER_NUM 4096 +#define HCLGE_ARFS_EXPIRE_INTERVAL 5UL + +#define hclge_read_dev(a, reg) \ + hclge_comm_read_reg((a)->hw.io_base, reg) +#define hclge_write_dev(a, reg, value) \ + hclge_comm_write_reg((a)->hw.io_base, reg, value) + +enum HCLGE_FD_ACTIVE_RULE_TYPE { + HCLGE_FD_RULE_NONE, + HCLGE_FD_ARFS_ACTIVE, + HCLGE_FD_EP_ACTIVE, + HCLGE_FD_TC_FLOWER_ACTIVE, +}; + +enum HCLGE_FD_PACKET_TYPE { + NIC_PACKET, + ROCE_PACKET, +}; + +enum HCLGE_FD_ACTION { + HCLGE_FD_ACTION_SELECT_QUEUE, + 
HCLGE_FD_ACTION_DROP_PACKET, + HCLGE_FD_ACTION_SELECT_TC, +}; + +enum HCLGE_FD_NODE_STATE { + HCLGE_FD_TO_ADD, + HCLGE_FD_TO_DEL, + HCLGE_FD_ACTIVE, + HCLGE_FD_DELETED, +}; + +enum HCLGE_FD_USER_DEF_LAYER { + HCLGE_FD_USER_DEF_NONE, + HCLGE_FD_USER_DEF_L2, + HCLGE_FD_USER_DEF_L3, + HCLGE_FD_USER_DEF_L4, +}; + +#define HCLGE_FD_USER_DEF_LAYER_NUM 3 +struct hclge_fd_user_def_cfg { + u16 ref_cnt; + u16 offset; +}; + +struct hclge_fd_user_def_info { + enum HCLGE_FD_USER_DEF_LAYER layer; + u16 data; + u16 data_mask; + u16 offset; +}; + +struct hclge_fd_key_cfg { + u8 key_sel; + u8 inner_sipv6_word_en; + u8 inner_dipv6_word_en; + u8 outer_sipv6_word_en; + u8 outer_dipv6_word_en; + u32 tuple_active; + u32 meta_data_active; +}; + +struct hclge_fd_cfg { + u8 fd_mode; + u16 max_key_length; /* use bit as unit */ + u32 rule_num[MAX_STAGE_NUM]; /* rule entry number */ + u16 cnt_num[MAX_STAGE_NUM]; /* rule hit counter number */ + struct hclge_fd_key_cfg key_cfg[MAX_STAGE_NUM]; + struct hclge_fd_user_def_cfg user_def_cfg[HCLGE_FD_USER_DEF_LAYER_NUM]; +}; + +#define IPV4_INDEX 3 +#define IPV6_SIZE 4 +struct hclge_fd_rule_tuples { + u8 src_mac[ETH_ALEN]; + u8 dst_mac[ETH_ALEN]; + /* Be compatible for ip address of both ipv4 and ipv6. + * For ipv4 address, we store it in src/dst_ip[3]. + */ + u32 src_ip[IPV6_SIZE]; + u32 dst_ip[IPV6_SIZE]; + u16 src_port; + u16 dst_port; + u16 vlan_tag1; + u16 ether_proto; + u16 l2_user_def; + u16 l3_user_def; + u32 l4_user_def; + u8 ip_tos; + u8 ip_proto; +}; + +struct hclge_fd_rule { + struct hlist_node rule_node; + struct hclge_fd_rule_tuples tuples; + struct hclge_fd_rule_tuples tuples_mask; + u32 unused_tuple; + u32 flow_type; + union { + struct { + unsigned long cookie; + u8 tc; + } cls_flower; + struct { + u16 flow_id; /* only used for arfs */ + } arfs; + struct { + struct hclge_fd_user_def_info user_def; + } ep; + }; + u16 queue_id; + u16 vf_id; + u16 location; + enum HCLGE_FD_ACTIVE_RULE_TYPE rule_type; + enum HCLGE_FD_NODE_STATE state; + u8 action; +}; + +struct hclge_fd_ad_data { + u16 ad_id; + u8 drop_packet; + u8 forward_to_direct_queue; + u16 queue_id; + u8 use_counter; + u8 counter_id; + u8 use_next_stage; + u8 write_rule_id_to_bd; + u8 next_input_key; + u16 rule_id; + u16 tc_size; + u8 override_tc; +}; + +enum HCLGE_MAC_NODE_STATE { + HCLGE_MAC_TO_ADD, + HCLGE_MAC_TO_DEL, + HCLGE_MAC_ACTIVE +}; + +struct hclge_mac_node { + struct list_head node; + enum HCLGE_MAC_NODE_STATE state; + u8 mac_addr[ETH_ALEN]; +}; + +enum HCLGE_MAC_ADDR_TYPE { + HCLGE_MAC_ADDR_UC, + HCLGE_MAC_ADDR_MC +}; + +struct hclge_vport_vlan_cfg { + struct list_head node; + int hd_tbl_status; + u16 vlan_id; +}; + +struct hclge_rst_stats { + u32 reset_done_cnt; /* the number of reset has completed */ + u32 hw_reset_done_cnt; /* the number of HW reset has completed */ + u32 pf_rst_cnt; /* the number of PF reset */ + u32 flr_rst_cnt; /* the number of FLR */ + u32 global_rst_cnt; /* the number of GLOBAL */ + u32 imp_rst_cnt; /* the number of IMP reset */ + u32 reset_cnt; /* the number of reset */ + u32 reset_fail_cnt; /* the number of reset fail */ +}; + +/* time and register status when mac tunnel interruption occur */ +struct hclge_mac_tnl_stats { + u64 time; + u32 status; +}; + +#define HCLGE_RESET_INTERVAL (10 * HZ) +#define HCLGE_WAIT_RESET_DONE 100 + +#pragma pack(1) +struct hclge_vf_vlan_cfg { + u8 mbx_cmd; + u8 subcode; + union { + struct { + u8 is_kill; + __le16 vlan; + __le16 proto; + }; + u8 enable; + }; +}; + +#pragma pack() + +/* For each bit of TCAM entry, it uses a pair of 'x' and 
+ * 'y' to indicate which value to match, like below: + * ---------------------------------- + * | bit x | bit y | search value | + * ---------------------------------- + * | 0 | 0 | always hit | + * ---------------------------------- + * | 1 | 0 | match '0' | + * ---------------------------------- + * | 0 | 1 | match '1' | + * ---------------------------------- + * | 1 | 1 | invalid | + * ---------------------------------- + * Then for input key(k) and mask(v), we can calculate the value by + * the formulae: + * x = (~k) & v + * y = (k ^ ~v) & k + */ +#define calc_x(x, k, v) (x = ~(k) & (v)) +#define calc_y(y, k, v) \ + do { \ + const typeof(k) _k_ = (k); \ + const typeof(v) _v_ = (v); \ + (y) = (_k_ ^ ~_v_) & (_k_); \ + } while (0) + +#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f)) +#define HCLGE_STATS_READ(p, offset) (*(u64 *)((u8 *)(p) + (offset))) + +#define HCLGE_MAC_TNL_LOG_SIZE 8 +#define HCLGE_VPORT_NUM 256 +struct hclge_dev { + struct pci_dev *pdev; + struct hnae3_ae_dev *ae_dev; + struct hclge_hw hw; + struct hclge_misc_vector misc_vector; + struct hclge_mac_stats mac_stats; + struct hclge_fec_stats fec_stats; + unsigned long state; + unsigned long flr_state; + unsigned long last_reset_time; + + enum hnae3_reset_type reset_type; + enum hnae3_reset_type reset_level; + unsigned long default_reset_request; + unsigned long reset_request; /* reset has been requested */ + unsigned long reset_pending; /* client rst is pending to be served */ + struct hclge_rst_stats rst_stats; + struct semaphore reset_sem; /* protect reset process */ + u32 fw_version; + u16 num_tqps; /* Num task queue pairs of this PF */ + u16 num_req_vfs; /* Num VFs requested for this PF */ + + u16 base_tqp_pid; /* Base task tqp physical id of this PF */ + u16 alloc_rss_size; /* Allocated RSS task queue */ + u16 vf_rss_size_max; /* HW defined VF max RSS task queue */ + u16 pf_rss_size_max; /* HW defined PF max RSS task queue */ + u32 tx_spare_buf_size; /* HW defined TX spare buffer size */ + + u16 fdir_pf_filter_count; /* Num of guaranteed filters for this PF */ + u16 num_alloc_vport; /* Num vports this driver supports */ + u32 numa_node_mask; + u16 rx_buf_len; + u16 num_tx_desc; /* desc num of per tx queue */ + u16 num_rx_desc; /* desc num of per rx queue */ + u8 hw_tc_map; + enum hclge_fc_mode fc_mode_last_time; + u8 support_sfp_query; + +#define HCLGE_FLAG_TC_BASE_SCH_MODE 1 +#define HCLGE_FLAG_VNET_BASE_SCH_MODE 2 + u8 tx_sch_mode; + u8 tc_max; + u8 pfc_max; + + u8 default_up; + u8 dcbx_cap; + struct hclge_tm_info tm_info; + + u16 num_msi; + u16 num_msi_left; + u16 num_msi_used; + u16 *vector_status; + int *vector_irq; + u16 num_nic_msi; /* Num of nic vectors for this PF */ + u16 num_roce_msi; /* Num of roce vectors for this PF */ + + unsigned long service_timer_period; + unsigned long service_timer_previous; + struct timer_list reset_timer; + struct delayed_work service_task; + + bool cur_promisc; + int num_alloc_vfs; /* Actual number of VFs allocated */ + + struct hclge_comm_tqp *htqp; + struct hclge_vport *vport; + + struct dentry *hclge_dbgfs; + + struct hnae3_client *nic_client; + struct hnae3_client *roce_client; + +#define HCLGE_FLAG_MAIN BIT(0) +#define HCLGE_FLAG_DCB_CAPABLE BIT(1) + u32 flag; + + u32 pkt_buf_size; /* Total pf buf size for tx/rx */ + u32 tx_buf_size; /* Tx buffer size for each TC */ + u32 dv_buf_size; /* Dv buffer size for each TC */ + + u32 mps; /* Max packet size */ + /* vport_lock protect resource shared by vports */ + struct mutex vport_lock; + + struct 
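/* Illustrative aside, not from the upstream driver: a user-space check of the
 * x/y TCAM encoding documented above. The calc_x()/calc_y() formulas are
 * restated without the kernel typeof() plumbing; key = 0xA5 with mask = 0xF0
 * is an arbitrary example value.
 */
#include <stdint.h>
#include <stdio.h>

#define ex_calc_x(k, v) ((uint8_t)(~(k) & (v)))
#define ex_calc_y(k, v) ((uint8_t)(((k) ^ ~(v)) & (k)))

int main(void)
{
	uint8_t key = 0xA5, mask = 0xF0;
	uint8_t x = ex_calc_x(key, mask);	/* 0x50 */
	uint8_t y = ex_calc_y(key, mask);	/* 0xA0 */

	/* Upper nibble (mask bits set): key 1-bits give x=0/y=1 ("match '1'"),
	 * key 0-bits give x=1/y=0 ("match '0'"). Lower nibble (mask bits
	 * clear): x=y=0, i.e. "always hit".
	 */
	printf("x=0x%02x y=0x%02x\n", x, y);
	return 0;
}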
hclge_vlan_type_cfg vlan_type_cfg; + + unsigned long vlan_table[VLAN_N_VID][BITS_TO_LONGS(HCLGE_VPORT_NUM)]; + unsigned long vf_vlan_full[BITS_TO_LONGS(HCLGE_VPORT_NUM)]; + + unsigned long vport_config_block[BITS_TO_LONGS(HCLGE_VPORT_NUM)]; + + struct hclge_fd_cfg fd_cfg; + struct hlist_head fd_rule_list; + spinlock_t fd_rule_lock; /* protect fd_rule_list and fd_bmap */ + u16 hclge_fd_rule_num; + unsigned long serv_processed_cnt; + unsigned long last_serv_processed; + unsigned long last_rst_scheduled; + unsigned long last_mbx_scheduled; + unsigned long fd_bmap[BITS_TO_LONGS(MAX_FD_FILTER_NUM)]; + enum HCLGE_FD_ACTIVE_RULE_TYPE fd_active_type; + u8 fd_en; + bool gro_en; + + u16 wanted_umv_size; + /* max available unicast mac vlan space */ + u16 max_umv_size; + /* private unicast mac vlan space, it's same for PF and its VFs */ + u16 priv_umv_size; + /* unicast mac vlan space shared by PF and its VFs */ + u16 share_umv_size; + /* multicast mac address number used by PF and its VFs */ + u16 used_mc_mac_num; + + DECLARE_KFIFO(mac_tnl_log, struct hclge_mac_tnl_stats, + HCLGE_MAC_TNL_LOG_SIZE); + + struct hclge_ptp *ptp; + struct devlink *devlink; + struct hclge_comm_rss_cfg rss_cfg; +}; + +/* VPort level vlan tag configuration for TX direction */ +struct hclge_tx_vtag_cfg { + bool accept_tag1; /* Whether accept tag1 packet from host */ + bool accept_untag1; /* Whether accept untag1 packet from host */ + bool accept_tag2; + bool accept_untag2; + bool insert_tag1_en; /* Whether insert inner vlan tag */ + bool insert_tag2_en; /* Whether insert outer vlan tag */ + u16 default_tag1; /* The default inner vlan tag to insert */ + u16 default_tag2; /* The default outer vlan tag to insert */ + bool tag_shift_mode_en; +}; + +/* VPort level vlan tag configuration for RX direction */ +struct hclge_rx_vtag_cfg { + bool rx_vlan_offload_en; /* Whether enable rx vlan offload */ + bool strip_tag1_en; /* Whether strip inner vlan tag */ + bool strip_tag2_en; /* Whether strip outer vlan tag */ + bool vlan1_vlan_prionly; /* Inner vlan tag up to descriptor enable */ + bool vlan2_vlan_prionly; /* Outer vlan tag up to descriptor enable */ + bool strip_tag1_discard_en; /* Inner vlan tag discard for BD enable */ + bool strip_tag2_discard_en; /* Outer vlan tag discard for BD enable */ +}; + +enum HCLGE_VPORT_STATE { + HCLGE_VPORT_STATE_ALIVE, + HCLGE_VPORT_STATE_MAC_TBL_CHANGE, + HCLGE_VPORT_STATE_PROMISC_CHANGE, + HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE, + HCLGE_VPORT_STATE_INITED, + HCLGE_VPORT_STATE_MAX +}; + +enum HCLGE_VPORT_NEED_NOTIFY { + HCLGE_VPORT_NEED_NOTIFY_RESET, + HCLGE_VPORT_NEED_NOTIFY_VF_VLAN, +}; + +struct hclge_vlan_info { + u16 vlan_proto; /* so far support 802.1Q only */ + u16 qos; + u16 vlan_tag; +}; + +struct hclge_port_base_vlan_config { + u16 state; + bool tbl_sta; + struct hclge_vlan_info vlan_info; + struct hclge_vlan_info old_vlan_info; +}; + +struct hclge_vf_info { + int link_state; + u8 mac[ETH_ALEN]; + u32 spoofchk; + u32 max_tx_rate; + u32 trusted; + u8 request_uc_en; + u8 request_mc_en; + u8 request_bc_en; +}; + +struct hclge_vport { + u16 alloc_tqps; /* Allocated Tx/Rx queues */ + + u16 qs_offset; + u32 bw_limit; /* VSI BW Limit (0 = disabled) */ + u8 dwrr; + + bool req_vlan_fltr_en; + bool cur_vlan_fltr_en; + unsigned long vlan_del_fail_bmap[BITS_TO_LONGS(VLAN_N_VID)]; + struct hclge_port_base_vlan_config port_base_vlan_cfg; + struct hclge_tx_vtag_cfg txvlan_cfg; + struct hclge_rx_vtag_cfg rxvlan_cfg; + + u16 used_umv_num; + + u16 vport_id; + struct hclge_dev *back; /* Back reference to 
associated dev */ + struct hnae3_handle nic; + struct hnae3_handle roce; + + unsigned long state; + unsigned long need_notify; + unsigned long last_active_jiffies; + u32 mps; /* Max packet size */ + struct hclge_vf_info vf_info; + + u8 overflow_promisc_flags; + u8 last_promisc_flags; + + spinlock_t mac_list_lock; /* protect mac address need to add/detele */ + struct list_head uc_mac_list; /* Store VF unicast table */ + struct list_head mc_mac_list; /* Store VF multicast table */ + + struct list_head vlan_list; /* Store VF vlan table */ +}; + +struct hclge_speed_bit_map { + u32 speed; + u32 speed_bit; +}; + +struct hclge_mac_speed_map { + u32 speed_drv; /* speed defined in driver */ + u32 speed_fw; /* speed defined in firmware */ +}; + +int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc, + bool en_mc_pmc, bool en_bc_pmc); +int hclge_add_uc_addr_common(struct hclge_vport *vport, + const unsigned char *addr); +int hclge_rm_uc_addr_common(struct hclge_vport *vport, + const unsigned char *addr); +int hclge_add_mc_addr_common(struct hclge_vport *vport, + const unsigned char *addr); +int hclge_rm_mc_addr_common(struct hclge_vport *vport, + const unsigned char *addr); + +struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle); +int hclge_bind_ring_with_vector(struct hclge_vport *vport, + int vector_id, bool en, + struct hnae3_ring_chain_node *ring_chain); + +static inline int hclge_get_queue_id(struct hnae3_queue *queue) +{ + struct hclge_comm_tqp *tqp = + container_of(queue, struct hclge_comm_tqp, q); + + return tqp->index; +} + +int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport); +int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex, u8 lane_num); +int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto, + u16 vlan_id, bool is_kill); +int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable); + +int hclge_buffer_alloc(struct hclge_dev *hdev); +int hclge_rss_init_hw(struct hclge_dev *hdev); + +void hclge_mbx_handler(struct hclge_dev *hdev); +int hclge_reset_tqp(struct hnae3_handle *handle); +int hclge_cfg_flowctrl(struct hclge_dev *hdev); +int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id); +int hclge_vport_start(struct hclge_vport *vport); +void hclge_vport_stop(struct hclge_vport *vport); +int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu); +int hclge_dbg_read_cmd(struct hnae3_handle *handle, enum hnae3_dbg_cmd cmd, + char *buf, int len); +u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id); +int hclge_notify_client(struct hclge_dev *hdev, + enum hnae3_reset_notify_type type); +int hclge_update_mac_list(struct hclge_vport *vport, + enum HCLGE_MAC_NODE_STATE state, + enum HCLGE_MAC_ADDR_TYPE mac_type, + const unsigned char *addr); +int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport, + const u8 *old_addr, const u8 *new_addr); +void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list, + enum HCLGE_MAC_ADDR_TYPE mac_type); +void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list); +void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev); +void hclge_restore_mac_table_common(struct hclge_vport *vport); +void hclge_restore_vport_port_base_vlan_config(struct hclge_dev *hdev); +void hclge_restore_vport_vlan_table(struct hclge_vport *vport); +int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state, + struct hclge_vlan_info *vlan_info); +int hclge_push_vf_port_base_vlan_info(struct 
hclge_vport *vport, u8 vfid, + u16 state, + struct hclge_vlan_info *vlan_info); +void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time); +int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, + struct hclge_desc *desc); +void hclge_report_hw_error(struct hclge_dev *hdev, + enum hnae3_hw_error_type type); +void hclge_inform_vf_promisc_info(struct hclge_vport *vport); +int hclge_dbg_dump_rst_info(struct hclge_dev *hdev, char *buf, int len); +int hclge_push_vf_link_status(struct hclge_vport *vport); +int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en); +int hclge_mac_update_stats(struct hclge_dev *hdev); +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c new file mode 100644 index 000000000..04ff9bf12 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c @@ -0,0 +1,1152 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. + +#include "hclge_main.h" +#include "hclge_mbx.h" +#include "hnae3.h" +#include "hclge_comm_rss.h" + +#define CREATE_TRACE_POINTS +#include "hclge_trace.h" + +static u16 hclge_errno_to_resp(int errno) +{ + int resp = abs(errno); + + /* The status for pf to vf msg cmd is u16, constrainted by HW. + * We need to keep the same type with it. + * The intput errno is the stander error code, it's safely to + * use a u16 to store the abs(errno). + */ + return (u16)resp; +} + +/* hclge_gen_resp_to_vf: used to generate a synchronous response to VF when PF + * receives a mailbox message from VF. + * @vport: pointer to struct hclge_vport + * @vf_to_pf_req: pointer to hclge_mbx_vf_to_pf_cmd of the original mailbox + * message + * @resp_status: indicate to VF whether its request success(0) or failed. 
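/* Illustrative aside, not from the upstream driver: as the comment above
 * explains, the PF-to-VF response status is a u16, so a negative kernel errno
 * is folded to its absolute value before being sent back. A minimal user-space
 * sketch of that conversion (ex_errno_to_resp() is a hypothetical stand-in):
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static uint16_t ex_errno_to_resp(int err)
{
	return (uint16_t)abs(err);	/* e.g. -EINVAL (-22) becomes 22 */
}

int main(void)
{
	printf("resp status for -EINVAL: %u\n", ex_errno_to_resp(-EINVAL));
	return 0;
}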
+ */ +static int hclge_gen_resp_to_vf(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *vf_to_pf_req, + struct hclge_respond_to_vf_msg *resp_msg) +{ + struct hclge_mbx_pf_to_vf_cmd *resp_pf_to_vf; + struct hclge_dev *hdev = vport->back; + enum hclge_comm_cmd_status status; + struct hclge_desc desc; + u16 resp; + + resp_pf_to_vf = (struct hclge_mbx_pf_to_vf_cmd *)desc.data; + + if (resp_msg->len > HCLGE_MBX_MAX_RESP_DATA_SIZE) { + dev_err(&hdev->pdev->dev, + "PF fail to gen resp to VF len %u exceeds max len %u\n", + resp_msg->len, + HCLGE_MBX_MAX_RESP_DATA_SIZE); + /* If resp_msg->len is too long, set the value to max length + * and return the msg to VF + */ + resp_msg->len = HCLGE_MBX_MAX_RESP_DATA_SIZE; + } + + hclge_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_PF_TO_VF, false); + + resp_pf_to_vf->dest_vfid = vf_to_pf_req->mbx_src_vfid; + resp_pf_to_vf->msg_len = vf_to_pf_req->msg_len; + resp_pf_to_vf->match_id = vf_to_pf_req->match_id; + + resp_pf_to_vf->msg.code = cpu_to_le16(HCLGE_MBX_PF_VF_RESP); + resp_pf_to_vf->msg.vf_mbx_msg_code = + cpu_to_le16(vf_to_pf_req->msg.code); + resp_pf_to_vf->msg.vf_mbx_msg_subcode = + cpu_to_le16(vf_to_pf_req->msg.subcode); + resp = hclge_errno_to_resp(resp_msg->status); + if (resp < SHRT_MAX) { + resp_pf_to_vf->msg.resp_status = cpu_to_le16(resp); + } else { + dev_warn(&hdev->pdev->dev, + "failed to send response to VF, response status %u is out-of-bound\n", + resp); + resp_pf_to_vf->msg.resp_status = cpu_to_le16(EIO); + } + + if (resp_msg->len > 0) + memcpy(resp_pf_to_vf->msg.resp_data, resp_msg->data, + resp_msg->len); + + trace_hclge_pf_mbx_send(hdev, resp_pf_to_vf); + + status = hclge_cmd_send(&hdev->hw, &desc, 1); + if (status) + dev_err(&hdev->pdev->dev, + "failed to send response to VF, status: %d, vfid: %u, code: %u, subcode: %u.\n", + status, vf_to_pf_req->mbx_src_vfid, + vf_to_pf_req->msg.code, vf_to_pf_req->msg.subcode); + + return status; +} + +static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len, + u16 mbx_opcode, u8 dest_vfid) +{ + struct hclge_mbx_pf_to_vf_cmd *resp_pf_to_vf; + struct hclge_dev *hdev = vport->back; + enum hclge_comm_cmd_status status; + struct hclge_desc desc; + + if (msg_len > HCLGE_MBX_MAX_MSG_SIZE) { + dev_err(&hdev->pdev->dev, + "msg data length(=%u) exceeds maximum(=%u)\n", + msg_len, HCLGE_MBX_MAX_MSG_SIZE); + return -EMSGSIZE; + } + + resp_pf_to_vf = (struct hclge_mbx_pf_to_vf_cmd *)desc.data; + + hclge_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_PF_TO_VF, false); + + resp_pf_to_vf->dest_vfid = dest_vfid; + resp_pf_to_vf->msg_len = msg_len; + resp_pf_to_vf->msg.code = cpu_to_le16(mbx_opcode); + + memcpy(resp_pf_to_vf->msg.msg_data, msg, msg_len); + + trace_hclge_pf_mbx_send(hdev, resp_pf_to_vf); + + status = hclge_cmd_send(&hdev->hw, &desc, 1); + if (status) + dev_err(&hdev->pdev->dev, + "failed to send mailbox to VF, status: %d, vfid: %u, opcode: %u\n", + status, dest_vfid, mbx_opcode); + + return status; +} + +static int hclge_inform_vf_reset(struct hclge_vport *vport, u16 reset_type) +{ + __le16 msg_data; + u8 dest_vfid; + + dest_vfid = (u8)vport->vport_id; + msg_data = cpu_to_le16(reset_type); + + /* send this requested info to VF */ + return hclge_send_mbx_msg(vport, (u8 *)&msg_data, sizeof(msg_data), + HCLGE_MBX_ASSERTING_RESET, dest_vfid); +} + +int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport) +{ + struct hclge_dev *hdev = vport->back; + u16 reset_type; + + BUILD_BUG_ON(HNAE3_MAX_RESET > U16_MAX); + + if (hdev->reset_type == HNAE3_FUNC_RESET) + reset_type = 
HNAE3_VF_PF_FUNC_RESET; + else if (hdev->reset_type == HNAE3_FLR_RESET) + reset_type = HNAE3_VF_FULL_RESET; + else + reset_type = HNAE3_VF_FUNC_RESET; + + return hclge_inform_vf_reset(vport, reset_type); +} + +static void hclge_free_vector_ring_chain(struct hnae3_ring_chain_node *head) +{ + struct hnae3_ring_chain_node *chain_tmp, *chain; + + chain = head->next; + + while (chain) { + chain_tmp = chain->next; + kfree_sensitive(chain); + chain = chain_tmp; + } +} + +/* hclge_get_ring_chain_from_mbx: get ring type & tqp id & int_gl idx + * from mailbox message + * msg[0]: opcode + * msg[1]: + * msg[2]: ring_num + * msg[3]: first ring type (TX|RX) + * msg[4]: first tqp id + * msg[5]: first int_gl idx + * msg[6] ~ msg[14]: other ring type, tqp id and int_gl idx + */ +static int hclge_get_ring_chain_from_mbx( + struct hclge_mbx_vf_to_pf_cmd *req, + struct hnae3_ring_chain_node *ring_chain, + struct hclge_vport *vport) +{ + struct hnae3_ring_chain_node *cur_chain, *new_chain; + struct hclge_dev *hdev = vport->back; + int ring_num; + int i; + + ring_num = req->msg.ring_num; + + if (ring_num > HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM) + return -EINVAL; + + for (i = 0; i < ring_num; i++) { + if (req->msg.param[i].tqp_index >= vport->nic.kinfo.rss_size) { + dev_err(&hdev->pdev->dev, "tqp index(%u) is out of range(0-%u)\n", + req->msg.param[i].tqp_index, + vport->nic.kinfo.rss_size - 1U); + return -EINVAL; + } + } + + hnae3_set_bit(ring_chain->flag, HNAE3_RING_TYPE_B, + req->msg.param[0].ring_type); + ring_chain->tqp_index = + hclge_get_queue_id(vport->nic.kinfo.tqp + [req->msg.param[0].tqp_index]); + hnae3_set_field(ring_chain->int_gl_idx, HNAE3_RING_GL_IDX_M, + HNAE3_RING_GL_IDX_S, req->msg.param[0].int_gl_index); + + cur_chain = ring_chain; + + for (i = 1; i < ring_num; i++) { + new_chain = kzalloc(sizeof(*new_chain), GFP_KERNEL); + if (!new_chain) + goto err; + + hnae3_set_bit(new_chain->flag, HNAE3_RING_TYPE_B, + req->msg.param[i].ring_type); + + new_chain->tqp_index = + hclge_get_queue_id(vport->nic.kinfo.tqp + [req->msg.param[i].tqp_index]); + + hnae3_set_field(new_chain->int_gl_idx, HNAE3_RING_GL_IDX_M, + HNAE3_RING_GL_IDX_S, + req->msg.param[i].int_gl_index); + + cur_chain->next = new_chain; + cur_chain = new_chain; + } + + return 0; +err: + hclge_free_vector_ring_chain(ring_chain); + return -ENOMEM; +} + +static int hclge_map_unmap_ring_to_vf_vector(struct hclge_vport *vport, bool en, + struct hclge_mbx_vf_to_pf_cmd *req) +{ + struct hnae3_ring_chain_node ring_chain; + int vector_id = req->msg.vector_id; + int ret; + + memset(&ring_chain, 0, sizeof(ring_chain)); + ret = hclge_get_ring_chain_from_mbx(req, &ring_chain, vport); + if (ret) + return ret; + + ret = hclge_bind_ring_with_vector(vport, vector_id, en, &ring_chain); + + hclge_free_vector_ring_chain(&ring_chain); + + return ret; +} + +static int hclge_query_ring_vector_map(struct hclge_vport *vport, + struct hnae3_ring_chain_node *ring_chain, + struct hclge_desc *desc) +{ + struct hclge_ctrl_vector_chain_cmd *req = + (struct hclge_ctrl_vector_chain_cmd *)desc->data; + struct hclge_dev *hdev = vport->back; + u16 tqp_type_and_id; + int status; + + hclge_cmd_setup_basic_desc(desc, HCLGE_OPC_ADD_RING_TO_VECTOR, true); + + tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[0]); + hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M, HCLGE_INT_TYPE_S, + hnae3_get_bit(ring_chain->flag, HNAE3_RING_TYPE_B)); + hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M, HCLGE_TQP_ID_S, + ring_chain->tqp_index); + req->tqp_type_and_id[0] = 
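/* Illustrative aside, not from the upstream driver: the mailbox layout
 * described above packs one (ring type, tqp index, int_gl index) triplet per
 * ring after the header bytes. A user-space sketch with hypothetical names,
 * walking two such triplets (the 0 = TX / 1 = RX encoding is an assumption):
 */
#include <stdint.h>
#include <stdio.h>

struct ex_ring_param {		/* mirrors one per-ring triplet in the message */
	uint8_t ring_type;	/* 0: TX, 1: RX (assumed encoding) */
	uint8_t tqp_index;	/* queue pair index within the VF */
	uint8_t int_gl_index;	/* interrupt GL index for this ring */
};

int main(void)
{
	struct ex_ring_param chain[2] = {
		{ 1, 0, 0 },	/* first ring:  RX queue 0 -> GL 0 */
		{ 0, 0, 1 },	/* second ring: TX queue 0 -> GL 1 */
	};
	unsigned int i;

	for (i = 0; i < 2; i++)
		printf("ring %u: type %u, tqp %u, gl %u\n", i,
		       chain[i].ring_type, chain[i].tqp_index,
		       chain[i].int_gl_index);
	return 0;
}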
cpu_to_le16(tqp_type_and_id); + req->vfid = vport->vport_id; + + status = hclge_cmd_send(&hdev->hw, desc, 1); + if (status) + dev_err(&hdev->pdev->dev, + "Get VF ring vector map info fail, status is %d.\n", + status); + + return status; +} + +static int hclge_get_vf_ring_vector_map(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *req, + struct hclge_respond_to_vf_msg *resp) +{ +#define HCLGE_LIMIT_RING_NUM 1 +#define HCLGE_RING_TYPE_OFFSET 0 +#define HCLGE_TQP_INDEX_OFFSET 1 +#define HCLGE_INT_GL_INDEX_OFFSET 2 +#define HCLGE_VECTOR_ID_OFFSET 3 +#define HCLGE_RING_VECTOR_MAP_INFO_LEN 4 + struct hnae3_ring_chain_node ring_chain; + struct hclge_desc desc; + struct hclge_ctrl_vector_chain_cmd *data = + (struct hclge_ctrl_vector_chain_cmd *)desc.data; + u16 tqp_type_and_id; + u8 int_gl_index; + int ret; + + req->msg.ring_num = HCLGE_LIMIT_RING_NUM; + + memset(&ring_chain, 0, sizeof(ring_chain)); + ret = hclge_get_ring_chain_from_mbx(req, &ring_chain, vport); + if (ret) + return ret; + + ret = hclge_query_ring_vector_map(vport, &ring_chain, &desc); + if (ret) { + hclge_free_vector_ring_chain(&ring_chain); + return ret; + } + + tqp_type_and_id = le16_to_cpu(data->tqp_type_and_id[0]); + int_gl_index = hnae3_get_field(tqp_type_and_id, + HCLGE_INT_GL_IDX_M, HCLGE_INT_GL_IDX_S); + + resp->data[HCLGE_RING_TYPE_OFFSET] = req->msg.param[0].ring_type; + resp->data[HCLGE_TQP_INDEX_OFFSET] = req->msg.param[0].tqp_index; + resp->data[HCLGE_INT_GL_INDEX_OFFSET] = int_gl_index; + resp->data[HCLGE_VECTOR_ID_OFFSET] = data->int_vector_id_l; + resp->len = HCLGE_RING_VECTOR_MAP_INFO_LEN; + + hclge_free_vector_ring_chain(&ring_chain); + + return ret; +} + +static void hclge_set_vf_promisc_mode(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *req) +{ + struct hnae3_handle *handle = &vport->nic; + struct hclge_dev *hdev = vport->back; + + vport->vf_info.request_uc_en = req->msg.en_uc; + vport->vf_info.request_mc_en = req->msg.en_mc; + vport->vf_info.request_bc_en = req->msg.en_bc; + + if (req->msg.en_limit_promisc) + set_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags); + else + clear_bit(HNAE3_PFLAG_LIMIT_PROMISC, + &handle->priv_flags); + + set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state); + hclge_task_schedule(hdev, 0); +} + +static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *mbx_req) +{ +#define HCLGE_MBX_VF_OLD_MAC_ADDR_OFFSET 6 + + const u8 *mac_addr = (const u8 *)(mbx_req->msg.data); + struct hclge_dev *hdev = vport->back; + int status; + + if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_UC_MODIFY) { + const u8 *old_addr = (const u8 *) + (&mbx_req->msg.data[HCLGE_MBX_VF_OLD_MAC_ADDR_OFFSET]); + + /* If VF MAC has been configured by the host then it + * cannot be overridden by the MAC specified by the VM. 
+ */ + if (!is_zero_ether_addr(vport->vf_info.mac) && + !ether_addr_equal(mac_addr, vport->vf_info.mac)) + return -EPERM; + + if (!is_valid_ether_addr(mac_addr)) + return -EINVAL; + + spin_lock_bh(&vport->mac_list_lock); + status = hclge_update_mac_node_for_dev_addr(vport, old_addr, + mac_addr); + spin_unlock_bh(&vport->mac_list_lock); + hclge_task_schedule(hdev, 0); + } else if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_UC_ADD) { + status = hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, + HCLGE_MAC_ADDR_UC, mac_addr); + } else if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_UC_REMOVE) { + status = hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, + HCLGE_MAC_ADDR_UC, mac_addr); + } else { + dev_err(&hdev->pdev->dev, + "failed to set unicast mac addr, unknown subcode %u\n", + mbx_req->msg.subcode); + return -EIO; + } + + return status; +} + +static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *mbx_req) +{ + const u8 *mac_addr = (const u8 *)(mbx_req->msg.data); + struct hclge_dev *hdev = vport->back; + + if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_MC_ADD) { + hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, + HCLGE_MAC_ADDR_MC, mac_addr); + } else if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_MC_REMOVE) { + hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, + HCLGE_MAC_ADDR_MC, mac_addr); + } else { + dev_err(&hdev->pdev->dev, + "failed to set mcast mac addr, unknown subcode %u\n", + mbx_req->msg.subcode); + return -EIO; + } + + return 0; +} + +int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid, + u16 state, + struct hclge_vlan_info *vlan_info) +{ + struct hclge_mbx_port_base_vlan base_vlan; + + base_vlan.state = cpu_to_le16(state); + base_vlan.vlan_proto = cpu_to_le16(vlan_info->vlan_proto); + base_vlan.qos = cpu_to_le16(vlan_info->qos); + base_vlan.vlan_tag = cpu_to_le16(vlan_info->vlan_tag); + + return hclge_send_mbx_msg(vport, (u8 *)&base_vlan, sizeof(base_vlan), + HCLGE_MBX_PUSH_VLAN_INFO, vfid); +} + +static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *mbx_req, + struct hclge_respond_to_vf_msg *resp_msg) +{ +#define HCLGE_MBX_VLAN_STATE_OFFSET 0 +#define HCLGE_MBX_VLAN_INFO_OFFSET 2 + + struct hnae3_handle *handle = &vport->nic; + struct hclge_dev *hdev = vport->back; + struct hclge_vf_vlan_cfg *msg_cmd; + __be16 proto; + u16 vlan_id; + + msg_cmd = (struct hclge_vf_vlan_cfg *)&mbx_req->msg; + switch (msg_cmd->subcode) { + case HCLGE_MBX_VLAN_FILTER: + proto = cpu_to_be16(le16_to_cpu(msg_cmd->proto)); + vlan_id = le16_to_cpu(msg_cmd->vlan); + return hclge_set_vlan_filter(handle, proto, vlan_id, + msg_cmd->is_kill); + case HCLGE_MBX_VLAN_RX_OFF_CFG: + return hclge_en_hw_strip_rxvtag(handle, msg_cmd->enable); + case HCLGE_MBX_GET_PORT_BASE_VLAN_STATE: + /* vf does not need to know about the port based VLAN state + * on device HNAE3_DEVICE_VERSION_V3. So always return disable + * on device HNAE3_DEVICE_VERSION_V3 if vf queries the port + * based VLAN state. + */ + resp_msg->data[0] = + hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3 ? 
+ HNAE3_PORT_BASE_VLAN_DISABLE : + vport->port_base_vlan_cfg.state; + resp_msg->len = sizeof(u8); + return 0; + case HCLGE_MBX_ENABLE_VLAN_FILTER: + return hclge_enable_vport_vlan_filter(vport, msg_cmd->enable); + default: + return 0; + } +} + +static int hclge_set_vf_alive(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *mbx_req) +{ + bool alive = !!mbx_req->msg.data[0]; + int ret = 0; + + if (alive) + ret = hclge_vport_start(vport); + else + hclge_vport_stop(vport); + + return ret; +} + +static void hclge_get_basic_info(struct hclge_vport *vport, + struct hclge_respond_to_vf_msg *resp_msg) +{ + struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; + struct hnae3_ae_dev *ae_dev = vport->back->ae_dev; + struct hclge_basic_info *basic_info; + unsigned int i; + u32 pf_caps; + + basic_info = (struct hclge_basic_info *)resp_msg->data; + for (i = 0; i < kinfo->tc_info.num_tc; i++) + basic_info->hw_tc_map |= BIT(i); + + pf_caps = le32_to_cpu(basic_info->pf_caps); + if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps)) + hnae3_set_bit(pf_caps, HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B, 1); + + basic_info->pf_caps = cpu_to_le32(pf_caps); + resp_msg->len = HCLGE_MBX_MAX_RESP_DATA_SIZE; +} + +static void hclge_get_vf_queue_info(struct hclge_vport *vport, + struct hclge_respond_to_vf_msg *resp_msg) +{ +#define HCLGE_TQPS_RSS_INFO_LEN 6 + + struct hclge_mbx_vf_queue_info *queue_info; + struct hclge_dev *hdev = vport->back; + + /* get the queue related info */ + queue_info = (struct hclge_mbx_vf_queue_info *)resp_msg->data; + queue_info->num_tqps = cpu_to_le16(vport->alloc_tqps); + queue_info->rss_size = cpu_to_le16(vport->nic.kinfo.rss_size); + queue_info->rx_buf_len = cpu_to_le16(hdev->rx_buf_len); + resp_msg->len = HCLGE_TQPS_RSS_INFO_LEN; +} + +static void hclge_get_vf_mac_addr(struct hclge_vport *vport, + struct hclge_respond_to_vf_msg *resp_msg) +{ + ether_addr_copy(resp_msg->data, vport->vf_info.mac); + resp_msg->len = ETH_ALEN; +} + +static void hclge_get_vf_queue_depth(struct hclge_vport *vport, + struct hclge_respond_to_vf_msg *resp_msg) +{ +#define HCLGE_TQPS_DEPTH_INFO_LEN 4 + + struct hclge_mbx_vf_queue_depth *queue_depth; + struct hclge_dev *hdev = vport->back; + + /* get the queue depth info */ + queue_depth = (struct hclge_mbx_vf_queue_depth *)resp_msg->data; + queue_depth->num_tx_desc = cpu_to_le16(hdev->num_tx_desc); + queue_depth->num_rx_desc = cpu_to_le16(hdev->num_rx_desc); + + resp_msg->len = HCLGE_TQPS_DEPTH_INFO_LEN; +} + +static void hclge_get_vf_media_type(struct hclge_vport *vport, + struct hclge_respond_to_vf_msg *resp_msg) +{ +#define HCLGE_VF_MEDIA_TYPE_OFFSET 0 +#define HCLGE_VF_MODULE_TYPE_OFFSET 1 +#define HCLGE_VF_MEDIA_TYPE_LENGTH 2 + + struct hclge_dev *hdev = vport->back; + + resp_msg->data[HCLGE_VF_MEDIA_TYPE_OFFSET] = + hdev->hw.mac.media_type; + resp_msg->data[HCLGE_VF_MODULE_TYPE_OFFSET] = + hdev->hw.mac.module_type; + resp_msg->len = HCLGE_VF_MEDIA_TYPE_LENGTH; +} + +int hclge_push_vf_link_status(struct hclge_vport *vport) +{ +#define HCLGE_VF_LINK_STATE_UP 1U +#define HCLGE_VF_LINK_STATE_DOWN 0U + + struct hclge_mbx_link_status link_info; + struct hclge_dev *hdev = vport->back; + u16 link_status; + + /* mac.link can only be 0 or 1 */ + switch (vport->vf_info.link_state) { + case IFLA_VF_LINK_STATE_ENABLE: + link_status = HCLGE_VF_LINK_STATE_UP; + break; + case IFLA_VF_LINK_STATE_DISABLE: + link_status = HCLGE_VF_LINK_STATE_DOWN; + break; + case IFLA_VF_LINK_STATE_AUTO: + default: + link_status = (u16)hdev->hw.mac.link; + break; + } + + 
link_info.link_status = cpu_to_le16(link_status); + link_info.speed = cpu_to_le32(hdev->hw.mac.speed); + link_info.duplex = cpu_to_le16(hdev->hw.mac.duplex); + link_info.flag = HCLGE_MBX_PUSH_LINK_STATUS_EN; + + /* send this requested info to VF */ + return hclge_send_mbx_msg(vport, (u8 *)&link_info, sizeof(link_info), + HCLGE_MBX_LINK_STAT_CHANGE, vport->vport_id); +} + +static void hclge_get_link_mode(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *mbx_req) +{ +#define HCLGE_SUPPORTED 1 + struct hclge_mbx_link_mode link_mode; + struct hclge_dev *hdev = vport->back; + unsigned long advertising; + unsigned long supported; + unsigned long send_data; + u8 dest_vfid; + + advertising = hdev->hw.mac.advertising[0]; + supported = hdev->hw.mac.supported[0]; + dest_vfid = mbx_req->mbx_src_vfid; + send_data = mbx_req->msg.data[0] == HCLGE_SUPPORTED ? supported : + advertising; + link_mode.idx = cpu_to_le16((u16)mbx_req->msg.data[0]); + link_mode.link_mode = cpu_to_le64(send_data); + + hclge_send_mbx_msg(vport, (u8 *)&link_mode, sizeof(link_mode), + HCLGE_MBX_LINK_STAT_MODE, dest_vfid); +} + +static int hclge_mbx_reset_vf_queue(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *mbx_req, + struct hclge_respond_to_vf_msg *resp_msg) +{ +#define HCLGE_RESET_ALL_QUEUE_DONE 1U + struct hnae3_handle *handle = &vport->nic; + struct hclge_dev *hdev = vport->back; + u16 queue_id; + int ret; + + queue_id = le16_to_cpu(*(__le16 *)mbx_req->msg.data); + resp_msg->data[0] = HCLGE_RESET_ALL_QUEUE_DONE; + resp_msg->len = sizeof(u8); + + /* pf will reset vf's all queues at a time. So it is unnecessary + * to reset queues if queue_id > 0, just return success. + */ + if (queue_id > 0) + return 0; + + ret = hclge_reset_tqp(handle); + if (ret) + dev_err(&hdev->pdev->dev, "failed to reset vf %u queue, ret = %d\n", + vport->vport_id - HCLGE_VF_VPORT_START_NUM, ret); + + return ret; +} + +static int hclge_reset_vf(struct hclge_vport *vport) +{ + struct hclge_dev *hdev = vport->back; + + dev_warn(&hdev->pdev->dev, "PF received VF reset request from VF %u!", + vport->vport_id - HCLGE_VF_VPORT_START_NUM); + + return hclge_func_reset_cmd(hdev, vport->vport_id); +} + +static void hclge_notify_vf_config(struct hclge_vport *vport) +{ + struct hclge_dev *hdev = vport->back; + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); + struct hclge_port_base_vlan_config *vlan_cfg; + int ret; + + hclge_push_vf_link_status(vport); + if (test_bit(HCLGE_VPORT_NEED_NOTIFY_RESET, &vport->need_notify)) { + ret = hclge_inform_vf_reset(vport, HNAE3_VF_PF_FUNC_RESET); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to inform VF %u reset!", + vport->vport_id - HCLGE_VF_VPORT_START_NUM); + return; + } + vport->need_notify = 0; + return; + } + + if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3 && + test_bit(HCLGE_VPORT_NEED_NOTIFY_VF_VLAN, &vport->need_notify)) { + vlan_cfg = &vport->port_base_vlan_cfg; + ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0], + vport->vport_id, + vlan_cfg->state, + &vlan_cfg->vlan_info); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to inform VF %u port base vlan!", + vport->vport_id - HCLGE_VF_VPORT_START_NUM); + return; + } + clear_bit(HCLGE_VPORT_NEED_NOTIFY_VF_VLAN, &vport->need_notify); + } +} + +static void hclge_vf_keep_alive(struct hclge_vport *vport) +{ + struct hclge_dev *hdev = vport->back; + + vport->last_active_jiffies = jiffies; + + if (test_bit(HCLGE_VPORT_STATE_INITED, &vport->state) && + !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) { + 
set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); + dev_info(&hdev->pdev->dev, "VF %u is alive!", + vport->vport_id - HCLGE_VF_VPORT_START_NUM); + hclge_notify_vf_config(vport); + } +} + +static int hclge_set_vf_mtu(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *mbx_req) +{ + struct hclge_mbx_mtu_info *mtu_info; + u32 mtu; + + mtu_info = (struct hclge_mbx_mtu_info *)mbx_req->msg.data; + mtu = le32_to_cpu(mtu_info->mtu); + + return hclge_set_vport_mtu(vport, mtu); +} + +static int hclge_get_queue_id_in_pf(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *mbx_req, + struct hclge_respond_to_vf_msg *resp_msg) +{ + struct hnae3_handle *handle = &vport->nic; + struct hclge_dev *hdev = vport->back; + u16 queue_id, qid_in_pf; + + queue_id = le16_to_cpu(*(__le16 *)mbx_req->msg.data); + if (queue_id >= handle->kinfo.num_tqps) { + dev_err(&hdev->pdev->dev, "Invalid queue id(%u) from VF %u\n", + queue_id, mbx_req->mbx_src_vfid); + return -EINVAL; + } + + qid_in_pf = hclge_covert_handle_qid_global(&vport->nic, queue_id); + *(__le16 *)resp_msg->data = cpu_to_le16(qid_in_pf); + resp_msg->len = sizeof(qid_in_pf); + return 0; +} + +static int hclge_get_rss_key(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *mbx_req, + struct hclge_respond_to_vf_msg *resp_msg) +{ +#define HCLGE_RSS_MBX_RESP_LEN 8 + struct hclge_dev *hdev = vport->back; + struct hclge_comm_rss_cfg *rss_cfg; + u8 index; + + index = mbx_req->msg.data[0]; + rss_cfg = &hdev->rss_cfg; + + /* Check the query index of rss_hash_key from VF, make sure no + * more than the size of rss_hash_key. + */ + if (((index + 1) * HCLGE_RSS_MBX_RESP_LEN) > + sizeof(rss_cfg->rss_hash_key)) { + dev_warn(&hdev->pdev->dev, + "failed to get the rss hash key, the index(%u) invalid !\n", + index); + return -EINVAL; + } + + memcpy(resp_msg->data, + &rss_cfg->rss_hash_key[index * HCLGE_RSS_MBX_RESP_LEN], + HCLGE_RSS_MBX_RESP_LEN); + resp_msg->len = HCLGE_RSS_MBX_RESP_LEN; + return 0; +} + +static void hclge_link_fail_parse(struct hclge_dev *hdev, u8 link_fail_code) +{ + switch (link_fail_code) { + case HCLGE_LF_REF_CLOCK_LOST: + dev_warn(&hdev->pdev->dev, "Reference clock lost!\n"); + break; + case HCLGE_LF_XSFP_TX_DISABLE: + dev_warn(&hdev->pdev->dev, "SFP tx is disabled!\n"); + break; + case HCLGE_LF_XSFP_ABSENT: + dev_warn(&hdev->pdev->dev, "SFP is absent!\n"); + break; + default: + break; + } +} + +static void hclge_handle_link_change_event(struct hclge_dev *hdev, + struct hclge_mbx_vf_to_pf_cmd *req) +{ + hclge_task_schedule(hdev, 0); + + if (!req->msg.subcode) + hclge_link_fail_parse(hdev, req->msg.data[0]); +} + +static bool hclge_cmd_crq_empty(struct hclge_hw *hw) +{ + u32 tail = hclge_read_dev(hw, HCLGE_COMM_NIC_CRQ_TAIL_REG); + + return tail == hw->hw.cmq.crq.next_to_use; +} + +static void hclge_handle_ncsi_error(struct hclge_dev *hdev) +{ + struct hnae3_ae_dev *ae_dev = hdev->ae_dev; + + ae_dev->ops->set_default_reset_request(ae_dev, HNAE3_GLOBAL_RESET); + dev_warn(&hdev->pdev->dev, "requesting reset due to NCSI error\n"); + ae_dev->ops->reset_event(hdev->pdev, NULL); +} + +static void hclge_handle_vf_tbl(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *mbx_req) +{ + struct hclge_dev *hdev = vport->back; + struct hclge_vf_vlan_cfg *msg_cmd; + + msg_cmd = (struct hclge_vf_vlan_cfg *)&mbx_req->msg; + if (msg_cmd->subcode == HCLGE_MBX_VPORT_LIST_CLEAR) { + hclge_rm_vport_all_mac_table(vport, true, HCLGE_MAC_ADDR_UC); + hclge_rm_vport_all_mac_table(vport, true, HCLGE_MAC_ADDR_MC); + hclge_rm_vport_all_vlan_table(vport, 
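/* Illustrative aside, not from the upstream driver: hclge_get_rss_key() above
 * hands the RSS hash key to the VF in 8-byte chunks and rejects any chunk
 * index whose end would run past the key. A user-space sketch of that bound,
 * assuming the usual 40-byte Toeplitz key:
 */
#include <stdbool.h>
#include <stdio.h>

#define EX_RSS_KEY_SIZE		40	/* assumed hash key size in bytes */
#define EX_RSS_MBX_RESP_LEN	8	/* bytes returned per mailbox reply */

static bool ex_rss_index_valid(unsigned int index)
{
	return (index + 1) * EX_RSS_MBX_RESP_LEN <= EX_RSS_KEY_SIZE;
}

int main(void)
{
	unsigned int i;

	for (i = 0; i < 6; i++)		/* indexes 0..4 are valid, 5 is not */
		printf("index %u -> %s\n", i,
		       ex_rss_index_valid(i) ? "valid" : "rejected");
	return 0;
}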
true); + } else { + dev_warn(&hdev->pdev->dev, "Invalid cmd(%u)\n", + msg_cmd->subcode); + } +} + +static int +hclge_mbx_map_ring_to_vector_handler(struct hclge_mbx_ops_param *param) +{ + return hclge_map_unmap_ring_to_vf_vector(param->vport, true, + param->req); +} + +static int +hclge_mbx_unmap_ring_to_vector_handler(struct hclge_mbx_ops_param *param) +{ + return hclge_map_unmap_ring_to_vf_vector(param->vport, false, + param->req); +} + +static int +hclge_mbx_get_ring_vector_map_handler(struct hclge_mbx_ops_param *param) +{ + int ret; + + ret = hclge_get_vf_ring_vector_map(param->vport, param->req, + param->resp_msg); + if (ret) + dev_err(¶m->vport->back->pdev->dev, + "PF fail(%d) to get VF ring vector map\n", + ret); + return ret; +} + +static int hclge_mbx_set_promisc_mode_handler(struct hclge_mbx_ops_param *param) +{ + hclge_set_vf_promisc_mode(param->vport, param->req); + return 0; +} + +static int hclge_mbx_set_unicast_handler(struct hclge_mbx_ops_param *param) +{ + int ret; + + ret = hclge_set_vf_uc_mac_addr(param->vport, param->req); + if (ret) + dev_err(¶m->vport->back->pdev->dev, + "PF fail(%d) to set VF UC MAC Addr\n", + ret); + return ret; +} + +static int hclge_mbx_set_multicast_handler(struct hclge_mbx_ops_param *param) +{ + int ret; + + ret = hclge_set_vf_mc_mac_addr(param->vport, param->req); + if (ret) + dev_err(¶m->vport->back->pdev->dev, + "PF fail(%d) to set VF MC MAC Addr\n", + ret); + return ret; +} + +static int hclge_mbx_set_vlan_handler(struct hclge_mbx_ops_param *param) +{ + int ret; + + ret = hclge_set_vf_vlan_cfg(param->vport, param->req, param->resp_msg); + if (ret) + dev_err(¶m->vport->back->pdev->dev, + "PF failed(%d) to config VF's VLAN\n", + ret); + return ret; +} + +static int hclge_mbx_set_alive_handler(struct hclge_mbx_ops_param *param) +{ + int ret; + + ret = hclge_set_vf_alive(param->vport, param->req); + if (ret) + dev_err(¶m->vport->back->pdev->dev, + "PF failed(%d) to set VF's ALIVE\n", + ret); + return ret; +} + +static int hclge_mbx_get_qinfo_handler(struct hclge_mbx_ops_param *param) +{ + hclge_get_vf_queue_info(param->vport, param->resp_msg); + return 0; +} + +static int hclge_mbx_get_qdepth_handler(struct hclge_mbx_ops_param *param) +{ + hclge_get_vf_queue_depth(param->vport, param->resp_msg); + return 0; +} + +static int hclge_mbx_get_basic_info_handler(struct hclge_mbx_ops_param *param) +{ + hclge_get_basic_info(param->vport, param->resp_msg); + return 0; +} + +static int hclge_mbx_get_link_status_handler(struct hclge_mbx_ops_param *param) +{ + int ret; + + ret = hclge_push_vf_link_status(param->vport); + if (ret) + dev_err(¶m->vport->back->pdev->dev, + "failed to inform link stat to VF, ret = %d\n", + ret); + return ret; +} + +static int hclge_mbx_queue_reset_handler(struct hclge_mbx_ops_param *param) +{ + return hclge_mbx_reset_vf_queue(param->vport, param->req, + param->resp_msg); +} + +static int hclge_mbx_reset_handler(struct hclge_mbx_ops_param *param) +{ + return hclge_reset_vf(param->vport); +} + +static int hclge_mbx_keep_alive_handler(struct hclge_mbx_ops_param *param) +{ + hclge_vf_keep_alive(param->vport); + return 0; +} + +static int hclge_mbx_set_mtu_handler(struct hclge_mbx_ops_param *param) +{ + int ret; + + ret = hclge_set_vf_mtu(param->vport, param->req); + if (ret) + dev_err(¶m->vport->back->pdev->dev, + "VF fail(%d) to set mtu\n", ret); + return ret; +} + +static int hclge_mbx_get_qid_in_pf_handler(struct hclge_mbx_ops_param *param) +{ + return hclge_get_queue_id_in_pf(param->vport, param->req, + param->resp_msg); +} + 
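Editorial note (not part of the patch): the handler wrappers above all take the same hclge_mbx_ops_param block so that, further below, they can be installed in an opcode-indexed function-pointer table (hclge_mbx_ops_list) and dispatched after a NULL check in hclge_mbx_request_handling. The following is a minimal standalone sketch of that dispatch pattern under hypothetical demo_* names (with an added range check for safety); it is an illustration of the technique, not driver code.

/* Standalone sketch of an opcode-indexed dispatch table.
 * All demo_* names are hypothetical, not HCLGE symbols.
 */
#include <stdio.h>

enum demo_opcode { DEMO_OP_RESET, DEMO_OP_GET_INFO, DEMO_OP_MAX };

struct demo_param { int vf_id; };   /* stands in for the shared parameter block */

typedef int (*demo_handler_fn)(struct demo_param *param);

static int demo_reset_handler(struct demo_param *param)
{
	printf("reset VF %d\n", param->vf_id);
	return 0;
}

static const demo_handler_fn demo_ops[DEMO_OP_MAX] = {
	[DEMO_OP_RESET] = demo_reset_handler,
	/* DEMO_OP_GET_INFO left NULL: treated as unsupported */
};

static int demo_dispatch(unsigned int code, struct demo_param *param)
{
	if (code >= DEMO_OP_MAX || !demo_ops[code])
		return -1;   /* unsupported mailbox code */
	return demo_ops[code](param);
}

int main(void)
{
	struct demo_param p = { .vf_id = 3 };

	demo_dispatch(DEMO_OP_RESET, &p);            /* handled */
	return demo_dispatch(DEMO_OP_GET_INFO, &p);  /* rejected: no handler */
}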
+static int hclge_mbx_get_rss_key_handler(struct hclge_mbx_ops_param *param) +{ + return hclge_get_rss_key(param->vport, param->req, param->resp_msg); +} + +static int hclge_mbx_get_link_mode_handler(struct hclge_mbx_ops_param *param) +{ + hclge_get_link_mode(param->vport, param->req); + return 0; +} + +static int +hclge_mbx_get_vf_flr_status_handler(struct hclge_mbx_ops_param *param) +{ + hclge_rm_vport_all_mac_table(param->vport, false, + HCLGE_MAC_ADDR_UC); + hclge_rm_vport_all_mac_table(param->vport, false, + HCLGE_MAC_ADDR_MC); + hclge_rm_vport_all_vlan_table(param->vport, false); + return 0; +} + +static int hclge_mbx_vf_uninit_handler(struct hclge_mbx_ops_param *param) +{ + hclge_rm_vport_all_mac_table(param->vport, true, + HCLGE_MAC_ADDR_UC); + hclge_rm_vport_all_mac_table(param->vport, true, + HCLGE_MAC_ADDR_MC); + hclge_rm_vport_all_vlan_table(param->vport, true); + param->vport->mps = 0; + return 0; +} + +static int hclge_mbx_get_media_type_handler(struct hclge_mbx_ops_param *param) +{ + hclge_get_vf_media_type(param->vport, param->resp_msg); + return 0; +} + +static int hclge_mbx_push_link_status_handler(struct hclge_mbx_ops_param *param) +{ + hclge_handle_link_change_event(param->vport->back, param->req); + return 0; +} + +static int hclge_mbx_get_mac_addr_handler(struct hclge_mbx_ops_param *param) +{ + hclge_get_vf_mac_addr(param->vport, param->resp_msg); + return 0; +} + +static int hclge_mbx_ncsi_error_handler(struct hclge_mbx_ops_param *param) +{ + hclge_handle_ncsi_error(param->vport->back); + return 0; +} + +static int hclge_mbx_handle_vf_tbl_handler(struct hclge_mbx_ops_param *param) +{ + hclge_handle_vf_tbl(param->vport, param->req); + return 0; +} + +static const hclge_mbx_ops_fn hclge_mbx_ops_list[HCLGE_MBX_OPCODE_MAX] = { + [HCLGE_MBX_RESET] = hclge_mbx_reset_handler, + [HCLGE_MBX_SET_UNICAST] = hclge_mbx_set_unicast_handler, + [HCLGE_MBX_SET_MULTICAST] = hclge_mbx_set_multicast_handler, + [HCLGE_MBX_SET_VLAN] = hclge_mbx_set_vlan_handler, + [HCLGE_MBX_MAP_RING_TO_VECTOR] = hclge_mbx_map_ring_to_vector_handler, + [HCLGE_MBX_UNMAP_RING_TO_VECTOR] = hclge_mbx_unmap_ring_to_vector_handler, + [HCLGE_MBX_SET_PROMISC_MODE] = hclge_mbx_set_promisc_mode_handler, + [HCLGE_MBX_GET_QINFO] = hclge_mbx_get_qinfo_handler, + [HCLGE_MBX_GET_QDEPTH] = hclge_mbx_get_qdepth_handler, + [HCLGE_MBX_GET_BASIC_INFO] = hclge_mbx_get_basic_info_handler, + [HCLGE_MBX_GET_RSS_KEY] = hclge_mbx_get_rss_key_handler, + [HCLGE_MBX_GET_MAC_ADDR] = hclge_mbx_get_mac_addr_handler, + [HCLGE_MBX_GET_LINK_STATUS] = hclge_mbx_get_link_status_handler, + [HCLGE_MBX_QUEUE_RESET] = hclge_mbx_queue_reset_handler, + [HCLGE_MBX_KEEP_ALIVE] = hclge_mbx_keep_alive_handler, + [HCLGE_MBX_SET_ALIVE] = hclge_mbx_set_alive_handler, + [HCLGE_MBX_SET_MTU] = hclge_mbx_set_mtu_handler, + [HCLGE_MBX_GET_QID_IN_PF] = hclge_mbx_get_qid_in_pf_handler, + [HCLGE_MBX_GET_LINK_MODE] = hclge_mbx_get_link_mode_handler, + [HCLGE_MBX_GET_MEDIA_TYPE] = hclge_mbx_get_media_type_handler, + [HCLGE_MBX_VF_UNINIT] = hclge_mbx_vf_uninit_handler, + [HCLGE_MBX_HANDLE_VF_TBL] = hclge_mbx_handle_vf_tbl_handler, + [HCLGE_MBX_GET_RING_VECTOR_MAP] = hclge_mbx_get_ring_vector_map_handler, + [HCLGE_MBX_GET_VF_FLR_STATUS] = hclge_mbx_get_vf_flr_status_handler, + [HCLGE_MBX_PUSH_LINK_STATUS] = hclge_mbx_push_link_status_handler, + [HCLGE_MBX_NCSI_ERROR] = hclge_mbx_ncsi_error_handler, +}; + +static void hclge_mbx_request_handling(struct hclge_mbx_ops_param *param) +{ + hclge_mbx_ops_fn cmd_func = NULL; + struct hclge_dev *hdev; + int ret = 0; + + hdev 
= param->vport->back; + cmd_func = hclge_mbx_ops_list[param->req->msg.code]; + if (cmd_func) + ret = cmd_func(param); + else + dev_err(&hdev->pdev->dev, + "un-supported mailbox message, code = %u\n", + param->req->msg.code); + + /* PF driver should not reply IMP */ + if (hnae3_get_bit(param->req->mbx_need_resp, HCLGE_MBX_NEED_RESP_B) && + param->req->msg.code < HCLGE_MBX_GET_VF_FLR_STATUS) { + param->resp_msg->status = ret; + if (time_is_before_jiffies(hdev->last_mbx_scheduled + + HCLGE_MBX_SCHED_TIMEOUT)) + dev_warn(&hdev->pdev->dev, + "resp vport%u mbx(%u,%u) late\n", + param->req->mbx_src_vfid, + param->req->msg.code, + param->req->msg.subcode); + + hclge_gen_resp_to_vf(param->vport, param->req, param->resp_msg); + } +} + +void hclge_mbx_handler(struct hclge_dev *hdev) +{ + struct hclge_comm_cmq_ring *crq = &hdev->hw.hw.cmq.crq; + struct hclge_respond_to_vf_msg resp_msg; + struct hclge_mbx_vf_to_pf_cmd *req; + struct hclge_mbx_ops_param param; + struct hclge_desc *desc; + unsigned int flag; + + param.resp_msg = &resp_msg; + /* handle all the mailbox requests in the queue */ + while (!hclge_cmd_crq_empty(&hdev->hw)) { + if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, + &hdev->hw.hw.comm_state)) { + dev_warn(&hdev->pdev->dev, + "command queue needs re-initializing\n"); + return; + } + + desc = &crq->desc[crq->next_to_use]; + req = (struct hclge_mbx_vf_to_pf_cmd *)desc->data; + + flag = le16_to_cpu(crq->desc[crq->next_to_use].flag); + if (unlikely(!hnae3_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B))) { + dev_warn(&hdev->pdev->dev, + "dropped invalid mailbox message, code = %u\n", + req->msg.code); + + /* dropping/not processing this invalid message */ + crq->desc[crq->next_to_use].flag = 0; + hclge_mbx_ring_ptr_move_crq(crq); + continue; + } + + trace_hclge_pf_mbx_get(hdev, req); + + /* clear the resp_msg before processing every mailbox message */ + memset(&resp_msg, 0, sizeof(resp_msg)); + param.vport = &hdev->vport[req->mbx_src_vfid]; + param.req = req; + hclge_mbx_request_handling(¶m); + + crq->desc[crq->next_to_use].flag = 0; + hclge_mbx_ring_ptr_move_crq(crq); + } + + /* Write back CMDQ_RQ header pointer, M7 need this pointer */ + hclge_write_dev(&hdev->hw, HCLGE_COMM_NIC_CRQ_HEAD_REG, + crq->next_to_use); +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c new file mode 100644 index 000000000..85fb11de4 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c @@ -0,0 +1,311 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. 
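Editorial note (not part of the patch): the MDIO read/write commands that follow pack the clause-22 parameters into small bit-fields of the command descriptor (start bit, ST and OP fields, 5-bit PHY address and register number) via hnae3_set_bit()/hnae3_set_field(). The sketch below shows only that mask/shift packing style; the demo_* helper and constants mirror, but are not, the driver's HCLGE_MDIO_* macros.

/* Illustrative mask/shift field packing, mirroring the values used below. */
#include <stdint.h>
#include <stdio.h>

#define DEMO_CTRL_START_B	0
#define DEMO_CTRL_ST_S		1
#define DEMO_CTRL_ST_M		(0x3 << DEMO_CTRL_ST_S)
#define DEMO_CTRL_OP_S		3
#define DEMO_CTRL_OP_M		(0x3 << DEMO_CTRL_OP_S)

static void demo_set_field(uint8_t *val, uint8_t mask, uint8_t shift, uint8_t fieldval)
{
	*val = (*val & ~mask) | ((uint8_t)(fieldval << shift) & mask);
}

int main(void)
{
	uint8_t ctrl = 0;

	demo_set_field(&ctrl, 1 << DEMO_CTRL_START_B, DEMO_CTRL_START_B, 1); /* start */
	demo_set_field(&ctrl, DEMO_CTRL_ST_M, DEMO_CTRL_ST_S, 1);            /* ST: clause 22 */
	demo_set_field(&ctrl, DEMO_CTRL_OP_M, DEMO_CTRL_OP_S, 2);            /* OP: read */

	printf("ctrl byte = 0x%02x\n", ctrl);  /* prints 0x13: start | ST=1 | OP=2 */
	return 0;
}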
+ +#include +#include +#include + +#include "hclge_cmd.h" +#include "hclge_main.h" +#include "hclge_mdio.h" + +enum hclge_mdio_c22_op_seq { + HCLGE_MDIO_C22_WRITE = 1, + HCLGE_MDIO_C22_READ = 2 +}; + +#define HCLGE_MDIO_CTRL_START_B 0 +#define HCLGE_MDIO_CTRL_ST_S 1 +#define HCLGE_MDIO_CTRL_ST_M (0x3 << HCLGE_MDIO_CTRL_ST_S) +#define HCLGE_MDIO_CTRL_OP_S 3 +#define HCLGE_MDIO_CTRL_OP_M (0x3 << HCLGE_MDIO_CTRL_OP_S) + +#define HCLGE_MDIO_PHYID_S 0 +#define HCLGE_MDIO_PHYID_M (0x1f << HCLGE_MDIO_PHYID_S) + +#define HCLGE_MDIO_PHYREG_S 0 +#define HCLGE_MDIO_PHYREG_M (0x1f << HCLGE_MDIO_PHYREG_S) + +#define HCLGE_MDIO_STA_B 0 + +struct hclge_mdio_cfg_cmd { + u8 ctrl_bit; + u8 phyid; + u8 phyad; + u8 rsvd; + __le16 reserve; + __le16 data_wr; + __le16 data_rd; + __le16 sta; +}; + +static int hclge_mdio_write(struct mii_bus *bus, int phyid, int regnum, + u16 data) +{ + struct hclge_mdio_cfg_cmd *mdio_cmd; + struct hclge_dev *hdev = bus->priv; + struct hclge_desc desc; + int ret; + + if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state)) + return -EBUSY; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MDIO_CONFIG, false); + + mdio_cmd = (struct hclge_mdio_cfg_cmd *)desc.data; + + hnae3_set_field(mdio_cmd->phyid, HCLGE_MDIO_PHYID_M, + HCLGE_MDIO_PHYID_S, (u32)phyid); + hnae3_set_field(mdio_cmd->phyad, HCLGE_MDIO_PHYREG_M, + HCLGE_MDIO_PHYREG_S, (u32)regnum); + + hnae3_set_bit(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_START_B, 1); + hnae3_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_ST_M, + HCLGE_MDIO_CTRL_ST_S, 1); + hnae3_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_OP_M, + HCLGE_MDIO_CTRL_OP_S, HCLGE_MDIO_C22_WRITE); + + mdio_cmd->data_wr = cpu_to_le16(data); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "mdio write fail when sending cmd, status is %d.\n", + ret); + return ret; + } + + return 0; +} + +static int hclge_mdio_read(struct mii_bus *bus, int phyid, int regnum) +{ + struct hclge_mdio_cfg_cmd *mdio_cmd; + struct hclge_dev *hdev = bus->priv; + struct hclge_desc desc; + int ret; + + if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state)) + return -EBUSY; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MDIO_CONFIG, true); + + mdio_cmd = (struct hclge_mdio_cfg_cmd *)desc.data; + + hnae3_set_field(mdio_cmd->phyid, HCLGE_MDIO_PHYID_M, + HCLGE_MDIO_PHYID_S, (u32)phyid); + hnae3_set_field(mdio_cmd->phyad, HCLGE_MDIO_PHYREG_M, + HCLGE_MDIO_PHYREG_S, (u32)regnum); + + hnae3_set_bit(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_START_B, 1); + hnae3_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_ST_M, + HCLGE_MDIO_CTRL_ST_S, 1); + hnae3_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_OP_M, + HCLGE_MDIO_CTRL_OP_S, HCLGE_MDIO_C22_READ); + + /* Read out phy data */ + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "mdio read fail when get data, status is %d.\n", + ret); + return ret; + } + + if (hnae3_get_bit(le16_to_cpu(mdio_cmd->sta), HCLGE_MDIO_STA_B)) { + dev_err(&hdev->pdev->dev, "mdio read data error\n"); + return -EIO; + } + + return le16_to_cpu(mdio_cmd->data_rd); +} + +int hclge_mac_mdio_config(struct hclge_dev *hdev) +{ +#define PHY_INEXISTENT 255 + + struct hclge_mac *mac = &hdev->hw.mac; + struct phy_device *phydev; + struct mii_bus *mdio_bus; + int ret; + + if (hdev->hw.mac.phy_addr == PHY_INEXISTENT) { + dev_info(&hdev->pdev->dev, + "no phy device is connected to mdio bus\n"); + return 0; + } else if (hdev->hw.mac.phy_addr >= PHY_MAX_ADDR) { + dev_err(&hdev->pdev->dev, "phy_addr(%u) is too 
large.\n", + hdev->hw.mac.phy_addr); + return -EINVAL; + } + + mdio_bus = devm_mdiobus_alloc(&hdev->pdev->dev); + if (!mdio_bus) + return -ENOMEM; + + mdio_bus->name = "hisilicon MII bus"; + mdio_bus->read = hclge_mdio_read; + mdio_bus->write = hclge_mdio_write; + snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s-%s", "mii", + dev_name(&hdev->pdev->dev)); + + mdio_bus->parent = &hdev->pdev->dev; + mdio_bus->priv = hdev; + mdio_bus->phy_mask = ~(1 << mac->phy_addr); + ret = mdiobus_register(mdio_bus); + if (ret) { + dev_err(mdio_bus->parent, + "failed to register MDIO bus, ret = %d\n", ret); + return ret; + } + + phydev = mdiobus_get_phy(mdio_bus, mac->phy_addr); + if (!phydev) { + dev_err(mdio_bus->parent, "Failed to get phy device\n"); + mdiobus_unregister(mdio_bus); + return -EIO; + } + + mac->phydev = phydev; + mac->mdio_bus = mdio_bus; + + return 0; +} + +static void hclge_mac_adjust_link(struct net_device *netdev) +{ + struct hnae3_handle *h = *((void **)netdev_priv(netdev)); + struct hclge_vport *vport = hclge_get_vport(h); + struct hclge_dev *hdev = vport->back; + int duplex, speed; + int ret; + + /* When phy link down, do nothing */ + if (netdev->phydev->link == 0) + return; + + speed = netdev->phydev->speed; + duplex = netdev->phydev->duplex; + + ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex, 0); + if (ret) + netdev_err(netdev, "failed to adjust link.\n"); + + ret = hclge_cfg_flowctrl(hdev); + if (ret) + netdev_err(netdev, "failed to configure flow control.\n"); +} + +int hclge_mac_connect_phy(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct net_device *netdev = hdev->vport[0].nic.netdev; + struct phy_device *phydev = hdev->hw.mac.phydev; + __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; + int ret; + + if (!phydev) + return 0; + + linkmode_clear_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, phydev->supported); + + phydev->dev_flags |= MARVELL_PHY_LED0_LINK_LED1_ACTIVE; + + ret = phy_connect_direct(netdev, phydev, + hclge_mac_adjust_link, + PHY_INTERFACE_MODE_SGMII); + if (ret) { + netdev_err(netdev, "phy_connect_direct err.\n"); + return ret; + } + + linkmode_copy(mask, hdev->hw.mac.supported); + linkmode_and(phydev->supported, phydev->supported, mask); + linkmode_copy(phydev->advertising, phydev->supported); + + /* supported flag is Pause and Asym Pause, but default advertising + * should be rx on, tx on, so need clear Asym Pause in advertising + * flag + */ + linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, + phydev->advertising); + + phy_attached_info(phydev); + + return 0; +} + +void hclge_mac_disconnect_phy(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct phy_device *phydev = hdev->hw.mac.phydev; + + if (!phydev) + return; + + phy_disconnect(phydev); +} + +void hclge_mac_start_phy(struct hclge_dev *hdev) +{ + struct phy_device *phydev = hdev->hw.mac.phydev; + + if (!phydev) + return; + + phy_loopback(phydev, false); + + phy_start(phydev); +} + +void hclge_mac_stop_phy(struct hclge_dev *hdev) +{ + struct net_device *netdev = hdev->vport[0].nic.netdev; + struct phy_device *phydev = netdev->phydev; + + if (!phydev) + return; + + phy_stop(phydev); +} + +u16 hclge_read_phy_reg(struct hclge_dev *hdev, u16 reg_addr) +{ + struct hclge_phy_reg_cmd *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PHY_REG, true); + + req = (struct hclge_phy_reg_cmd *)desc.data; + req->reg_addr = 
cpu_to_le16(reg_addr); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "failed to read phy reg, ret = %d.\n", ret); + + return le16_to_cpu(req->reg_val); +} + +int hclge_write_phy_reg(struct hclge_dev *hdev, u16 reg_addr, u16 val) +{ + struct hclge_phy_reg_cmd *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PHY_REG, false); + + req = (struct hclge_phy_reg_cmd *)desc.data; + req->reg_addr = cpu_to_le16(reg_addr); + req->reg_val = cpu_to_le16(val); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "failed to write phy reg, ret = %d.\n", ret); + + return ret; +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h new file mode 100644 index 000000000..4200d0b6d --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +// Copyright (c) 2016-2017 Hisilicon Limited. + +#ifndef __HCLGE_MDIO_H +#define __HCLGE_MDIO_H + +#include "hnae3.h" + +struct hclge_dev; + +int hclge_mac_mdio_config(struct hclge_dev *hdev); +int hclge_mac_connect_phy(struct hnae3_handle *handle); +void hclge_mac_disconnect_phy(struct hnae3_handle *handle); +void hclge_mac_start_phy(struct hclge_dev *hdev); +void hclge_mac_stop_phy(struct hclge_dev *hdev); +u16 hclge_read_phy_reg(struct hclge_dev *hdev, u16 reg_addr); +int hclge_write_phy_reg(struct hclge_dev *hdev, u16 reg_addr, u16 val); + +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c new file mode 100644 index 000000000..a40b1583f --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c @@ -0,0 +1,564 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2021 Hisilicon Limited. + +#include +#include "hclge_main.h" +#include "hnae3.h" + +static int hclge_ptp_get_cycle(struct hclge_dev *hdev) +{ + struct hclge_ptp *ptp = hdev->ptp; + + ptp->cycle.quo = readl(hdev->ptp->io_base + HCLGE_PTP_CYCLE_QUO_REG) & + HCLGE_PTP_CYCLE_QUO_MASK; + ptp->cycle.numer = readl(hdev->ptp->io_base + HCLGE_PTP_CYCLE_NUM_REG); + ptp->cycle.den = readl(hdev->ptp->io_base + HCLGE_PTP_CYCLE_DEN_REG); + + if (ptp->cycle.den == 0) { + dev_err(&hdev->pdev->dev, "invalid ptp cycle denominator!\n"); + return -EINVAL; + } + + return 0; +} + +static int hclge_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) +{ + struct hclge_dev *hdev = hclge_ptp_get_hdev(ptp); + struct hclge_ptp_cycle *cycle = &hdev->ptp->cycle; + u64 adj_val, adj_base, diff; + unsigned long flags; + bool is_neg = false; + u32 quo, numerator; + + if (ppb < 0) { + ppb = -ppb; + is_neg = true; + } + + adj_base = (u64)cycle->quo * (u64)cycle->den + (u64)cycle->numer; + adj_val = adj_base * ppb; + diff = div_u64(adj_val, 1000000000ULL); + + if (is_neg) + adj_val = adj_base - diff; + else + adj_val = adj_base + diff; + + /* This clock cycle is defined by three part: quotient, numerator + * and denominator. For example, 2.5ns, the quotient is 2, + * denominator is fixed to ptp->cycle.den, and numerator + * is 0.5 * ptp->cycle.den. 
+ */ + quo = div_u64_rem(adj_val, cycle->den, &numerator); + + spin_lock_irqsave(&hdev->ptp->lock, flags); + writel(quo & HCLGE_PTP_CYCLE_QUO_MASK, + hdev->ptp->io_base + HCLGE_PTP_CYCLE_QUO_REG); + writel(numerator, hdev->ptp->io_base + HCLGE_PTP_CYCLE_NUM_REG); + writel(cycle->den, hdev->ptp->io_base + HCLGE_PTP_CYCLE_DEN_REG); + writel(HCLGE_PTP_CYCLE_ADJ_EN, + hdev->ptp->io_base + HCLGE_PTP_CYCLE_CFG_REG); + spin_unlock_irqrestore(&hdev->ptp->lock, flags); + + return 0; +} + +bool hclge_ptp_set_tx_info(struct hnae3_handle *handle, struct sk_buff *skb) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct hclge_ptp *ptp = hdev->ptp; + + if (!test_bit(HCLGE_PTP_FLAG_TX_EN, &ptp->flags) || + test_and_set_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state)) { + ptp->tx_skipped++; + return false; + } + + ptp->tx_start = jiffies; + ptp->tx_skb = skb_get(skb); + ptp->tx_cnt++; + + return true; +} + +void hclge_ptp_clean_tx_hwts(struct hclge_dev *hdev) +{ + struct sk_buff *skb = hdev->ptp->tx_skb; + struct skb_shared_hwtstamps hwts; + u32 hi, lo; + u64 ns; + + ns = readl(hdev->ptp->io_base + HCLGE_PTP_TX_TS_NSEC_REG) & + HCLGE_PTP_TX_TS_NSEC_MASK; + lo = readl(hdev->ptp->io_base + HCLGE_PTP_TX_TS_SEC_L_REG); + hi = readl(hdev->ptp->io_base + HCLGE_PTP_TX_TS_SEC_H_REG) & + HCLGE_PTP_TX_TS_SEC_H_MASK; + hdev->ptp->last_tx_seqid = readl(hdev->ptp->io_base + + HCLGE_PTP_TX_TS_SEQID_REG); + + if (skb) { + hdev->ptp->tx_skb = NULL; + hdev->ptp->tx_cleaned++; + + ns += (((u64)hi) << 32 | lo) * NSEC_PER_SEC; + hwts.hwtstamp = ns_to_ktime(ns); + skb_tstamp_tx(skb, &hwts); + dev_kfree_skb_any(skb); + } + + clear_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state); +} + +void hclge_ptp_get_rx_hwts(struct hnae3_handle *handle, struct sk_buff *skb, + u32 nsec, u32 sec) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + unsigned long flags; + u64 ns = nsec; + u32 sec_h; + + if (!test_bit(HCLGE_PTP_FLAG_RX_EN, &hdev->ptp->flags)) + return; + + /* Since the BD does not have enough space for the higher 16 bits of + * second, and this part will not change frequently, so read it + * from register. 
+ */ + spin_lock_irqsave(&hdev->ptp->lock, flags); + sec_h = readl(hdev->ptp->io_base + HCLGE_PTP_CUR_TIME_SEC_H_REG); + spin_unlock_irqrestore(&hdev->ptp->lock, flags); + + ns += (((u64)sec_h) << HCLGE_PTP_SEC_H_OFFSET | sec) * NSEC_PER_SEC; + skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns); + hdev->ptp->last_rx = jiffies; + hdev->ptp->rx_cnt++; +} + +static int hclge_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts, + struct ptp_system_timestamp *sts) +{ + struct hclge_dev *hdev = hclge_ptp_get_hdev(ptp); + unsigned long flags; + u32 hi, lo; + u64 ns; + + spin_lock_irqsave(&hdev->ptp->lock, flags); + ns = readl(hdev->ptp->io_base + HCLGE_PTP_CUR_TIME_NSEC_REG); + hi = readl(hdev->ptp->io_base + HCLGE_PTP_CUR_TIME_SEC_H_REG); + lo = readl(hdev->ptp->io_base + HCLGE_PTP_CUR_TIME_SEC_L_REG); + spin_unlock_irqrestore(&hdev->ptp->lock, flags); + + ns += (((u64)hi) << HCLGE_PTP_SEC_H_OFFSET | lo) * NSEC_PER_SEC; + *ts = ns_to_timespec64(ns); + + return 0; +} + +static int hclge_ptp_settime(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + struct hclge_dev *hdev = hclge_ptp_get_hdev(ptp); + unsigned long flags; + + spin_lock_irqsave(&hdev->ptp->lock, flags); + writel(ts->tv_nsec, hdev->ptp->io_base + HCLGE_PTP_TIME_NSEC_REG); + writel(ts->tv_sec >> HCLGE_PTP_SEC_H_OFFSET, + hdev->ptp->io_base + HCLGE_PTP_TIME_SEC_H_REG); + writel(ts->tv_sec & HCLGE_PTP_SEC_L_MASK, + hdev->ptp->io_base + HCLGE_PTP_TIME_SEC_L_REG); + /* synchronize the time of phc */ + writel(HCLGE_PTP_TIME_SYNC_EN, + hdev->ptp->io_base + HCLGE_PTP_TIME_SYNC_REG); + spin_unlock_irqrestore(&hdev->ptp->lock, flags); + + return 0; +} + +static int hclge_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + struct hclge_dev *hdev = hclge_ptp_get_hdev(ptp); + unsigned long flags; + bool is_neg = false; + u32 adj_val = 0; + + if (delta < 0) { + adj_val |= HCLGE_PTP_TIME_NSEC_NEG; + delta = -delta; + is_neg = true; + } + + if (delta > HCLGE_PTP_TIME_NSEC_MASK) { + struct timespec64 ts; + s64 ns; + + hclge_ptp_gettimex(ptp, &ts, NULL); + ns = timespec64_to_ns(&ts); + ns = is_neg ? ns - delta : ns + delta; + ts = ns_to_timespec64(ns); + return hclge_ptp_settime(ptp, &ts); + } + + adj_val |= delta & HCLGE_PTP_TIME_NSEC_MASK; + + spin_lock_irqsave(&hdev->ptp->lock, flags); + writel(adj_val, hdev->ptp->io_base + HCLGE_PTP_TIME_NSEC_REG); + writel(HCLGE_PTP_TIME_ADJ_EN, + hdev->ptp->io_base + HCLGE_PTP_TIME_ADJ_REG); + spin_unlock_irqrestore(&hdev->ptp->lock, flags); + + return 0; +} + +int hclge_ptp_get_cfg(struct hclge_dev *hdev, struct ifreq *ifr) +{ + if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state)) + return -EOPNOTSUPP; + + return copy_to_user(ifr->ifr_data, &hdev->ptp->ts_cfg, + sizeof(struct hwtstamp_config)) ? -EFAULT : 0; +} + +static int hclge_ptp_int_en(struct hclge_dev *hdev, bool en) +{ + struct hclge_ptp_int_cmd *req; + struct hclge_desc desc; + int ret; + + req = (struct hclge_ptp_int_cmd *)desc.data; + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PTP_INT_EN, false); + req->int_en = en ? 1 : 0; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "failed to %s ptp interrupt, ret = %d\n", + en ? 
"enable" : "disable", ret); + + return ret; +} + +int hclge_ptp_cfg_qry(struct hclge_dev *hdev, u32 *cfg) +{ + struct hclge_ptp_cfg_cmd *req; + struct hclge_desc desc; + int ret; + + req = (struct hclge_ptp_cfg_cmd *)desc.data; + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PTP_MODE_CFG, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to query ptp config, ret = %d\n", ret); + return ret; + } + + *cfg = le32_to_cpu(req->cfg); + + return 0; +} + +static int hclge_ptp_cfg(struct hclge_dev *hdev, u32 cfg) +{ + struct hclge_ptp_cfg_cmd *req; + struct hclge_desc desc; + int ret; + + req = (struct hclge_ptp_cfg_cmd *)desc.data; + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PTP_MODE_CFG, false); + req->cfg = cpu_to_le32(cfg); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "failed to config ptp, ret = %d\n", ret); + + return ret; +} + +static int hclge_ptp_set_tx_mode(struct hwtstamp_config *cfg, + unsigned long *flags, u32 *ptp_cfg) +{ + switch (cfg->tx_type) { + case HWTSTAMP_TX_OFF: + clear_bit(HCLGE_PTP_FLAG_TX_EN, flags); + break; + case HWTSTAMP_TX_ON: + set_bit(HCLGE_PTP_FLAG_TX_EN, flags); + *ptp_cfg |= HCLGE_PTP_TX_EN_B; + break; + default: + return -ERANGE; + } + + return 0; +} + +static int hclge_ptp_set_rx_mode(struct hwtstamp_config *cfg, + unsigned long *flags, u32 *ptp_cfg) +{ + int rx_filter = cfg->rx_filter; + + switch (cfg->rx_filter) { + case HWTSTAMP_FILTER_NONE: + clear_bit(HCLGE_PTP_FLAG_RX_EN, flags); + break; + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + set_bit(HCLGE_PTP_FLAG_RX_EN, flags); + *ptp_cfg |= HCLGE_PTP_RX_EN_B; + *ptp_cfg |= HCLGE_PTP_UDP_FULL_TYPE << HCLGE_PTP_UDP_EN_SHIFT; + rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; + break; + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + set_bit(HCLGE_PTP_FLAG_RX_EN, flags); + *ptp_cfg |= HCLGE_PTP_RX_EN_B; + *ptp_cfg |= HCLGE_PTP_UDP_FULL_TYPE << HCLGE_PTP_UDP_EN_SHIFT; + *ptp_cfg |= HCLGE_PTP_MSG1_V2_DEFAULT << HCLGE_PTP_MSG1_SHIFT; + *ptp_cfg |= HCLGE_PTP_MSG0_V2_EVENT << HCLGE_PTP_MSG0_SHIFT; + *ptp_cfg |= HCLGE_PTP_MSG_TYPE_V2 << HCLGE_PTP_MSG_TYPE_SHIFT; + rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + break; + case HWTSTAMP_FILTER_ALL: + default: + return -ERANGE; + } + + cfg->rx_filter = rx_filter; + + return 0; +} + +static int hclge_ptp_set_ts_mode(struct hclge_dev *hdev, + struct hwtstamp_config *cfg) +{ + unsigned long flags = hdev->ptp->flags; + u32 ptp_cfg = 0; + int ret; + + if (test_bit(HCLGE_PTP_FLAG_EN, &hdev->ptp->flags)) + ptp_cfg |= HCLGE_PTP_EN_B; + + ret = hclge_ptp_set_tx_mode(cfg, &flags, &ptp_cfg); + if (ret) + return ret; + + ret = hclge_ptp_set_rx_mode(cfg, &flags, &ptp_cfg); + if (ret) + return ret; + + ret = hclge_ptp_cfg(hdev, ptp_cfg); + if (ret) + return ret; + + hdev->ptp->flags = flags; + hdev->ptp->ptp_cfg = ptp_cfg; + + return 0; +} + +int hclge_ptp_set_cfg(struct hclge_dev *hdev, struct ifreq *ifr) +{ + struct hwtstamp_config cfg; + int ret; + + if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state)) { + dev_err(&hdev->pdev->dev, "phc is unsupported\n"); + return -EOPNOTSUPP; + } + + if (copy_from_user(&cfg, 
ifr->ifr_data, sizeof(cfg))) + return -EFAULT; + + ret = hclge_ptp_set_ts_mode(hdev, &cfg); + if (ret) + return ret; + + hdev->ptp->ts_cfg = cfg; + + return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0; +} + +int hclge_ptp_get_ts_info(struct hnae3_handle *handle, + struct ethtool_ts_info *info) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state)) { + dev_err(&hdev->pdev->dev, "phc is unsupported\n"); + return -EOPNOTSUPP; + } + + info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + if (hdev->ptp->clock) + info->phc_index = ptp_clock_index(hdev->ptp->clock); + else + info->phc_index = -1; + + info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON); + + info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | + BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | + BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | + BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ); + + info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | + BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | + BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) | + BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) | + BIT(HWTSTAMP_FILTER_PTP_V2_SYNC) | + BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | + BIT(HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) | + BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ); + + return 0; +} + +static int hclge_ptp_create_clock(struct hclge_dev *hdev) +{ + struct hclge_ptp *ptp; + + ptp = devm_kzalloc(&hdev->pdev->dev, sizeof(*ptp), GFP_KERNEL); + if (!ptp) + return -ENOMEM; + + ptp->hdev = hdev; + snprintf(ptp->info.name, sizeof(ptp->info.name), "%s", + HCLGE_DRIVER_NAME); + ptp->info.owner = THIS_MODULE; + ptp->info.max_adj = HCLGE_PTP_CYCLE_ADJ_MAX; + ptp->info.n_ext_ts = 0; + ptp->info.pps = 0; + ptp->info.adjfreq = hclge_ptp_adjfreq; + ptp->info.adjtime = hclge_ptp_adjtime; + ptp->info.gettimex64 = hclge_ptp_gettimex; + ptp->info.settime64 = hclge_ptp_settime; + + ptp->info.n_alarm = 0; + ptp->clock = ptp_clock_register(&ptp->info, &hdev->pdev->dev); + if (IS_ERR(ptp->clock)) { + dev_err(&hdev->pdev->dev, + "%d failed to register ptp clock, ret = %ld\n", + ptp->info.n_alarm, PTR_ERR(ptp->clock)); + return -ENODEV; + } else if (!ptp->clock) { + dev_err(&hdev->pdev->dev, "failed to register ptp clock\n"); + return -ENODEV; + } + + spin_lock_init(&ptp->lock); + ptp->io_base = hdev->hw.hw.io_base + HCLGE_PTP_REG_OFFSET; + ptp->ts_cfg.rx_filter = HWTSTAMP_FILTER_NONE; + ptp->ts_cfg.tx_type = HWTSTAMP_TX_OFF; + hdev->ptp = ptp; + + return 0; +} + +static void hclge_ptp_destroy_clock(struct hclge_dev *hdev) +{ + ptp_clock_unregister(hdev->ptp->clock); + hdev->ptp->clock = NULL; + devm_kfree(&hdev->pdev->dev, hdev->ptp); + hdev->ptp = NULL; +} + +int hclge_ptp_init(struct hclge_dev *hdev) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); + struct timespec64 ts; + int ret; + + if (!test_bit(HNAE3_DEV_SUPPORT_PTP_B, ae_dev->caps)) + return 0; + + if (!hdev->ptp) { + ret = hclge_ptp_create_clock(hdev); + if (ret) + return ret; + + ret = hclge_ptp_get_cycle(hdev); + if (ret) + return ret; + } + + ret = hclge_ptp_int_en(hdev, true); + if (ret) + goto out; + + set_bit(HCLGE_PTP_FLAG_EN, &hdev->ptp->flags); + ret = hclge_ptp_adjfreq(&hdev->ptp->info, 0); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to init freq, ret = %d\n", ret); + goto out; + } + + ret = hclge_ptp_set_ts_mode(hdev, &hdev->ptp->ts_cfg); + if 
(ret) { + dev_err(&hdev->pdev->dev, + "failed to init ts mode, ret = %d\n", ret); + goto out; + } + + ktime_get_real_ts64(&ts); + ret = hclge_ptp_settime(&hdev->ptp->info, &ts); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to init ts time, ret = %d\n", ret); + goto out; + } + + set_bit(HCLGE_STATE_PTP_EN, &hdev->state); + dev_info(&hdev->pdev->dev, "phc initializes ok!\n"); + + return 0; + +out: + hclge_ptp_destroy_clock(hdev); + + return ret; +} + +void hclge_ptp_uninit(struct hclge_dev *hdev) +{ + struct hclge_ptp *ptp = hdev->ptp; + + if (!ptp) + return; + + hclge_ptp_int_en(hdev, false); + clear_bit(HCLGE_STATE_PTP_EN, &hdev->state); + clear_bit(HCLGE_PTP_FLAG_EN, &ptp->flags); + ptp->ts_cfg.rx_filter = HWTSTAMP_FILTER_NONE; + ptp->ts_cfg.tx_type = HWTSTAMP_TX_OFF; + + if (hclge_ptp_set_ts_mode(hdev, &ptp->ts_cfg)) + dev_err(&hdev->pdev->dev, "failed to disable phc\n"); + + if (ptp->tx_skb) { + struct sk_buff *skb = ptp->tx_skb; + + ptp->tx_skb = NULL; + dev_kfree_skb_any(skb); + } + + hclge_ptp_destroy_clock(hdev); +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h new file mode 100644 index 000000000..bbee74cd8 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h @@ -0,0 +1,143 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +// Copyright (c) 2021 Hisilicon Limited. + +#ifndef __HCLGE_PTP_H +#define __HCLGE_PTP_H + +#include +#include +#include + +struct hclge_dev; +struct ifreq; + +#define HCLGE_PTP_REG_OFFSET 0x29000 + +#define HCLGE_PTP_TX_TS_SEQID_REG 0x0 +#define HCLGE_PTP_TX_TS_NSEC_REG 0x4 +#define HCLGE_PTP_TX_TS_NSEC_MASK GENMASK(29, 0) +#define HCLGE_PTP_TX_TS_SEC_L_REG 0x8 +#define HCLGE_PTP_TX_TS_SEC_H_REG 0xC +#define HCLGE_PTP_TX_TS_SEC_H_MASK GENMASK(15, 0) +#define HCLGE_PTP_TX_TS_CNT_REG 0x30 + +#define HCLGE_PTP_TIME_SEC_H_REG 0x50 +#define HCLGE_PTP_TIME_SEC_H_MASK GENMASK(15, 0) +#define HCLGE_PTP_TIME_SEC_L_REG 0x54 +#define HCLGE_PTP_TIME_NSEC_REG 0x58 +#define HCLGE_PTP_TIME_NSEC_MASK GENMASK(29, 0) +#define HCLGE_PTP_TIME_NSEC_NEG BIT(31) +#define HCLGE_PTP_TIME_SYNC_REG 0x5C +#define HCLGE_PTP_TIME_SYNC_EN BIT(0) +#define HCLGE_PTP_TIME_ADJ_REG 0x60 +#define HCLGE_PTP_TIME_ADJ_EN BIT(0) +#define HCLGE_PTP_CYCLE_QUO_REG 0x64 +#define HCLGE_PTP_CYCLE_QUO_MASK GENMASK(7, 0) +#define HCLGE_PTP_CYCLE_DEN_REG 0x68 +#define HCLGE_PTP_CYCLE_NUM_REG 0x6C +#define HCLGE_PTP_CYCLE_CFG_REG 0x70 +#define HCLGE_PTP_CYCLE_ADJ_EN BIT(0) +#define HCLGE_PTP_CUR_TIME_SEC_H_REG 0x74 +#define HCLGE_PTP_CUR_TIME_SEC_L_REG 0x78 +#define HCLGE_PTP_CUR_TIME_NSEC_REG 0x7C + +#define HCLGE_PTP_CYCLE_ADJ_MAX 500000000 +#define HCLGE_PTP_SEC_H_OFFSET 32u +#define HCLGE_PTP_SEC_L_MASK GENMASK(31, 0) + +#define HCLGE_PTP_FLAG_EN 0 +#define HCLGE_PTP_FLAG_TX_EN 1 +#define HCLGE_PTP_FLAG_RX_EN 2 + +struct hclge_ptp_cycle { + u32 quo; + u32 numer; + u32 den; +}; + +struct hclge_ptp { + struct hclge_dev *hdev; + struct ptp_clock *clock; + struct sk_buff *tx_skb; + unsigned long flags; + void __iomem *io_base; + struct ptp_clock_info info; + struct hwtstamp_config ts_cfg; + spinlock_t lock; /* protects ptp registers */ + u32 ptp_cfg; + u32 last_tx_seqid; + struct hclge_ptp_cycle cycle; + unsigned long tx_start; + unsigned long tx_cnt; + unsigned long tx_skipped; + unsigned long tx_cleaned; + unsigned long last_rx; + unsigned long rx_cnt; + unsigned long tx_timeout; +}; + +struct hclge_ptp_int_cmd { +#define HCLGE_PTP_INT_EN_B BIT(0) + + u8 int_en; + u8 rsvd[23]; +}; + +enum hclge_ptp_udp_type { 
+ HCLGE_PTP_UDP_NOT_TYPE, + HCLGE_PTP_UDP_P13F_TYPE, + HCLGE_PTP_UDP_P140_TYPE, + HCLGE_PTP_UDP_FULL_TYPE, +}; + +enum hclge_ptp_msg_type { + HCLGE_PTP_MSG_TYPE_V2_L2, + HCLGE_PTP_MSG_TYPE_V2, + HCLGE_PTP_MSG_TYPE_V2_EVENT, +}; + +enum hclge_ptp_msg0_type { + HCLGE_PTP_MSG0_V2_DELAY_REQ = 1, + HCLGE_PTP_MSG0_V2_PDELAY_REQ, + HCLGE_PTP_MSG0_V2_DELAY_RESP, + HCLGE_PTP_MSG0_V2_EVENT = 0xF, +}; + +#define HCLGE_PTP_MSG1_V2_DEFAULT 1 + +struct hclge_ptp_cfg_cmd { +#define HCLGE_PTP_EN_B BIT(0) +#define HCLGE_PTP_TX_EN_B BIT(1) +#define HCLGE_PTP_RX_EN_B BIT(2) +#define HCLGE_PTP_UDP_EN_SHIFT 3 +#define HCLGE_PTP_UDP_EN_MASK GENMASK(4, 3) +#define HCLGE_PTP_MSG_TYPE_SHIFT 8 +#define HCLGE_PTP_MSG_TYPE_MASK GENMASK(9, 8) +#define HCLGE_PTP_MSG1_SHIFT 16 +#define HCLGE_PTP_MSG1_MASK GENMASK(19, 16) +#define HCLGE_PTP_MSG0_SHIFT 24 +#define HCLGE_PTP_MSG0_MASK GENMASK(27, 24) + + __le32 cfg; + u8 rsvd[20]; +}; + +static inline struct hclge_dev *hclge_ptp_get_hdev(struct ptp_clock_info *info) +{ + struct hclge_ptp *ptp = container_of(info, struct hclge_ptp, info); + + return ptp->hdev; +} + +bool hclge_ptp_set_tx_info(struct hnae3_handle *handle, struct sk_buff *skb); +void hclge_ptp_clean_tx_hwts(struct hclge_dev *hdev); +void hclge_ptp_get_rx_hwts(struct hnae3_handle *handle, struct sk_buff *skb, + u32 nsec, u32 sec); +int hclge_ptp_get_cfg(struct hclge_dev *hdev, struct ifreq *ifr); +int hclge_ptp_set_cfg(struct hclge_dev *hdev, struct ifreq *ifr); +int hclge_ptp_init(struct hclge_dev *hdev); +void hclge_ptp_uninit(struct hclge_dev *hdev); +int hclge_ptp_get_ts_info(struct hnae3_handle *handle, + struct ethtool_ts_info *info); +int hclge_ptp_cfg_qry(struct hclge_dev *hdev, u32 *cfg); +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c new file mode 100644 index 000000000..8b40c6b4e --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c @@ -0,0 +1,2116 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. + +#include + +#include "hclge_cmd.h" +#include "hclge_main.h" +#include "hclge_tm.h" + +enum hclge_shaper_level { + HCLGE_SHAPER_LVL_PRI = 0, + HCLGE_SHAPER_LVL_PG = 1, + HCLGE_SHAPER_LVL_PORT = 2, + HCLGE_SHAPER_LVL_QSET = 3, + HCLGE_SHAPER_LVL_CNT = 4, + HCLGE_SHAPER_LVL_VF = 0, + HCLGE_SHAPER_LVL_PF = 1, +}; + +#define HCLGE_TM_PFC_PKT_GET_CMD_NUM 3 +#define HCLGE_TM_PFC_NUM_GET_PER_CMD 3 + +#define HCLGE_SHAPER_BS_U_DEF 5 +#define HCLGE_SHAPER_BS_S_DEF 20 + +/* hclge_shaper_para_calc: calculate ir parameter for the shaper + * @ir: Rate to be config, its unit is Mbps + * @shaper_level: the shaper level. 
eg: port, pg, priority, queueset + * @ir_para: parameters of IR shaper + * @max_tm_rate: max tm rate is available to config + * + * the formula: + * + * IR_b * (2 ^ IR_u) * 8 + * IR(Mbps) = ------------------------- * CLOCK(1000Mbps) + * Tick * (2 ^ IR_s) + * + * @return: 0: calculate sucessful, negative: fail + */ +static int hclge_shaper_para_calc(u32 ir, u8 shaper_level, + struct hclge_shaper_ir_para *ir_para, + u32 max_tm_rate) +{ +#define DEFAULT_SHAPER_IR_B 126 +#define DIVISOR_CLK (1000 * 8) +#define DEFAULT_DIVISOR_IR_B (DEFAULT_SHAPER_IR_B * DIVISOR_CLK) + + static const u16 tick_array[HCLGE_SHAPER_LVL_CNT] = { + 6 * 256, /* Prioriy level */ + 6 * 32, /* Prioriy group level */ + 6 * 8, /* Port level */ + 6 * 256 /* Qset level */ + }; + u8 ir_u_calc = 0; + u8 ir_s_calc = 0; + u32 ir_calc; + u32 tick; + + /* Calc tick */ + if (shaper_level >= HCLGE_SHAPER_LVL_CNT || + ir > max_tm_rate) + return -EINVAL; + + tick = tick_array[shaper_level]; + + /** + * Calc the speed if ir_b = 126, ir_u = 0 and ir_s = 0 + * the formula is changed to: + * 126 * 1 * 8 + * ir_calc = ---------------- * 1000 + * tick * 1 + */ + ir_calc = (DEFAULT_DIVISOR_IR_B + (tick >> 1) - 1) / tick; + + if (ir_calc == ir) { + ir_para->ir_b = DEFAULT_SHAPER_IR_B; + ir_para->ir_u = 0; + ir_para->ir_s = 0; + + return 0; + } else if (ir_calc > ir) { + /* Increasing the denominator to select ir_s value */ + while (ir_calc >= ir && ir) { + ir_s_calc++; + ir_calc = DEFAULT_DIVISOR_IR_B / + (tick * (1 << ir_s_calc)); + } + + ir_para->ir_b = (ir * tick * (1 << ir_s_calc) + + (DIVISOR_CLK >> 1)) / DIVISOR_CLK; + } else { + /* Increasing the numerator to select ir_u value */ + u32 numerator; + + while (ir_calc < ir) { + ir_u_calc++; + numerator = DEFAULT_DIVISOR_IR_B * (1 << ir_u_calc); + ir_calc = (numerator + (tick >> 1)) / tick; + } + + if (ir_calc == ir) { + ir_para->ir_b = DEFAULT_SHAPER_IR_B; + } else { + u32 denominator = DIVISOR_CLK * (1 << --ir_u_calc); + ir_para->ir_b = (ir * tick + (denominator >> 1)) / + denominator; + } + } + + ir_para->ir_u = ir_u_calc; + ir_para->ir_s = ir_s_calc; + + return 0; +} + +static const u16 hclge_pfc_tx_stats_offset[] = { + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num), + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num), + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num), + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num), + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num), + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num), + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num), + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num) +}; + +static const u16 hclge_pfc_rx_stats_offset[] = { + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num), + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num), + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num), + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num), + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num), + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num), + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num), + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num) +}; + +static void hclge_pfc_stats_get(struct hclge_dev *hdev, bool tx, u64 *stats) +{ + const u16 *offset; + int i; + + if (tx) + offset = hclge_pfc_tx_stats_offset; + else + offset = hclge_pfc_rx_stats_offset; + + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) + stats[i] = HCLGE_STATS_READ(&hdev->mac_stats, offset[i]); +} + +void hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats) +{ + hclge_pfc_stats_get(hdev, false, stats); +} + +void hclge_pfc_tx_stats_get(struct 
hclge_dev *hdev, u64 *stats) +{ + hclge_pfc_stats_get(hdev, true, stats); +} + +int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx) +{ + struct hclge_desc desc; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PAUSE_EN, false); + + desc.data[0] = cpu_to_le32((tx ? HCLGE_TX_MAC_PAUSE_EN_MSK : 0) | + (rx ? HCLGE_RX_MAC_PAUSE_EN_MSK : 0)); + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap, + u8 pfc_bitmap) +{ + struct hclge_desc desc; + struct hclge_pfc_en_cmd *pfc = (struct hclge_pfc_en_cmd *)desc.data; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PFC_PAUSE_EN, false); + + pfc->tx_rx_en_bitmap = tx_rx_bitmap; + pfc->pri_en_bitmap = pfc_bitmap; + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int hclge_pause_param_cfg(struct hclge_dev *hdev, const u8 *addr, + u8 pause_trans_gap, u16 pause_trans_time) +{ + struct hclge_cfg_pause_param_cmd *pause_param; + struct hclge_desc desc; + + pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, false); + + ether_addr_copy(pause_param->mac_addr, addr); + ether_addr_copy(pause_param->mac_addr_extra, addr); + pause_param->pause_trans_gap = pause_trans_gap; + pause_param->pause_trans_time = cpu_to_le16(pause_trans_time); + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr) +{ + struct hclge_cfg_pause_param_cmd *pause_param; + struct hclge_desc desc; + u16 trans_time; + u8 trans_gap; + int ret; + + pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + return ret; + + trans_gap = pause_param->pause_trans_gap; + trans_time = le16_to_cpu(pause_param->pause_trans_time); + + return hclge_pause_param_cfg(hdev, mac_addr, trans_gap, trans_time); +} + +static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id) +{ + u8 tc; + + tc = hdev->tm_info.prio_tc[pri_id]; + + if (tc >= hdev->tm_info.num_tc) + return -EINVAL; + + /** + * the register for priority has four bytes, the first bytes includes + * priority0 and priority1, the higher 4bit stands for priority1 + * while the lower 4bit stands for priority0, as below: + * first byte: | pri_1 | pri_0 | + * second byte: | pri_3 | pri_2 | + * third byte: | pri_5 | pri_4 | + * fourth byte: | pri_7 | pri_6 | + */ + pri[pri_id >> 1] |= tc << ((pri_id & 1) * 4); + + return 0; +} + +int hclge_up_to_tc_map(struct hclge_dev *hdev) +{ + struct hclge_desc desc; + u8 *pri = (u8 *)desc.data; + u8 pri_id; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, false); + + for (pri_id = 0; pri_id < HNAE3_MAX_USER_PRIO; pri_id++) { + ret = hclge_fill_pri_array(hdev, pri, pri_id); + if (ret) + return ret; + } + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static void hclge_dscp_to_prio_map_init(struct hclge_dev *hdev) +{ + u8 i; + + hdev->vport[0].nic.kinfo.tc_map_mode = HNAE3_TC_MAP_MODE_PRIO; + hdev->vport[0].nic.kinfo.dscp_app_cnt = 0; + for (i = 0; i < HNAE3_MAX_DSCP; i++) + hdev->vport[0].nic.kinfo.dscp_prio[i] = HNAE3_PRIO_ID_INVALID; +} + +int hclge_dscp_to_tc_map(struct hclge_dev *hdev) +{ + struct hclge_desc desc[HCLGE_DSCP_MAP_TC_BD_NUM]; + u8 *req0 = (u8 *)desc[0].data; + u8 *req1 = (u8 *)desc[1].data; + u8 pri_id, tc_id, i, j; + + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QOS_MAP, false); + 
desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_QOS_MAP, false); + + /* The low 32 dscp setting use bd0, high 32 dscp setting use bd1 */ + for (i = 0; i < HNAE3_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM; i++) { + pri_id = hdev->vport[0].nic.kinfo.dscp_prio[i]; + pri_id = pri_id == HNAE3_PRIO_ID_INVALID ? 0 : pri_id; + tc_id = hdev->tm_info.prio_tc[pri_id]; + /* Each dscp setting has 4 bits, so each byte saves two dscp + * setting + */ + req0[i >> 1] |= tc_id << HCLGE_DSCP_TC_SHIFT(i); + + j = i + HNAE3_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM; + pri_id = hdev->vport[0].nic.kinfo.dscp_prio[j]; + pri_id = pri_id == HNAE3_PRIO_ID_INVALID ? 0 : pri_id; + tc_id = hdev->tm_info.prio_tc[pri_id]; + req1[i >> 1] |= tc_id << HCLGE_DSCP_TC_SHIFT(i); + } + + return hclge_cmd_send(&hdev->hw, desc, HCLGE_DSCP_MAP_TC_BD_NUM); +} + +static int hclge_tm_pg_to_pri_map_cfg(struct hclge_dev *hdev, + u8 pg_id, u8 pri_bit_map) +{ + struct hclge_pg_to_pri_link_cmd *map; + struct hclge_desc desc; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_TO_PRI_LINK, false); + + map = (struct hclge_pg_to_pri_link_cmd *)desc.data; + + map->pg_id = pg_id; + map->pri_bit_map = pri_bit_map; + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev, u16 qs_id, u8 pri, + bool link_vld) +{ + struct hclge_qs_to_pri_link_cmd *map; + struct hclge_desc desc; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_TO_PRI_LINK, false); + + map = (struct hclge_qs_to_pri_link_cmd *)desc.data; + + map->qs_id = cpu_to_le16(qs_id); + map->priority = pri; + map->link_vld = link_vld ? HCLGE_TM_QS_PRI_LINK_VLD_MSK : 0; + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int hclge_tm_q_to_qs_map_cfg(struct hclge_dev *hdev, + u16 q_id, u16 qs_id) +{ + struct hclge_nq_to_qs_link_cmd *map; + struct hclge_desc desc; + u16 qs_id_l; + u16 qs_id_h; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NQ_TO_QS_LINK, false); + + map = (struct hclge_nq_to_qs_link_cmd *)desc.data; + + map->nq_id = cpu_to_le16(q_id); + + /* convert qs_id to the following format to support qset_id >= 1024 + * qs_id: | 15 | 14 ~ 10 | 9 ~ 0 | + * / / \ \ + * / / \ \ + * qset_id: | 15 ~ 11 | 10 | 9 ~ 0 | + * | qs_id_h | vld | qs_id_l | + */ + qs_id_l = hnae3_get_field(qs_id, HCLGE_TM_QS_ID_L_MSK, + HCLGE_TM_QS_ID_L_S); + qs_id_h = hnae3_get_field(qs_id, HCLGE_TM_QS_ID_H_MSK, + HCLGE_TM_QS_ID_H_S); + hnae3_set_field(qs_id, HCLGE_TM_QS_ID_L_MSK, HCLGE_TM_QS_ID_L_S, + qs_id_l); + hnae3_set_field(qs_id, HCLGE_TM_QS_ID_H_EXT_MSK, HCLGE_TM_QS_ID_H_EXT_S, + qs_id_h); + map->qset_id = cpu_to_le16(qs_id | HCLGE_TM_Q_QS_LINK_VLD_MSK); + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int hclge_tm_pg_weight_cfg(struct hclge_dev *hdev, u8 pg_id, + u8 dwrr) +{ + struct hclge_pg_weight_cmd *weight; + struct hclge_desc desc; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_WEIGHT, false); + + weight = (struct hclge_pg_weight_cmd *)desc.data; + + weight->pg_id = pg_id; + weight->dwrr = dwrr; + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int hclge_tm_pri_weight_cfg(struct hclge_dev *hdev, u8 pri_id, + u8 dwrr) +{ + struct hclge_priority_weight_cmd *weight; + struct hclge_desc desc; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_WEIGHT, false); + + weight = (struct hclge_priority_weight_cmd *)desc.data; + + weight->pri_id = pri_id; + weight->dwrr = dwrr; + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int 
hclge_tm_qs_weight_cfg(struct hclge_dev *hdev, u16 qs_id, + u8 dwrr) +{ + struct hclge_qs_weight_cmd *weight; + struct hclge_desc desc; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_WEIGHT, false); + + weight = (struct hclge_qs_weight_cmd *)desc.data; + + weight->qs_id = cpu_to_le16(qs_id); + weight->dwrr = dwrr; + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static u32 hclge_tm_get_shapping_para(u8 ir_b, u8 ir_u, u8 ir_s, + u8 bs_b, u8 bs_s) +{ + u32 shapping_para = 0; + + hclge_tm_set_field(shapping_para, IR_B, ir_b); + hclge_tm_set_field(shapping_para, IR_U, ir_u); + hclge_tm_set_field(shapping_para, IR_S, ir_s); + hclge_tm_set_field(shapping_para, BS_B, bs_b); + hclge_tm_set_field(shapping_para, BS_S, bs_s); + + return shapping_para; +} + +static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev, + enum hclge_shap_bucket bucket, u8 pg_id, + u32 shapping_para, u32 rate) +{ + struct hclge_pg_shapping_cmd *shap_cfg_cmd; + enum hclge_opcode_type opcode; + struct hclge_desc desc; + + opcode = bucket ? HCLGE_OPC_TM_PG_P_SHAPPING : + HCLGE_OPC_TM_PG_C_SHAPPING; + hclge_cmd_setup_basic_desc(&desc, opcode, false); + + shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data; + + shap_cfg_cmd->pg_id = pg_id; + + shap_cfg_cmd->pg_shapping_para = cpu_to_le32(shapping_para); + + hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1); + + shap_cfg_cmd->pg_rate = cpu_to_le32(rate); + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev) +{ + struct hclge_port_shapping_cmd *shap_cfg_cmd; + struct hclge_shaper_ir_para ir_para; + struct hclge_desc desc; + u32 shapping_para; + int ret; + + ret = hclge_shaper_para_calc(hdev->hw.mac.speed, HCLGE_SHAPER_LVL_PORT, + &ir_para, + hdev->ae_dev->dev_specs.max_tm_rate); + if (ret) + return ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, false); + shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data; + + shapping_para = hclge_tm_get_shapping_para(ir_para.ir_b, ir_para.ir_u, + ir_para.ir_s, + HCLGE_SHAPER_BS_U_DEF, + HCLGE_SHAPER_BS_S_DEF); + + shap_cfg_cmd->port_shapping_para = cpu_to_le32(shapping_para); + + hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1); + + shap_cfg_cmd->port_rate = cpu_to_le32(hdev->hw.mac.speed); + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev, + enum hclge_shap_bucket bucket, u8 pri_id, + u32 shapping_para, u32 rate) +{ + struct hclge_pri_shapping_cmd *shap_cfg_cmd; + enum hclge_opcode_type opcode; + struct hclge_desc desc; + + opcode = bucket ? 
HCLGE_OPC_TM_PRI_P_SHAPPING : + HCLGE_OPC_TM_PRI_C_SHAPPING; + + hclge_cmd_setup_basic_desc(&desc, opcode, false); + + shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data; + + shap_cfg_cmd->pri_id = pri_id; + + shap_cfg_cmd->pri_shapping_para = cpu_to_le32(shapping_para); + + hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1); + + shap_cfg_cmd->pri_rate = cpu_to_le32(rate); + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int hclge_tm_pg_schd_mode_cfg(struct hclge_dev *hdev, u8 pg_id) +{ + struct hclge_desc desc; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_SCH_MODE_CFG, false); + + if (hdev->tm_info.pg_info[pg_id].pg_sch_mode == HCLGE_SCH_MODE_DWRR) + desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK); + else + desc.data[1] = 0; + + desc.data[0] = cpu_to_le32(pg_id); + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int hclge_tm_pri_schd_mode_cfg(struct hclge_dev *hdev, u8 pri_id) +{ + struct hclge_desc desc; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_SCH_MODE_CFG, false); + + if (hdev->tm_info.tc_info[pri_id].tc_sch_mode == HCLGE_SCH_MODE_DWRR) + desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK); + else + desc.data[1] = 0; + + desc.data[0] = cpu_to_le32(pri_id); + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int hclge_tm_qs_schd_mode_cfg(struct hclge_dev *hdev, u16 qs_id, u8 mode) +{ + struct hclge_desc desc; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_SCH_MODE_CFG, false); + + if (mode == HCLGE_SCH_MODE_DWRR) + desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK); + else + desc.data[1] = 0; + + desc.data[0] = cpu_to_le32(qs_id); + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc, u8 grp_id, + u32 bit_map) +{ + struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd; + struct hclge_desc desc; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_BP_TO_QSET_MAPPING, + false); + + bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data; + + bp_to_qs_map_cmd->tc_id = tc; + bp_to_qs_map_cmd->qs_group_id = grp_id; + bp_to_qs_map_cmd->qs_bit_map = cpu_to_le32(bit_map); + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate) +{ + struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; + struct hclge_qs_shapping_cmd *shap_cfg_cmd; + struct hclge_shaper_ir_para ir_para; + struct hclge_dev *hdev = vport->back; + struct hclge_desc desc; + u32 shaper_para; + int ret, i; + + if (!max_tx_rate) + max_tx_rate = hdev->ae_dev->dev_specs.max_tm_rate; + + ret = hclge_shaper_para_calc(max_tx_rate, HCLGE_SHAPER_LVL_QSET, + &ir_para, + hdev->ae_dev->dev_specs.max_tm_rate); + if (ret) + return ret; + + shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b, ir_para.ir_u, + ir_para.ir_s, + HCLGE_SHAPER_BS_U_DEF, + HCLGE_SHAPER_BS_S_DEF); + + for (i = 0; i < kinfo->tc_info.num_tc; i++) { + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QCN_SHAPPING_CFG, + false); + + shap_cfg_cmd = (struct hclge_qs_shapping_cmd *)desc.data; + shap_cfg_cmd->qs_id = cpu_to_le16(vport->qs_offset + i); + shap_cfg_cmd->qs_shapping_para = cpu_to_le32(shaper_para); + + hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1); + shap_cfg_cmd->qs_rate = cpu_to_le32(max_tx_rate); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "vport%u, qs%u failed to set tx_rate:%d, ret=%d\n", + vport->vport_id, shap_cfg_cmd->qs_id, + max_tx_rate, ret); + return ret; + } + } + + return 0; +} + +static u16 
hclge_vport_get_max_rss_size(struct hclge_vport *vport) +{ + struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; + struct hnae3_tc_info *tc_info = &kinfo->tc_info; + struct hclge_dev *hdev = vport->back; + u16 max_rss_size = 0; + int i; + + if (!tc_info->mqprio_active) + return vport->alloc_tqps / tc_info->num_tc; + + for (i = 0; i < HNAE3_MAX_TC; i++) { + if (!(hdev->hw_tc_map & BIT(i)) || i >= tc_info->num_tc) + continue; + if (max_rss_size < tc_info->tqp_count[i]) + max_rss_size = tc_info->tqp_count[i]; + } + + return max_rss_size; +} + +static u16 hclge_vport_get_tqp_num(struct hclge_vport *vport) +{ + struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; + struct hnae3_tc_info *tc_info = &kinfo->tc_info; + struct hclge_dev *hdev = vport->back; + int sum = 0; + int i; + + if (!tc_info->mqprio_active) + return kinfo->rss_size * tc_info->num_tc; + + for (i = 0; i < HNAE3_MAX_TC; i++) { + if (hdev->hw_tc_map & BIT(i) && i < tc_info->num_tc) + sum += tc_info->tqp_count[i]; + } + + return sum; +} + +static void hclge_tm_update_kinfo_rss_size(struct hclge_vport *vport) +{ + struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; + struct hclge_dev *hdev = vport->back; + u16 vport_max_rss_size; + u16 max_rss_size; + + /* TC configuration is shared by PF/VF in one port, only allow + * one tc for VF for simplicity. VF's vport_id is non zero. + */ + if (vport->vport_id) { + kinfo->tc_info.max_tc = 1; + kinfo->tc_info.num_tc = 1; + vport->qs_offset = HNAE3_MAX_TC + + vport->vport_id - HCLGE_VF_VPORT_START_NUM; + vport_max_rss_size = hdev->vf_rss_size_max; + } else { + kinfo->tc_info.max_tc = hdev->tc_max; + kinfo->tc_info.num_tc = + min_t(u16, vport->alloc_tqps, hdev->tm_info.num_tc); + vport->qs_offset = 0; + vport_max_rss_size = hdev->pf_rss_size_max; + } + + max_rss_size = min_t(u16, vport_max_rss_size, + hclge_vport_get_max_rss_size(vport)); + + /* Set to user value, no larger than max_rss_size. */ + if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size && + kinfo->req_rss_size <= max_rss_size) { + dev_info(&hdev->pdev->dev, "rss changes from %u to %u\n", + kinfo->rss_size, kinfo->req_rss_size); + kinfo->rss_size = kinfo->req_rss_size; + } else if (kinfo->rss_size > max_rss_size || + (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) { + /* Set to the maximum specification value (max_rss_size). */ + kinfo->rss_size = max_rss_size; + } +} + +static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport) +{ + struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; + struct hclge_dev *hdev = vport->back; + u8 i; + + hclge_tm_update_kinfo_rss_size(vport); + kinfo->num_tqps = hclge_vport_get_tqp_num(vport); + vport->dwrr = 100; /* 100 percent as init */ + vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit; + + if (vport->vport_id == PF_VPORT_ID) + hdev->rss_cfg.rss_size = kinfo->rss_size; + + /* when enable mqprio, the tc_info has been updated. 
*/ + if (kinfo->tc_info.mqprio_active) + return; + + for (i = 0; i < HNAE3_MAX_TC; i++) { + if (hdev->hw_tc_map & BIT(i) && i < kinfo->tc_info.num_tc) { + kinfo->tc_info.tqp_offset[i] = i * kinfo->rss_size; + kinfo->tc_info.tqp_count[i] = kinfo->rss_size; + } else { + /* Set to default queue if TC is disable */ + kinfo->tc_info.tqp_offset[i] = 0; + kinfo->tc_info.tqp_count[i] = 1; + } + } + + memcpy(kinfo->tc_info.prio_tc, hdev->tm_info.prio_tc, + sizeof_field(struct hnae3_tc_info, prio_tc)); +} + +static void hclge_tm_vport_info_update(struct hclge_dev *hdev) +{ + struct hclge_vport *vport = hdev->vport; + u32 i; + + for (i = 0; i < hdev->num_alloc_vport; i++) { + hclge_tm_vport_tc_info_update(vport); + + vport++; + } +} + +static void hclge_tm_tc_info_init(struct hclge_dev *hdev) +{ + u8 i, tc_sch_mode; + u32 bw_limit; + + for (i = 0; i < hdev->tc_max; i++) { + if (i < hdev->tm_info.num_tc) { + tc_sch_mode = HCLGE_SCH_MODE_DWRR; + bw_limit = hdev->tm_info.pg_info[0].bw_limit; + } else { + tc_sch_mode = HCLGE_SCH_MODE_SP; + bw_limit = 0; + } + + hdev->tm_info.tc_info[i].tc_id = i; + hdev->tm_info.tc_info[i].tc_sch_mode = tc_sch_mode; + hdev->tm_info.tc_info[i].pgid = 0; + hdev->tm_info.tc_info[i].bw_limit = bw_limit; + } + + for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) + hdev->tm_info.prio_tc[i] = + (i >= hdev->tm_info.num_tc) ? 0 : i; +} + +static void hclge_tm_pg_info_init(struct hclge_dev *hdev) +{ +#define BW_PERCENT 100 +#define DEFAULT_BW_WEIGHT 1 + + u8 i; + + for (i = 0; i < hdev->tm_info.num_pg; i++) { + int k; + + hdev->tm_info.pg_dwrr[i] = i ? 0 : BW_PERCENT; + + hdev->tm_info.pg_info[i].pg_id = i; + hdev->tm_info.pg_info[i].pg_sch_mode = HCLGE_SCH_MODE_DWRR; + + hdev->tm_info.pg_info[i].bw_limit = + hdev->ae_dev->dev_specs.max_tm_rate; + + if (i != 0) + continue; + + hdev->tm_info.pg_info[i].tc_bit_map = hdev->hw_tc_map; + for (k = 0; k < hdev->tm_info.num_tc; k++) + hdev->tm_info.pg_info[i].tc_dwrr[k] = BW_PERCENT; + for (; k < HNAE3_MAX_TC; k++) + hdev->tm_info.pg_info[i].tc_dwrr[k] = DEFAULT_BW_WEIGHT; + } +} + +static void hclge_update_fc_mode_by_dcb_flag(struct hclge_dev *hdev) +{ + if (hdev->tm_info.num_tc == 1 && !hdev->tm_info.pfc_en) { + if (hdev->fc_mode_last_time == HCLGE_FC_PFC) + dev_warn(&hdev->pdev->dev, + "Only 1 tc used, but last mode is FC_PFC\n"); + + hdev->tm_info.fc_mode = hdev->fc_mode_last_time; + } else if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) { + /* fc_mode_last_time record the last fc_mode when + * DCB is enabled, so that fc_mode can be set to + * the correct value when DCB is disabled. 
+ */ + hdev->fc_mode_last_time = hdev->tm_info.fc_mode; + hdev->tm_info.fc_mode = HCLGE_FC_PFC; + } +} + +static void hclge_update_fc_mode(struct hclge_dev *hdev) +{ + if (!hdev->tm_info.pfc_en) { + hdev->tm_info.fc_mode = hdev->fc_mode_last_time; + return; + } + + if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) { + hdev->fc_mode_last_time = hdev->tm_info.fc_mode; + hdev->tm_info.fc_mode = HCLGE_FC_PFC; + } +} + +void hclge_tm_pfc_info_update(struct hclge_dev *hdev) +{ + if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) + hclge_update_fc_mode(hdev); + else + hclge_update_fc_mode_by_dcb_flag(hdev); +} + +static void hclge_tm_schd_info_init(struct hclge_dev *hdev) +{ + hclge_tm_pg_info_init(hdev); + + hclge_tm_tc_info_init(hdev); + + hclge_tm_vport_info_update(hdev); + + hclge_tm_pfc_info_update(hdev); +} + +static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev) +{ + int ret; + u32 i; + + if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) + return 0; + + for (i = 0; i < hdev->tm_info.num_pg; i++) { + /* Cfg mapping */ + ret = hclge_tm_pg_to_pri_map_cfg( + hdev, i, hdev->tm_info.pg_info[i].tc_bit_map); + if (ret) + return ret; + } + + return 0; +} + +static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev) +{ + u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate; + struct hclge_shaper_ir_para ir_para; + u32 shaper_para; + int ret; + u32 i; + + /* Cfg pg schd */ + if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) + return 0; + + /* Pg to pri */ + for (i = 0; i < hdev->tm_info.num_pg; i++) { + u32 rate = hdev->tm_info.pg_info[i].bw_limit; + + /* Calc shaper para */ + ret = hclge_shaper_para_calc(rate, HCLGE_SHAPER_LVL_PG, + &ir_para, max_tm_rate); + if (ret) + return ret; + + shaper_para = hclge_tm_get_shapping_para(0, 0, 0, + HCLGE_SHAPER_BS_U_DEF, + HCLGE_SHAPER_BS_S_DEF); + ret = hclge_tm_pg_shapping_cfg(hdev, + HCLGE_TM_SHAP_C_BUCKET, i, + shaper_para, rate); + if (ret) + return ret; + + shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b, + ir_para.ir_u, + ir_para.ir_s, + HCLGE_SHAPER_BS_U_DEF, + HCLGE_SHAPER_BS_S_DEF); + ret = hclge_tm_pg_shapping_cfg(hdev, + HCLGE_TM_SHAP_P_BUCKET, i, + shaper_para, rate); + if (ret) + return ret; + } + + return 0; +} + +static int hclge_tm_pg_dwrr_cfg(struct hclge_dev *hdev) +{ + int ret; + u32 i; + + /* cfg pg schd */ + if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) + return 0; + + /* pg to prio */ + for (i = 0; i < hdev->tm_info.num_pg; i++) { + /* Cfg dwrr */ + ret = hclge_tm_pg_weight_cfg(hdev, i, hdev->tm_info.pg_dwrr[i]); + if (ret) + return ret; + } + + return 0; +} + +static int hclge_vport_q_to_qs_map(struct hclge_dev *hdev, + struct hclge_vport *vport) +{ + struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; + struct hnae3_tc_info *tc_info = &kinfo->tc_info; + struct hnae3_queue **tqp = kinfo->tqp; + u32 i, j; + int ret; + + for (i = 0; i < tc_info->num_tc; i++) { + for (j = 0; j < tc_info->tqp_count[i]; j++) { + struct hnae3_queue *q = tqp[tc_info->tqp_offset[i] + j]; + + ret = hclge_tm_q_to_qs_map_cfg(hdev, + hclge_get_queue_id(q), + vport->qs_offset + i); + if (ret) + return ret; + } + } + + return 0; +} + +static int hclge_tm_pri_q_qs_cfg_tc_base(struct hclge_dev *hdev) +{ + struct hclge_vport *vport = hdev->vport; + u16 i, k; + int ret; + + /* Cfg qs -> pri mapping, one by one mapping */ + for (k = 0; k < hdev->num_alloc_vport; k++) { + struct hnae3_knic_private_info *kinfo = &vport[k].nic.kinfo; + + for (i = 0; i < kinfo->tc_info.max_tc; i++) { + u8 pri = i < kinfo->tc_info.num_tc ? 
i : 0; + bool link_vld = i < kinfo->tc_info.num_tc; + + ret = hclge_tm_qs_to_pri_map_cfg(hdev, + vport[k].qs_offset + i, + pri, link_vld); + if (ret) + return ret; + } + } + + return 0; +} + +static int hclge_tm_pri_q_qs_cfg_vnet_base(struct hclge_dev *hdev) +{ + struct hclge_vport *vport = hdev->vport; + u16 i, k; + int ret; + + /* Cfg qs -> pri mapping, qs = tc, pri = vf, 8 qs -> 1 pri */ + for (k = 0; k < hdev->num_alloc_vport; k++) + for (i = 0; i < HNAE3_MAX_TC; i++) { + ret = hclge_tm_qs_to_pri_map_cfg(hdev, + vport[k].qs_offset + i, + k, true); + if (ret) + return ret; + } + + return 0; +} + +static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev) +{ + struct hclge_vport *vport = hdev->vport; + int ret; + u32 i; + + if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) + ret = hclge_tm_pri_q_qs_cfg_tc_base(hdev); + else if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE) + ret = hclge_tm_pri_q_qs_cfg_vnet_base(hdev); + else + return -EINVAL; + + if (ret) + return ret; + + /* Cfg q -> qs mapping */ + for (i = 0; i < hdev->num_alloc_vport; i++) { + ret = hclge_vport_q_to_qs_map(hdev, vport); + if (ret) + return ret; + + vport++; + } + + return 0; +} + +static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev) +{ + u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate; + struct hclge_shaper_ir_para ir_para; + u32 shaper_para_c, shaper_para_p; + int ret; + u32 i; + + for (i = 0; i < hdev->tc_max; i++) { + u32 rate = hdev->tm_info.tc_info[i].bw_limit; + + if (rate) { + ret = hclge_shaper_para_calc(rate, HCLGE_SHAPER_LVL_PRI, + &ir_para, max_tm_rate); + if (ret) + return ret; + + shaper_para_c = hclge_tm_get_shapping_para(0, 0, 0, + HCLGE_SHAPER_BS_U_DEF, + HCLGE_SHAPER_BS_S_DEF); + shaper_para_p = hclge_tm_get_shapping_para(ir_para.ir_b, + ir_para.ir_u, + ir_para.ir_s, + HCLGE_SHAPER_BS_U_DEF, + HCLGE_SHAPER_BS_S_DEF); + } else { + shaper_para_c = 0; + shaper_para_p = 0; + } + + ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET, i, + shaper_para_c, rate); + if (ret) + return ret; + + ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET, i, + shaper_para_p, rate); + if (ret) + return ret; + } + + return 0; +} + +static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport) +{ + struct hclge_dev *hdev = vport->back; + struct hclge_shaper_ir_para ir_para; + u32 shaper_para; + int ret; + + ret = hclge_shaper_para_calc(vport->bw_limit, HCLGE_SHAPER_LVL_VF, + &ir_para, + hdev->ae_dev->dev_specs.max_tm_rate); + if (ret) + return ret; + + shaper_para = hclge_tm_get_shapping_para(0, 0, 0, + HCLGE_SHAPER_BS_U_DEF, + HCLGE_SHAPER_BS_S_DEF); + ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET, + vport->vport_id, shaper_para, + vport->bw_limit); + if (ret) + return ret; + + shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b, ir_para.ir_u, + ir_para.ir_s, + HCLGE_SHAPER_BS_U_DEF, + HCLGE_SHAPER_BS_S_DEF); + ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET, + vport->vport_id, shaper_para, + vport->bw_limit); + if (ret) + return ret; + + return 0; +} + +static int hclge_tm_pri_vnet_base_shaper_qs_cfg(struct hclge_vport *vport) +{ + struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; + struct hclge_dev *hdev = vport->back; + u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate; + struct hclge_shaper_ir_para ir_para; + u32 i; + int ret; + + for (i = 0; i < kinfo->tc_info.num_tc; i++) { + ret = hclge_shaper_para_calc(hdev->tm_info.tc_info[i].bw_limit, + HCLGE_SHAPER_LVL_QSET, + &ir_para, max_tm_rate); + if (ret) + return ret; 
+ } + + return 0; +} + +static int hclge_tm_pri_vnet_base_shaper_cfg(struct hclge_dev *hdev) +{ + struct hclge_vport *vport = hdev->vport; + int ret; + u32 i; + + /* Need config vport shaper */ + for (i = 0; i < hdev->num_alloc_vport; i++) { + ret = hclge_tm_pri_vnet_base_shaper_pri_cfg(vport); + if (ret) + return ret; + + ret = hclge_tm_pri_vnet_base_shaper_qs_cfg(vport); + if (ret) + return ret; + + vport++; + } + + return 0; +} + +static int hclge_tm_pri_shaper_cfg(struct hclge_dev *hdev) +{ + int ret; + + if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) { + ret = hclge_tm_pri_tc_base_shaper_cfg(hdev); + if (ret) + return ret; + } else { + ret = hclge_tm_pri_vnet_base_shaper_cfg(hdev); + if (ret) + return ret; + } + + return 0; +} + +static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev) +{ + struct hclge_vport *vport = hdev->vport; + struct hclge_pg_info *pg_info; + u8 dwrr; + int ret; + u32 i, k; + + for (i = 0; i < hdev->tc_max; i++) { + pg_info = + &hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid]; + dwrr = pg_info->tc_dwrr[i]; + + ret = hclge_tm_pri_weight_cfg(hdev, i, dwrr); + if (ret) + return ret; + + for (k = 0; k < hdev->num_alloc_vport; k++) { + struct hnae3_knic_private_info *kinfo = &vport[k].nic.kinfo; + + if (i >= kinfo->tc_info.max_tc) + continue; + + dwrr = i < kinfo->tc_info.num_tc ? vport[k].dwrr : 0; + ret = hclge_tm_qs_weight_cfg( + hdev, vport[k].qs_offset + i, + dwrr); + if (ret) + return ret; + } + } + + return 0; +} + +static int hclge_tm_ets_tc_dwrr_cfg(struct hclge_dev *hdev) +{ +#define DEFAULT_TC_OFFSET 14 + + struct hclge_ets_tc_weight_cmd *ets_weight; + struct hclge_desc desc; + unsigned int i; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, false); + ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data; + + for (i = 0; i < HNAE3_MAX_TC; i++) { + struct hclge_pg_info *pg_info; + + pg_info = &hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid]; + ets_weight->tc_weight[i] = pg_info->tc_dwrr[i]; + } + + ets_weight->weight_offset = DEFAULT_TC_OFFSET; + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int hclge_tm_pri_vnet_base_dwrr_pri_cfg(struct hclge_vport *vport) +{ + struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; + struct hclge_dev *hdev = vport->back; + int ret; + u8 i; + + /* Vf dwrr */ + ret = hclge_tm_pri_weight_cfg(hdev, vport->vport_id, vport->dwrr); + if (ret) + return ret; + + /* Qset dwrr */ + for (i = 0; i < kinfo->tc_info.num_tc; i++) { + ret = hclge_tm_qs_weight_cfg( + hdev, vport->qs_offset + i, + hdev->tm_info.pg_info[0].tc_dwrr[i]); + if (ret) + return ret; + } + + return 0; +} + +static int hclge_tm_pri_vnet_base_dwrr_cfg(struct hclge_dev *hdev) +{ + struct hclge_vport *vport = hdev->vport; + int ret; + u32 i; + + for (i = 0; i < hdev->num_alloc_vport; i++) { + ret = hclge_tm_pri_vnet_base_dwrr_pri_cfg(vport); + if (ret) + return ret; + + vport++; + } + + return 0; +} + +static int hclge_tm_pri_dwrr_cfg(struct hclge_dev *hdev) +{ + int ret; + + if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) { + ret = hclge_tm_pri_tc_base_dwrr_cfg(hdev); + if (ret) + return ret; + + if (!hnae3_dev_dcb_supported(hdev)) + return 0; + + ret = hclge_tm_ets_tc_dwrr_cfg(hdev); + if (ret == -EOPNOTSUPP) { + dev_warn(&hdev->pdev->dev, + "fw %08x doesn't support ets tc weight cmd\n", + hdev->fw_version); + ret = 0; + } + + return ret; + } else { + ret = hclge_tm_pri_vnet_base_dwrr_cfg(hdev); + if (ret) + return ret; + } + + return 0; +} + +static int hclge_tm_map_cfg(struct hclge_dev *hdev) +{ + int 
ret; + + ret = hclge_up_to_tc_map(hdev); + if (ret) + return ret; + + if (hdev->vport[0].nic.kinfo.tc_map_mode == HNAE3_TC_MAP_MODE_DSCP) { + ret = hclge_dscp_to_tc_map(hdev); + if (ret) + return ret; + } + + ret = hclge_tm_pg_to_pri_map(hdev); + if (ret) + return ret; + + return hclge_tm_pri_q_qs_cfg(hdev); +} + +static int hclge_tm_shaper_cfg(struct hclge_dev *hdev) +{ + int ret; + + ret = hclge_tm_port_shaper_cfg(hdev); + if (ret) + return ret; + + ret = hclge_tm_pg_shaper_cfg(hdev); + if (ret) + return ret; + + return hclge_tm_pri_shaper_cfg(hdev); +} + +int hclge_tm_dwrr_cfg(struct hclge_dev *hdev) +{ + int ret; + + ret = hclge_tm_pg_dwrr_cfg(hdev); + if (ret) + return ret; + + return hclge_tm_pri_dwrr_cfg(hdev); +} + +static int hclge_tm_lvl2_schd_mode_cfg(struct hclge_dev *hdev) +{ + int ret; + u8 i; + + /* Only being config on TC-Based scheduler mode */ + if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE) + return 0; + + for (i = 0; i < hdev->tm_info.num_pg; i++) { + ret = hclge_tm_pg_schd_mode_cfg(hdev, i); + if (ret) + return ret; + } + + return 0; +} + +static int hclge_tm_schd_mode_tc_base_cfg(struct hclge_dev *hdev, u8 pri_id) +{ + struct hclge_vport *vport = hdev->vport; + int ret; + u8 mode; + u16 i; + + ret = hclge_tm_pri_schd_mode_cfg(hdev, pri_id); + if (ret) + return ret; + + for (i = 0; i < hdev->num_alloc_vport; i++) { + struct hnae3_knic_private_info *kinfo = &vport[i].nic.kinfo; + + if (pri_id >= kinfo->tc_info.max_tc) + continue; + + mode = pri_id < kinfo->tc_info.num_tc ? HCLGE_SCH_MODE_DWRR : + HCLGE_SCH_MODE_SP; + ret = hclge_tm_qs_schd_mode_cfg(hdev, + vport[i].qs_offset + pri_id, + mode); + if (ret) + return ret; + } + + return 0; +} + +static int hclge_tm_schd_mode_vnet_base_cfg(struct hclge_vport *vport) +{ + struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; + struct hclge_dev *hdev = vport->back; + int ret; + u8 i; + + if (vport->vport_id >= HNAE3_MAX_TC) + return -EINVAL; + + ret = hclge_tm_pri_schd_mode_cfg(hdev, vport->vport_id); + if (ret) + return ret; + + for (i = 0; i < kinfo->tc_info.num_tc; i++) { + u8 sch_mode = hdev->tm_info.tc_info[i].tc_sch_mode; + + ret = hclge_tm_qs_schd_mode_cfg(hdev, vport->qs_offset + i, + sch_mode); + if (ret) + return ret; + } + + return 0; +} + +static int hclge_tm_lvl34_schd_mode_cfg(struct hclge_dev *hdev) +{ + struct hclge_vport *vport = hdev->vport; + int ret; + u8 i; + + if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) { + for (i = 0; i < hdev->tc_max; i++) { + ret = hclge_tm_schd_mode_tc_base_cfg(hdev, i); + if (ret) + return ret; + } + } else { + for (i = 0; i < hdev->num_alloc_vport; i++) { + ret = hclge_tm_schd_mode_vnet_base_cfg(vport); + if (ret) + return ret; + + vport++; + } + } + + return 0; +} + +static int hclge_tm_schd_mode_hw(struct hclge_dev *hdev) +{ + int ret; + + ret = hclge_tm_lvl2_schd_mode_cfg(hdev); + if (ret) + return ret; + + return hclge_tm_lvl34_schd_mode_cfg(hdev); +} + +int hclge_tm_schd_setup_hw(struct hclge_dev *hdev) +{ + int ret; + + /* Cfg tm mapping */ + ret = hclge_tm_map_cfg(hdev); + if (ret) + return ret; + + /* Cfg tm shaper */ + ret = hclge_tm_shaper_cfg(hdev); + if (ret) + return ret; + + /* Cfg dwrr */ + ret = hclge_tm_dwrr_cfg(hdev); + if (ret) + return ret; + + /* Cfg schd mode for each level schd */ + return hclge_tm_schd_mode_hw(hdev); +} + +static int hclge_pause_param_setup_hw(struct hclge_dev *hdev) +{ + struct hclge_mac *mac = &hdev->hw.mac; + + return hclge_pause_param_cfg(hdev, mac->mac_addr, + HCLGE_DEFAULT_PAUSE_TRANS_GAP, + 
HCLGE_DEFAULT_PAUSE_TRANS_TIME); +} + +static int hclge_pfc_setup_hw(struct hclge_dev *hdev) +{ + u8 enable_bitmap = 0; + + if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) + enable_bitmap = HCLGE_TX_MAC_PAUSE_EN_MSK | + HCLGE_RX_MAC_PAUSE_EN_MSK; + + return hclge_pfc_pause_en_cfg(hdev, enable_bitmap, + hdev->tm_info.pfc_en); +} + +/* for the queues that use for backpress, divides to several groups, + * each group contains 32 queue sets, which can be represented by u32 bitmap. + */ +static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc) +{ + u16 grp_id_shift = HCLGE_BP_GRP_ID_S; + u16 grp_id_mask = HCLGE_BP_GRP_ID_M; + u8 grp_num = HCLGE_BP_GRP_NUM; + int i; + + if (hdev->num_tqps > HCLGE_TQP_MAX_SIZE_DEV_V2) { + grp_num = HCLGE_BP_EXT_GRP_NUM; + grp_id_mask = HCLGE_BP_EXT_GRP_ID_M; + grp_id_shift = HCLGE_BP_EXT_GRP_ID_S; + } + + for (i = 0; i < grp_num; i++) { + u32 qs_bitmap = 0; + int k, ret; + + for (k = 0; k < hdev->num_alloc_vport; k++) { + struct hclge_vport *vport = &hdev->vport[k]; + u16 qs_id = vport->qs_offset + tc; + u8 grp, sub_grp; + + grp = hnae3_get_field(qs_id, grp_id_mask, grp_id_shift); + sub_grp = hnae3_get_field(qs_id, HCLGE_BP_SUB_GRP_ID_M, + HCLGE_BP_SUB_GRP_ID_S); + if (i == grp) + qs_bitmap |= (1 << sub_grp); + } + + ret = hclge_tm_qs_bp_cfg(hdev, tc, i, qs_bitmap); + if (ret) + return ret; + } + + return 0; +} + +int hclge_mac_pause_setup_hw(struct hclge_dev *hdev) +{ + bool tx_en, rx_en; + + switch (hdev->tm_info.fc_mode) { + case HCLGE_FC_NONE: + tx_en = false; + rx_en = false; + break; + case HCLGE_FC_RX_PAUSE: + tx_en = false; + rx_en = true; + break; + case HCLGE_FC_TX_PAUSE: + tx_en = true; + rx_en = false; + break; + case HCLGE_FC_FULL: + tx_en = true; + rx_en = true; + break; + case HCLGE_FC_PFC: + tx_en = false; + rx_en = false; + break; + default: + tx_en = true; + rx_en = true; + } + + return hclge_mac_pause_en_cfg(hdev, tx_en, rx_en); +} + +static int hclge_tm_bp_setup(struct hclge_dev *hdev) +{ + int ret; + int i; + + for (i = 0; i < hdev->tm_info.num_tc; i++) { + ret = hclge_bp_setup_hw(hdev, i); + if (ret) + return ret; + } + + return 0; +} + +int hclge_pause_setup_hw(struct hclge_dev *hdev, bool init) +{ + int ret; + + ret = hclge_pause_param_setup_hw(hdev); + if (ret) + return ret; + + ret = hclge_mac_pause_setup_hw(hdev); + if (ret) + return ret; + + /* Only DCB-supported dev supports qset back pressure and pfc cmd */ + if (!hnae3_dev_dcb_supported(hdev)) + return 0; + + /* GE MAC does not support PFC, when driver is initializing and MAC + * is in GE Mode, ignore the error here, otherwise initialization + * will fail. + */ + ret = hclge_pfc_setup_hw(hdev); + if (init && ret == -EOPNOTSUPP) + dev_warn(&hdev->pdev->dev, "GE MAC does not support pfc\n"); + else if (ret) { + dev_err(&hdev->pdev->dev, "config pfc failed! 
ret = %d\n", + ret); + return ret; + } + + return hclge_tm_bp_setup(hdev); +} + +void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc) +{ + struct hclge_vport *vport = hdev->vport; + struct hnae3_knic_private_info *kinfo; + u32 i, k; + + for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) { + hdev->tm_info.prio_tc[i] = prio_tc[i]; + + for (k = 0; k < hdev->num_alloc_vport; k++) { + kinfo = &vport[k].nic.kinfo; + kinfo->tc_info.prio_tc[i] = prio_tc[i]; + } + } +} + +void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc) +{ + u8 bit_map = 0; + u8 i; + + hdev->tm_info.num_tc = num_tc; + + for (i = 0; i < hdev->tm_info.num_tc; i++) + bit_map |= BIT(i); + + if (!bit_map) { + bit_map = 1; + hdev->tm_info.num_tc = 1; + } + + hdev->hw_tc_map = bit_map; + + hclge_tm_schd_info_init(hdev); +} + +int hclge_tm_init_hw(struct hclge_dev *hdev, bool init) +{ + int ret; + + if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) && + (hdev->tx_sch_mode != HCLGE_FLAG_VNET_BASE_SCH_MODE)) + return -ENOTSUPP; + + ret = hclge_tm_schd_setup_hw(hdev); + if (ret) + return ret; + + ret = hclge_pause_setup_hw(hdev, init); + if (ret) + return ret; + + return 0; +} + +int hclge_tm_schd_init(struct hclge_dev *hdev) +{ + /* fc_mode is HCLGE_FC_FULL on reset */ + hdev->tm_info.fc_mode = HCLGE_FC_FULL; + hdev->fc_mode_last_time = hdev->tm_info.fc_mode; + + if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE && + hdev->tm_info.num_pg != 1) + return -EINVAL; + + hclge_tm_schd_info_init(hdev); + hclge_dscp_to_prio_map_init(hdev); + + return hclge_tm_init_hw(hdev, true); +} + +int hclge_tm_vport_map_update(struct hclge_dev *hdev) +{ + struct hclge_vport *vport = hdev->vport; + int ret; + + hclge_tm_vport_tc_info_update(vport); + + ret = hclge_vport_q_to_qs_map(hdev, vport); + if (ret) + return ret; + + if (hdev->tm_info.num_tc == 1 && !hdev->tm_info.pfc_en) + return 0; + + return hclge_tm_bp_setup(hdev); +} + +int hclge_tm_get_qset_num(struct hclge_dev *hdev, u16 *qset_num) +{ + struct hclge_tm_nodes_cmd *nodes; + struct hclge_desc desc; + int ret; + + if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) { + /* Each PF has 8 qsets and each VF has 1 qset */ + *qset_num = HCLGE_TM_PF_MAX_QSET_NUM + pci_num_vf(hdev->pdev); + return 0; + } + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NODES, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get qset num, ret = %d\n", ret); + return ret; + } + + nodes = (struct hclge_tm_nodes_cmd *)desc.data; + *qset_num = le16_to_cpu(nodes->qset_num); + return 0; +} + +int hclge_tm_get_pri_num(struct hclge_dev *hdev, u8 *pri_num) +{ + struct hclge_tm_nodes_cmd *nodes; + struct hclge_desc desc; + int ret; + + if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) { + *pri_num = HCLGE_TM_PF_MAX_PRI_NUM; + return 0; + } + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NODES, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get pri num, ret = %d\n", ret); + return ret; + } + + nodes = (struct hclge_tm_nodes_cmd *)desc.data; + *pri_num = nodes->pri_num; + return 0; +} + +int hclge_tm_get_qset_map_pri(struct hclge_dev *hdev, u16 qset_id, u8 *priority, + u8 *link_vld) +{ + struct hclge_qs_to_pri_link_cmd *map; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_TO_PRI_LINK, true); + map = (struct hclge_qs_to_pri_link_cmd *)desc.data; + map->qs_id = cpu_to_le16(qset_id); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if 
(ret) { + dev_err(&hdev->pdev->dev, + "failed to get qset map priority, ret = %d\n", ret); + return ret; + } + + *priority = map->priority; + *link_vld = map->link_vld; + return 0; +} + +int hclge_tm_get_qset_sch_mode(struct hclge_dev *hdev, u16 qset_id, u8 *mode) +{ + struct hclge_qs_sch_mode_cfg_cmd *qs_sch_mode; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_SCH_MODE_CFG, true); + qs_sch_mode = (struct hclge_qs_sch_mode_cfg_cmd *)desc.data; + qs_sch_mode->qs_id = cpu_to_le16(qset_id); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get qset sch mode, ret = %d\n", ret); + return ret; + } + + *mode = qs_sch_mode->sch_mode; + return 0; +} + +int hclge_tm_get_qset_weight(struct hclge_dev *hdev, u16 qset_id, u8 *weight) +{ + struct hclge_qs_weight_cmd *qs_weight; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_WEIGHT, true); + qs_weight = (struct hclge_qs_weight_cmd *)desc.data; + qs_weight->qs_id = cpu_to_le16(qset_id); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get qset weight, ret = %d\n", ret); + return ret; + } + + *weight = qs_weight->dwrr; + return 0; +} + +int hclge_tm_get_qset_shaper(struct hclge_dev *hdev, u16 qset_id, + struct hclge_tm_shaper_para *para) +{ + struct hclge_qs_shapping_cmd *shap_cfg_cmd; + struct hclge_desc desc; + u32 shapping_para; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QCN_SHAPPING_CFG, true); + shap_cfg_cmd = (struct hclge_qs_shapping_cmd *)desc.data; + shap_cfg_cmd->qs_id = cpu_to_le16(qset_id); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get qset %u shaper, ret = %d\n", qset_id, + ret); + return ret; + } + + shapping_para = le32_to_cpu(shap_cfg_cmd->qs_shapping_para); + para->ir_b = hclge_tm_get_field(shapping_para, IR_B); + para->ir_u = hclge_tm_get_field(shapping_para, IR_U); + para->ir_s = hclge_tm_get_field(shapping_para, IR_S); + para->bs_b = hclge_tm_get_field(shapping_para, BS_B); + para->bs_s = hclge_tm_get_field(shapping_para, BS_S); + para->flag = shap_cfg_cmd->flag; + para->rate = le32_to_cpu(shap_cfg_cmd->qs_rate); + return 0; +} + +int hclge_tm_get_pri_sch_mode(struct hclge_dev *hdev, u8 pri_id, u8 *mode) +{ + struct hclge_pri_sch_mode_cfg_cmd *pri_sch_mode; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_SCH_MODE_CFG, true); + pri_sch_mode = (struct hclge_pri_sch_mode_cfg_cmd *)desc.data; + pri_sch_mode->pri_id = pri_id; + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get priority sch mode, ret = %d\n", ret); + return ret; + } + + *mode = pri_sch_mode->sch_mode; + return 0; +} + +int hclge_tm_get_pri_weight(struct hclge_dev *hdev, u8 pri_id, u8 *weight) +{ + struct hclge_priority_weight_cmd *priority_weight; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_WEIGHT, true); + priority_weight = (struct hclge_priority_weight_cmd *)desc.data; + priority_weight->pri_id = pri_id; + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get priority weight, ret = %d\n", ret); + return ret; + } + + *weight = priority_weight->dwrr; + return 0; +} + +int hclge_tm_get_pri_shaper(struct hclge_dev *hdev, u8 pri_id, + enum hclge_opcode_type cmd, + struct hclge_tm_shaper_para *para) +{ + struct hclge_pri_shapping_cmd 
*shap_cfg_cmd; + struct hclge_desc desc; + u32 shapping_para; + int ret; + + if (cmd != HCLGE_OPC_TM_PRI_C_SHAPPING && + cmd != HCLGE_OPC_TM_PRI_P_SHAPPING) + return -EINVAL; + + hclge_cmd_setup_basic_desc(&desc, cmd, true); + shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data; + shap_cfg_cmd->pri_id = pri_id; + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get priority shaper(%#x), ret = %d\n", + cmd, ret); + return ret; + } + + shapping_para = le32_to_cpu(shap_cfg_cmd->pri_shapping_para); + para->ir_b = hclge_tm_get_field(shapping_para, IR_B); + para->ir_u = hclge_tm_get_field(shapping_para, IR_U); + para->ir_s = hclge_tm_get_field(shapping_para, IR_S); + para->bs_b = hclge_tm_get_field(shapping_para, BS_B); + para->bs_s = hclge_tm_get_field(shapping_para, BS_S); + para->flag = shap_cfg_cmd->flag; + para->rate = le32_to_cpu(shap_cfg_cmd->pri_rate); + return 0; +} + +int hclge_tm_get_q_to_qs_map(struct hclge_dev *hdev, u16 q_id, u16 *qset_id) +{ + struct hclge_nq_to_qs_link_cmd *map; + struct hclge_desc desc; + u16 qs_id_l; + u16 qs_id_h; + int ret; + + map = (struct hclge_nq_to_qs_link_cmd *)desc.data; + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NQ_TO_QS_LINK, true); + map->nq_id = cpu_to_le16(q_id); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get queue to qset map, ret = %d\n", ret); + return ret; + } + *qset_id = le16_to_cpu(map->qset_id); + + /* convert qset_id to the following format, drop the vld bit + * | qs_id_h | vld | qs_id_l | + * qset_id: | 15 ~ 11 | 10 | 9 ~ 0 | + * \ \ / / + * \ \ / / + * qset_id: | 15 | 14 ~ 10 | 9 ~ 0 | + */ + qs_id_l = hnae3_get_field(*qset_id, HCLGE_TM_QS_ID_L_MSK, + HCLGE_TM_QS_ID_L_S); + qs_id_h = hnae3_get_field(*qset_id, HCLGE_TM_QS_ID_H_EXT_MSK, + HCLGE_TM_QS_ID_H_EXT_S); + *qset_id = 0; + hnae3_set_field(*qset_id, HCLGE_TM_QS_ID_L_MSK, HCLGE_TM_QS_ID_L_S, + qs_id_l); + hnae3_set_field(*qset_id, HCLGE_TM_QS_ID_H_MSK, HCLGE_TM_QS_ID_H_S, + qs_id_h); + return 0; +} + +int hclge_tm_get_q_to_tc(struct hclge_dev *hdev, u16 q_id, u8 *tc_id) +{ +#define HCLGE_TM_TC_MASK 0x7 + + struct hclge_tqp_tx_queue_tc_cmd *tc; + struct hclge_desc desc; + int ret; + + tc = (struct hclge_tqp_tx_queue_tc_cmd *)desc.data; + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TQP_TX_QUEUE_TC, true); + tc->queue_id = cpu_to_le16(q_id); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get queue to tc map, ret = %d\n", ret); + return ret; + } + + *tc_id = tc->tc_id & HCLGE_TM_TC_MASK; + return 0; +} + +int hclge_tm_get_pg_to_pri_map(struct hclge_dev *hdev, u8 pg_id, + u8 *pri_bit_map) +{ + struct hclge_pg_to_pri_link_cmd *map; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_TO_PRI_LINK, true); + map = (struct hclge_pg_to_pri_link_cmd *)desc.data; + map->pg_id = pg_id; + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get pg to pri map, ret = %d\n", ret); + return ret; + } + + *pri_bit_map = map->pri_bit_map; + return 0; +} + +int hclge_tm_get_pg_weight(struct hclge_dev *hdev, u8 pg_id, u8 *weight) +{ + struct hclge_pg_weight_cmd *pg_weight_cmd; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_WEIGHT, true); + pg_weight_cmd = (struct hclge_pg_weight_cmd *)desc.data; + pg_weight_cmd->pg_id = pg_id; + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + 
"failed to get pg weight, ret = %d\n", ret); + return ret; + } + + *weight = pg_weight_cmd->dwrr; + return 0; +} + +int hclge_tm_get_pg_sch_mode(struct hclge_dev *hdev, u8 pg_id, u8 *mode) +{ + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_SCH_MODE_CFG, true); + desc.data[0] = cpu_to_le32(pg_id); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get pg sch mode, ret = %d\n", ret); + return ret; + } + + *mode = (u8)le32_to_cpu(desc.data[1]); + return 0; +} + +int hclge_tm_get_pg_shaper(struct hclge_dev *hdev, u8 pg_id, + enum hclge_opcode_type cmd, + struct hclge_tm_shaper_para *para) +{ + struct hclge_pg_shapping_cmd *shap_cfg_cmd; + struct hclge_desc desc; + u32 shapping_para; + int ret; + + if (cmd != HCLGE_OPC_TM_PG_C_SHAPPING && + cmd != HCLGE_OPC_TM_PG_P_SHAPPING) + return -EINVAL; + + hclge_cmd_setup_basic_desc(&desc, cmd, true); + shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data; + shap_cfg_cmd->pg_id = pg_id; + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get pg shaper(%#x), ret = %d\n", + cmd, ret); + return ret; + } + + shapping_para = le32_to_cpu(shap_cfg_cmd->pg_shapping_para); + para->ir_b = hclge_tm_get_field(shapping_para, IR_B); + para->ir_u = hclge_tm_get_field(shapping_para, IR_U); + para->ir_s = hclge_tm_get_field(shapping_para, IR_S); + para->bs_b = hclge_tm_get_field(shapping_para, BS_B); + para->bs_s = hclge_tm_get_field(shapping_para, BS_S); + para->flag = shap_cfg_cmd->flag; + para->rate = le32_to_cpu(shap_cfg_cmd->pg_rate); + return 0; +} + +int hclge_tm_get_port_shaper(struct hclge_dev *hdev, + struct hclge_tm_shaper_para *para) +{ + struct hclge_port_shapping_cmd *port_shap_cfg_cmd; + struct hclge_desc desc; + u32 shapping_para; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get port shaper, ret = %d\n", ret); + return ret; + } + + port_shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data; + shapping_para = le32_to_cpu(port_shap_cfg_cmd->port_shapping_para); + para->ir_b = hclge_tm_get_field(shapping_para, IR_B); + para->ir_u = hclge_tm_get_field(shapping_para, IR_U); + para->ir_s = hclge_tm_get_field(shapping_para, IR_S); + para->bs_b = hclge_tm_get_field(shapping_para, BS_B); + para->bs_s = hclge_tm_get_field(shapping_para, BS_S); + para->flag = port_shap_cfg_cmd->flag; + para->rate = le32_to_cpu(port_shap_cfg_cmd->port_rate); + + return 0; +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h new file mode 100644 index 000000000..251e80845 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h @@ -0,0 +1,276 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +// Copyright (c) 2016-2017 Hisilicon Limited. 
+ +#ifndef __HCLGE_TM_H +#define __HCLGE_TM_H + +#include + +#include "hnae3.h" + +struct hclge_dev; +struct hclge_vport; +enum hclge_opcode_type; + +/* MAC Pause */ +#define HCLGE_TX_MAC_PAUSE_EN_MSK BIT(0) +#define HCLGE_RX_MAC_PAUSE_EN_MSK BIT(1) + +#define HCLGE_TM_PORT_BASE_MODE_MSK BIT(0) + +#define HCLGE_DEFAULT_PAUSE_TRANS_GAP 0x7F +#define HCLGE_DEFAULT_PAUSE_TRANS_TIME 0xFFFF + +/* SP or DWRR */ +#define HCLGE_TM_TX_SCHD_DWRR_MSK BIT(0) +#define HCLGE_TM_TX_SCHD_SP_MSK 0xFE + +#define HCLGE_ETHER_MAX_RATE 100000 + +#define HCLGE_TM_PF_MAX_PRI_NUM 8 +#define HCLGE_TM_PF_MAX_QSET_NUM 8 + +#define HCLGE_DSCP_MAP_TC_BD_NUM 2 +#define HCLGE_DSCP_TC_SHIFT(n) (((n) & 1) * 4) + +struct hclge_pg_to_pri_link_cmd { + u8 pg_id; + u8 rsvd1[3]; + u8 pri_bit_map; +}; + +struct hclge_qs_to_pri_link_cmd { + __le16 qs_id; + __le16 rsvd; + u8 priority; +#define HCLGE_TM_QS_PRI_LINK_VLD_MSK BIT(0) + u8 link_vld; +}; + +struct hclge_nq_to_qs_link_cmd { + __le16 nq_id; + __le16 rsvd; +#define HCLGE_TM_Q_QS_LINK_VLD_MSK BIT(10) +#define HCLGE_TM_QS_ID_L_MSK GENMASK(9, 0) +#define HCLGE_TM_QS_ID_L_S 0 +#define HCLGE_TM_QS_ID_H_MSK GENMASK(14, 10) +#define HCLGE_TM_QS_ID_H_S 10 +#define HCLGE_TM_QS_ID_H_EXT_S 11 +#define HCLGE_TM_QS_ID_H_EXT_MSK GENMASK(15, 11) + __le16 qset_id; +}; + +struct hclge_tqp_tx_queue_tc_cmd { + __le16 queue_id; + __le16 rsvd; + u8 tc_id; + u8 rev[3]; +}; + +struct hclge_pg_weight_cmd { + u8 pg_id; + u8 dwrr; +}; + +struct hclge_priority_weight_cmd { + u8 pri_id; + u8 dwrr; +}; + +struct hclge_pri_sch_mode_cfg_cmd { + u8 pri_id; + u8 rsvd[3]; + u8 sch_mode; +}; + +struct hclge_qs_sch_mode_cfg_cmd { + __le16 qs_id; + u8 rsvd[2]; + u8 sch_mode; +}; + +struct hclge_qs_weight_cmd { + __le16 qs_id; + u8 dwrr; +}; + +struct hclge_ets_tc_weight_cmd { + u8 tc_weight[HNAE3_MAX_TC]; + u8 weight_offset; + u8 rsvd[15]; +}; + +#define HCLGE_TM_SHAP_IR_B_MSK GENMASK(7, 0) +#define HCLGE_TM_SHAP_IR_B_LSH 0 +#define HCLGE_TM_SHAP_IR_U_MSK GENMASK(11, 8) +#define HCLGE_TM_SHAP_IR_U_LSH 8 +#define HCLGE_TM_SHAP_IR_S_MSK GENMASK(15, 12) +#define HCLGE_TM_SHAP_IR_S_LSH 12 +#define HCLGE_TM_SHAP_BS_B_MSK GENMASK(20, 16) +#define HCLGE_TM_SHAP_BS_B_LSH 16 +#define HCLGE_TM_SHAP_BS_S_MSK GENMASK(25, 21) +#define HCLGE_TM_SHAP_BS_S_LSH 21 + +enum hclge_shap_bucket { + HCLGE_TM_SHAP_C_BUCKET = 0, + HCLGE_TM_SHAP_P_BUCKET, +}; + +/* set bit HCLGE_TM_RATE_VLD to 1 means use 'rate' to config shaping */ +#define HCLGE_TM_RATE_VLD 0 + +struct hclge_pri_shapping_cmd { + u8 pri_id; + u8 rsvd[3]; + __le32 pri_shapping_para; + u8 flag; + u8 rsvd1[3]; + __le32 pri_rate; +}; + +struct hclge_pg_shapping_cmd { + u8 pg_id; + u8 rsvd[3]; + __le32 pg_shapping_para; + u8 flag; + u8 rsvd1[3]; + __le32 pg_rate; +}; + +struct hclge_qs_shapping_cmd { + __le16 qs_id; + u8 rsvd[2]; + __le32 qs_shapping_para; + u8 flag; + u8 rsvd1[3]; + __le32 qs_rate; +}; + +#define HCLGE_BP_GRP_NUM 32 +#define HCLGE_BP_SUB_GRP_ID_S 0 +#define HCLGE_BP_SUB_GRP_ID_M GENMASK(4, 0) +#define HCLGE_BP_GRP_ID_S 5 +#define HCLGE_BP_GRP_ID_M GENMASK(9, 5) + +#define HCLGE_BP_EXT_GRP_NUM 40 +#define HCLGE_BP_EXT_GRP_ID_S 5 +#define HCLGE_BP_EXT_GRP_ID_M GENMASK(10, 5) + +struct hclge_bp_to_qs_map_cmd { + u8 tc_id; + u8 rsvd[2]; + u8 qs_group_id; + __le32 qs_bit_map; + u32 rsvd1; +}; + +#define HCLGE_PFC_DISABLE 0 +#define HCLGE_PFC_TX_RX_DISABLE 0 + +struct hclge_pfc_en_cmd { + u8 tx_rx_en_bitmap; + u8 pri_en_bitmap; +}; + +struct hclge_cfg_pause_param_cmd { + u8 mac_addr[ETH_ALEN]; + u8 pause_trans_gap; + u8 rsvd; + __le16 pause_trans_time; + u8 
rsvd1[6]; + /* extra mac address to do double check for pause frame */ + u8 mac_addr_extra[ETH_ALEN]; + u16 rsvd2; +}; + +struct hclge_pfc_stats_cmd { + __le64 pkt_num[3]; +}; + +struct hclge_port_shapping_cmd { + __le32 port_shapping_para; + u8 flag; + u8 rsvd[3]; + __le32 port_rate; +}; + +struct hclge_shaper_ir_para { + u8 ir_b; /* IR_B parameter of IR shaper */ + u8 ir_u; /* IR_U parameter of IR shaper */ + u8 ir_s; /* IR_S parameter of IR shaper */ +}; + +struct hclge_tm_nodes_cmd { + u8 pg_base_id; + u8 pri_base_id; + __le16 qset_base_id; + __le16 queue_base_id; + u8 pg_num; + u8 pri_num; + __le16 qset_num; + __le16 queue_num; +}; + +struct hclge_tm_shaper_para { + u32 rate; + u8 ir_b; + u8 ir_u; + u8 ir_s; + u8 bs_b; + u8 bs_s; + u8 flag; +}; + +#define hclge_tm_set_field(dest, string, val) \ + hnae3_set_field((dest), \ + (HCLGE_TM_SHAP_##string##_MSK), \ + (HCLGE_TM_SHAP_##string##_LSH), val) +#define hclge_tm_get_field(src, string) \ + hnae3_get_field((src), HCLGE_TM_SHAP_##string##_MSK, \ + HCLGE_TM_SHAP_##string##_LSH) + +int hclge_tm_schd_init(struct hclge_dev *hdev); +int hclge_tm_vport_map_update(struct hclge_dev *hdev); +int hclge_pause_setup_hw(struct hclge_dev *hdev, bool init); +int hclge_tm_schd_setup_hw(struct hclge_dev *hdev); +void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc); +void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc); +void hclge_tm_pfc_info_update(struct hclge_dev *hdev); +int hclge_tm_dwrr_cfg(struct hclge_dev *hdev); +int hclge_tm_init_hw(struct hclge_dev *hdev, bool init); +int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap, + u8 pfc_bitmap); +int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx); +int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr); +int hclge_mac_pause_setup_hw(struct hclge_dev *hdev); +void hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats); +void hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats); +int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate); +int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev); +int hclge_tm_get_qset_num(struct hclge_dev *hdev, u16 *qset_num); +int hclge_tm_get_pri_num(struct hclge_dev *hdev, u8 *pri_num); +int hclge_tm_get_qset_map_pri(struct hclge_dev *hdev, u16 qset_id, u8 *priority, + u8 *link_vld); +int hclge_tm_get_qset_sch_mode(struct hclge_dev *hdev, u16 qset_id, u8 *mode); +int hclge_tm_get_qset_weight(struct hclge_dev *hdev, u16 qset_id, u8 *weight); +int hclge_tm_get_qset_shaper(struct hclge_dev *hdev, u16 qset_id, + struct hclge_tm_shaper_para *para); +int hclge_tm_get_pri_sch_mode(struct hclge_dev *hdev, u8 pri_id, u8 *mode); +int hclge_tm_get_pri_weight(struct hclge_dev *hdev, u8 pri_id, u8 *weight); +int hclge_tm_get_pri_shaper(struct hclge_dev *hdev, u8 pri_id, + enum hclge_opcode_type cmd, + struct hclge_tm_shaper_para *para); +int hclge_tm_get_q_to_qs_map(struct hclge_dev *hdev, u16 q_id, u16 *qset_id); +int hclge_tm_get_q_to_tc(struct hclge_dev *hdev, u16 q_id, u8 *tc_id); +int hclge_tm_get_pg_to_pri_map(struct hclge_dev *hdev, u8 pg_id, + u8 *pri_bit_map); +int hclge_tm_get_pg_weight(struct hclge_dev *hdev, u8 pg_id, u8 *weight); +int hclge_tm_get_pg_sch_mode(struct hclge_dev *hdev, u8 pg_id, u8 *mode); +int hclge_tm_get_pg_shaper(struct hclge_dev *hdev, u8 pg_id, + enum hclge_opcode_type cmd, + struct hclge_tm_shaper_para *para); +int hclge_tm_get_port_shaper(struct hclge_dev *hdev, + struct hclge_tm_shaper_para *para); +int hclge_up_to_tc_map(struct hclge_dev *hdev); 
+int hclge_dscp_to_tc_map(struct hclge_dev *hdev); +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h new file mode 100644 index 000000000..8510b88d4 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h @@ -0,0 +1,87 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2018-2020 Hisilicon Limited. */ + +/* This must be outside ifdef _HCLGE_TRACE_H */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM hns3 + +#if !defined(_HCLGE_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ) +#define _HCLGE_TRACE_H_ + +#include + +#define PF_GET_MBX_LEN (sizeof(struct hclge_mbx_vf_to_pf_cmd) / sizeof(u32)) +#define PF_SEND_MBX_LEN (sizeof(struct hclge_mbx_pf_to_vf_cmd) / sizeof(u32)) + +TRACE_EVENT(hclge_pf_mbx_get, + TP_PROTO( + struct hclge_dev *hdev, + struct hclge_mbx_vf_to_pf_cmd *req), + TP_ARGS(hdev, req), + + TP_STRUCT__entry( + __field(u8, vfid) + __field(u8, code) + __field(u8, subcode) + __string(pciname, pci_name(hdev->pdev)) + __string(devname, &hdev->vport[0].nic.kinfo.netdev->name) + __array(u32, mbx_data, PF_GET_MBX_LEN) + ), + + TP_fast_assign( + __entry->vfid = req->mbx_src_vfid; + __entry->code = req->msg.code; + __entry->subcode = req->msg.subcode; + __assign_str(pciname, pci_name(hdev->pdev)); + __assign_str(devname, &hdev->vport[0].nic.kinfo.netdev->name); + memcpy(__entry->mbx_data, req, + sizeof(struct hclge_mbx_vf_to_pf_cmd)); + ), + + TP_printk( + "%s %s vfid:%u code:%u subcode:%u data:%s", + __get_str(pciname), __get_str(devname), __entry->vfid, + __entry->code, __entry->subcode, + __print_array(__entry->mbx_data, PF_GET_MBX_LEN, sizeof(u32)) + ) +); + +TRACE_EVENT(hclge_pf_mbx_send, + TP_PROTO( + struct hclge_dev *hdev, + struct hclge_mbx_pf_to_vf_cmd *req), + TP_ARGS(hdev, req), + + TP_STRUCT__entry( + __field(u8, vfid) + __field(u16, code) + __string(pciname, pci_name(hdev->pdev)) + __string(devname, &hdev->vport[0].nic.kinfo.netdev->name) + __array(u32, mbx_data, PF_SEND_MBX_LEN) + ), + + TP_fast_assign( + __entry->vfid = req->dest_vfid; + __entry->code = le16_to_cpu(req->msg.code); + __assign_str(pciname, pci_name(hdev->pdev)); + __assign_str(devname, &hdev->vport[0].nic.kinfo.netdev->name); + memcpy(__entry->mbx_data, req, + sizeof(struct hclge_mbx_pf_to_vf_cmd)); + ), + + TP_printk( + "%s %s vfid:%u code:%u data:%s", + __get_str(pciname), __get_str(devname), __entry->vfid, + __entry->code, + __print_array(__entry->mbx_data, PF_SEND_MBX_LEN, sizeof(u32)) + ) +); + +#endif /* _HCLGE_TRACE_H_ */ + +/* This must be outside ifdef _HCLGE_TRACE_H */ +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE hclge_trace +#include diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h new file mode 100644 index 000000000..537b887fa --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h @@ -0,0 +1,136 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2016-2017 Hisilicon Limited. 
*/ + +#ifndef __HCLGEVF_CMD_H +#define __HCLGEVF_CMD_H +#include +#include +#include "hnae3.h" +#include "hclge_comm_cmd.h" + +#define HCLGEVF_CMDQ_RX_INVLD_B 0 +#define HCLGEVF_CMDQ_RX_OUTVLD_B 1 + +struct hclgevf_hw; +struct hclgevf_dev; + +#define HCLGEVF_SYNC_RX_RING_HEAD_EN_B 4 + +#define HCLGEVF_TQP_REG_OFFSET 0x80000 +#define HCLGEVF_TQP_REG_SIZE 0x200 + +#define HCLGEVF_TQP_MAX_SIZE_DEV_V2 1024 +#define HCLGEVF_TQP_EXT_REG_OFFSET 0x100 + +struct hclgevf_tqp_map { + __le16 tqp_id; /* Absolute tqp id for in this pf */ + u8 tqp_vf; /* VF id */ +#define HCLGEVF_TQP_MAP_TYPE_PF 0 +#define HCLGEVF_TQP_MAP_TYPE_VF 1 +#define HCLGEVF_TQP_MAP_TYPE_B 0 +#define HCLGEVF_TQP_MAP_EN_B 1 + u8 tqp_flag; /* Indicate it's pf or vf tqp */ + __le16 tqp_vid; /* Virtual id in this pf/vf */ + u8 rsv[18]; +}; + +#define HCLGEVF_VECTOR_ELEMENTS_PER_CMD 10 + +enum hclgevf_int_type { + HCLGEVF_INT_TX = 0, + HCLGEVF_INT_RX, + HCLGEVF_INT_EVENT, +}; + +struct hclgevf_ctrl_vector_chain { + u8 int_vector_id; + u8 int_cause_num; +#define HCLGEVF_INT_TYPE_S 0 +#define HCLGEVF_INT_TYPE_M 0x3 +#define HCLGEVF_TQP_ID_S 2 +#define HCLGEVF_TQP_ID_M (0x3fff << HCLGEVF_TQP_ID_S) + __le16 tqp_type_and_id[HCLGEVF_VECTOR_ELEMENTS_PER_CMD]; + u8 vfid; + u8 resv; +}; + +#define HCLGEVF_MSIX_OFT_ROCEE_S 0 +#define HCLGEVF_MSIX_OFT_ROCEE_M (0xffff << HCLGEVF_MSIX_OFT_ROCEE_S) +#define HCLGEVF_VEC_NUM_S 0 +#define HCLGEVF_VEC_NUM_M (0xff << HCLGEVF_VEC_NUM_S) +struct hclgevf_query_res_cmd { + __le16 tqp_num; + __le16 reserved; + __le16 msixcap_localid_ba_nic; + __le16 msixcap_localid_ba_rocee; + __le16 vf_intr_vector_number; + __le16 rsv[7]; +}; + +#define HCLGEVF_GRO_EN_B 0 +struct hclgevf_cfg_gro_status_cmd { + u8 gro_en; + u8 rsv[23]; +}; + +#define HCLGEVF_LINK_STS_B 0 +#define HCLGEVF_LINK_STATUS BIT(HCLGEVF_LINK_STS_B) +struct hclgevf_link_status_cmd { + u8 status; + u8 rsv[23]; +}; + +#define HCLGEVF_RING_ID_MASK 0x3ff +#define HCLGEVF_TQP_ENABLE_B 0 + +struct hclgevf_cfg_com_tqp_queue_cmd { + __le16 tqp_id; + __le16 stream_id; + u8 enable; + u8 rsv[19]; +}; + +struct hclgevf_cfg_tx_queue_pointer_cmd { + __le16 tqp_id; + __le16 tx_tail; + __le16 tx_head; + __le16 fbd_num; + __le16 ring_offset; + u8 rsv[14]; +}; + +/* this bit indicates that the driver is ready for hardware reset */ +#define HCLGEVF_NIC_SW_RST_RDY_B 16 +#define HCLGEVF_NIC_SW_RST_RDY BIT(HCLGEVF_NIC_SW_RST_RDY_B) + +#define HCLGEVF_NIC_CMQ_DESC_NUM 1024 +#define HCLGEVF_NIC_CMQ_DESC_NUM_S 3 + +#define HCLGEVF_QUERY_DEV_SPECS_BD_NUM 4 + +#define hclgevf_cmd_setup_basic_desc(desc, opcode, is_read) \ + hclge_comm_cmd_setup_basic_desc(desc, opcode, is_read) + +struct hclgevf_dev_specs_0_cmd { + __le32 rsv0; + __le32 mac_entry_num; + __le32 mng_entry_num; + __le16 rss_ind_tbl_size; + __le16 rss_key_size; + __le16 int_ql_max; + u8 max_non_tso_bd_num; + u8 rsv1[5]; +}; + +#define HCLGEVF_DEF_MAX_INT_GL 0x1FE0U + +struct hclgevf_dev_specs_1_cmd { + __le16 max_frm_size; + __le16 rsv0; + __le16 max_int_gl; + u8 rsv1[18]; +}; + +int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclge_desc *desc, int num); +void hclgevf_arq_init(struct hclgevf_dev *hdev); +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c new file mode 100644 index 000000000..fdc19868b --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c @@ -0,0 +1,136 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) 2021 Hisilicon Limited. 
*/ + +#include + +#include "hclgevf_devlink.h" + +static int hclgevf_devlink_info_get(struct devlink *devlink, + struct devlink_info_req *req, + struct netlink_ext_ack *extack) +{ +#define HCLGEVF_DEVLINK_FW_STRING_LEN 32 + struct hclgevf_devlink_priv *priv = devlink_priv(devlink); + char version_str[HCLGEVF_DEVLINK_FW_STRING_LEN]; + struct hclgevf_dev *hdev = priv->hdev; + int ret; + + ret = devlink_info_driver_name_put(req, KBUILD_MODNAME); + if (ret) + return ret; + + snprintf(version_str, sizeof(version_str), "%lu.%lu.%lu.%lu", + hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE3_MASK, + HNAE3_FW_VERSION_BYTE3_SHIFT), + hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE2_MASK, + HNAE3_FW_VERSION_BYTE2_SHIFT), + hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE1_MASK, + HNAE3_FW_VERSION_BYTE1_SHIFT), + hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE0_MASK, + HNAE3_FW_VERSION_BYTE0_SHIFT)); + + return devlink_info_version_running_put(req, + DEVLINK_INFO_VERSION_GENERIC_FW, + version_str); +} + +static int hclgevf_devlink_reload_down(struct devlink *devlink, + bool netns_change, + enum devlink_reload_action action, + enum devlink_reload_limit limit, + struct netlink_ext_ack *extack) +{ + struct hclgevf_devlink_priv *priv = devlink_priv(devlink); + struct hclgevf_dev *hdev = priv->hdev; + struct hnae3_handle *h = &hdev->nic; + struct pci_dev *pdev = hdev->pdev; + int ret; + + if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) { + dev_err(&pdev->dev, "reset is handling\n"); + return -EBUSY; + } + + switch (action) { + case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: + rtnl_lock(); + ret = hdev->nic_client->ops->reset_notify(h, HNAE3_DOWN_CLIENT); + if (ret) { + rtnl_unlock(); + return ret; + } + + ret = hdev->nic_client->ops->reset_notify(h, + HNAE3_UNINIT_CLIENT); + rtnl_unlock(); + return ret; + default: + return -EOPNOTSUPP; + } +} + +static int hclgevf_devlink_reload_up(struct devlink *devlink, + enum devlink_reload_action action, + enum devlink_reload_limit limit, + u32 *actions_performed, + struct netlink_ext_ack *extack) +{ + struct hclgevf_devlink_priv *priv = devlink_priv(devlink); + struct hclgevf_dev *hdev = priv->hdev; + struct hnae3_handle *h = &hdev->nic; + int ret; + + *actions_performed = BIT(action); + switch (action) { + case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: + rtnl_lock(); + ret = hdev->nic_client->ops->reset_notify(h, HNAE3_INIT_CLIENT); + if (ret) { + rtnl_unlock(); + return ret; + } + + ret = hdev->nic_client->ops->reset_notify(h, HNAE3_UP_CLIENT); + rtnl_unlock(); + return ret; + default: + return -EOPNOTSUPP; + } +} + +static const struct devlink_ops hclgevf_devlink_ops = { + .info_get = hclgevf_devlink_info_get, + .reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT), + .reload_down = hclgevf_devlink_reload_down, + .reload_up = hclgevf_devlink_reload_up, +}; + +int hclgevf_devlink_init(struct hclgevf_dev *hdev) +{ + struct pci_dev *pdev = hdev->pdev; + struct hclgevf_devlink_priv *priv; + struct devlink *devlink; + + devlink = + devlink_alloc(&hclgevf_devlink_ops, + sizeof(struct hclgevf_devlink_priv), &pdev->dev); + if (!devlink) + return -ENOMEM; + + priv = devlink_priv(devlink); + priv->hdev = hdev; + hdev->devlink = devlink; + + devlink_set_features(devlink, DEVLINK_F_RELOAD); + devlink_register(devlink); + return 0; +} + +void hclgevf_devlink_uninit(struct hclgevf_dev *hdev) +{ + struct devlink *devlink = hdev->devlink; + + devlink_unregister(devlink); + + devlink_free(devlink); +} diff --git 
a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.h new file mode 100644 index 000000000..e09ea3d8a --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2021 Hisilicon Limited. */ + +#ifndef __HCLGEVF_DEVLINK_H +#define __HCLGEVF_DEVLINK_H + +#include "hclgevf_main.h" + +struct hclgevf_devlink_priv { + struct hclgevf_dev *hdev; +}; + +int hclgevf_devlink_init(struct hclgevf_dev *hdev); +void hclgevf_devlink_uninit(struct hclgevf_dev *hdev); +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c new file mode 100644 index 000000000..5a978ea10 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -0,0 +1,3482 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. + +#include +#include +#include +#include "hclgevf_cmd.h" +#include "hclgevf_main.h" +#include "hclge_mbx.h" +#include "hnae3.h" +#include "hclgevf_devlink.h" +#include "hclge_comm_rss.h" + +#define HCLGEVF_NAME "hclgevf" + +#define HCLGEVF_RESET_MAX_FAIL_CNT 5 + +static int hclgevf_reset_hdev(struct hclgevf_dev *hdev); +static void hclgevf_task_schedule(struct hclgevf_dev *hdev, + unsigned long delay); + +static struct hnae3_ae_algo ae_algovf; + +static struct workqueue_struct *hclgevf_wq; + +static const struct pci_device_id ae_algovf_pci_tbl[] = { + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_VF), 0}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_RDMA_DCB_PFC_VF), + HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, + /* required last entry */ + {0, } +}; + +MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl); + +static const u32 cmdq_reg_addr_list[] = {HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG, + HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG, + HCLGE_COMM_NIC_CSQ_DEPTH_REG, + HCLGE_COMM_NIC_CSQ_TAIL_REG, + HCLGE_COMM_NIC_CSQ_HEAD_REG, + HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG, + HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG, + HCLGE_COMM_NIC_CRQ_DEPTH_REG, + HCLGE_COMM_NIC_CRQ_TAIL_REG, + HCLGE_COMM_NIC_CRQ_HEAD_REG, + HCLGE_COMM_VECTOR0_CMDQ_SRC_REG, + HCLGE_COMM_VECTOR0_CMDQ_STATE_REG, + HCLGE_COMM_CMDQ_INTR_EN_REG, + HCLGE_COMM_CMDQ_INTR_GEN_REG}; + +static const u32 common_reg_addr_list[] = {HCLGEVF_MISC_VECTOR_REG_BASE, + HCLGEVF_RST_ING, + HCLGEVF_GRO_EN_REG}; + +static const u32 ring_reg_addr_list[] = {HCLGEVF_RING_RX_ADDR_L_REG, + HCLGEVF_RING_RX_ADDR_H_REG, + HCLGEVF_RING_RX_BD_NUM_REG, + HCLGEVF_RING_RX_BD_LENGTH_REG, + HCLGEVF_RING_RX_MERGE_EN_REG, + HCLGEVF_RING_RX_TAIL_REG, + HCLGEVF_RING_RX_HEAD_REG, + HCLGEVF_RING_RX_FBD_NUM_REG, + HCLGEVF_RING_RX_OFFSET_REG, + HCLGEVF_RING_RX_FBD_OFFSET_REG, + HCLGEVF_RING_RX_STASH_REG, + HCLGEVF_RING_RX_BD_ERR_REG, + HCLGEVF_RING_TX_ADDR_L_REG, + HCLGEVF_RING_TX_ADDR_H_REG, + HCLGEVF_RING_TX_BD_NUM_REG, + HCLGEVF_RING_TX_PRIORITY_REG, + HCLGEVF_RING_TX_TC_REG, + HCLGEVF_RING_TX_MERGE_EN_REG, + HCLGEVF_RING_TX_TAIL_REG, + HCLGEVF_RING_TX_HEAD_REG, + HCLGEVF_RING_TX_FBD_NUM_REG, + HCLGEVF_RING_TX_OFFSET_REG, + HCLGEVF_RING_TX_EBD_NUM_REG, + HCLGEVF_RING_TX_EBD_OFFSET_REG, + HCLGEVF_RING_TX_BD_ERR_REG, + HCLGEVF_RING_EN_REG}; + +static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG, + HCLGEVF_TQP_INTR_GL0_REG, + HCLGEVF_TQP_INTR_GL1_REG, + HCLGEVF_TQP_INTR_GL2_REG, + HCLGEVF_TQP_INTR_RL_REG}; + +/* hclgevf_cmd_send - send command to command queue + * @hw: pointer to the hw struct + * @desc: prefilled descriptor for describing the 
command + * @num : the number of descriptors to be sent + * + * This is the main send command for command queue, it + * sends the queue, cleans the queue, etc + */ +int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclge_desc *desc, int num) +{ + return hclge_comm_cmd_send(&hw->hw, desc, num); +} + +void hclgevf_arq_init(struct hclgevf_dev *hdev) +{ + struct hclge_comm_cmq *cmdq = &hdev->hw.hw.cmq; + + spin_lock(&cmdq->crq.lock); + /* initialize the pointers of async rx queue of mailbox */ + hdev->arq.hdev = hdev; + hdev->arq.head = 0; + hdev->arq.tail = 0; + atomic_set(&hdev->arq.count, 0); + spin_unlock(&cmdq->crq.lock); +} + +static struct hclgevf_dev *hclgevf_ae_get_hdev(struct hnae3_handle *handle) +{ + if (!handle->client) + return container_of(handle, struct hclgevf_dev, nic); + else if (handle->client->type == HNAE3_CLIENT_ROCE) + return container_of(handle, struct hclgevf_dev, roce); + else + return container_of(handle, struct hclgevf_dev, nic); +} + +static void hclgevf_update_stats(struct hnae3_handle *handle, + struct net_device_stats *net_stats) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + int status; + + status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw); + if (status) + dev_err(&hdev->pdev->dev, + "VF update of TQPS stats fail, status = %d.\n", + status); +} + +static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset) +{ + if (strset == ETH_SS_TEST) + return -EOPNOTSUPP; + else if (strset == ETH_SS_STATS) + return hclge_comm_tqps_get_sset_count(handle); + + return 0; +} + +static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset, + u8 *data) +{ + u8 *p = (char *)data; + + if (strset == ETH_SS_STATS) + p = hclge_comm_tqps_get_strings(handle, p); +} + +static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data) +{ + hclge_comm_tqps_get_stats(handle, data); +} + +static void hclgevf_build_send_msg(struct hclge_vf_to_pf_msg *msg, u8 code, + u8 subcode) +{ + if (msg) { + memset(msg, 0, sizeof(struct hclge_vf_to_pf_msg)); + msg->code = code; + msg->subcode = subcode; + } +} + +static int hclgevf_get_basic_info(struct hclgevf_dev *hdev) +{ + struct hnae3_ae_dev *ae_dev = hdev->ae_dev; + u8 resp_msg[HCLGE_MBX_MAX_RESP_DATA_SIZE]; + struct hclge_basic_info *basic_info; + struct hclge_vf_to_pf_msg send_msg; + unsigned long caps; + int status; + + hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_BASIC_INFO, 0); + status = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg, + sizeof(resp_msg)); + if (status) { + dev_err(&hdev->pdev->dev, + "failed to get basic info from pf, ret = %d", status); + return status; + } + + basic_info = (struct hclge_basic_info *)resp_msg; + + hdev->hw_tc_map = basic_info->hw_tc_map; + hdev->mbx_api_version = le16_to_cpu(basic_info->mbx_api_version); + caps = le32_to_cpu(basic_info->pf_caps); + if (test_bit(HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B, &caps)) + set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps); + + return 0; +} + +static int hclgevf_get_port_base_vlan_filter_state(struct hclgevf_dev *hdev) +{ + struct hnae3_handle *nic = &hdev->nic; + struct hclge_vf_to_pf_msg send_msg; + u8 resp_msg; + int ret; + + hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN, + HCLGE_MBX_GET_PORT_BASE_VLAN_STATE); + ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, &resp_msg, + sizeof(u8)); + if (ret) { + dev_err(&hdev->pdev->dev, + "VF request to get port based vlan state failed %d", + ret); + return ret; + } + + nic->port_base_vlan_state = resp_msg; + + return 0; +} + +static int 
hclgevf_get_queue_info(struct hclgevf_dev *hdev) +{ +#define HCLGEVF_TQPS_RSS_INFO_LEN 6 + + struct hclge_mbx_vf_queue_info *queue_info; + u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN]; + struct hclge_vf_to_pf_msg send_msg; + int status; + + hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QINFO, 0); + status = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg, + HCLGEVF_TQPS_RSS_INFO_LEN); + if (status) { + dev_err(&hdev->pdev->dev, + "VF request to get tqp info from PF failed %d", + status); + return status; + } + + queue_info = (struct hclge_mbx_vf_queue_info *)resp_msg; + hdev->num_tqps = le16_to_cpu(queue_info->num_tqps); + hdev->rss_size_max = le16_to_cpu(queue_info->rss_size); + hdev->rx_buf_len = le16_to_cpu(queue_info->rx_buf_len); + + return 0; +} + +static int hclgevf_get_queue_depth(struct hclgevf_dev *hdev) +{ +#define HCLGEVF_TQPS_DEPTH_INFO_LEN 4 + + struct hclge_mbx_vf_queue_depth *queue_depth; + u8 resp_msg[HCLGEVF_TQPS_DEPTH_INFO_LEN]; + struct hclge_vf_to_pf_msg send_msg; + int ret; + + hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QDEPTH, 0); + ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg, + HCLGEVF_TQPS_DEPTH_INFO_LEN); + if (ret) { + dev_err(&hdev->pdev->dev, + "VF request to get tqp depth info from PF failed %d", + ret); + return ret; + } + + queue_depth = (struct hclge_mbx_vf_queue_depth *)resp_msg; + hdev->num_tx_desc = le16_to_cpu(queue_depth->num_tx_desc); + hdev->num_rx_desc = le16_to_cpu(queue_depth->num_rx_desc); + + return 0; +} + +static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + struct hclge_vf_to_pf_msg send_msg; + u16 qid_in_pf = 0; + u8 resp_data[2]; + int ret; + + hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QID_IN_PF, 0); + *(__le16 *)send_msg.data = cpu_to_le16(queue_id); + ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_data, + sizeof(resp_data)); + if (!ret) + qid_in_pf = le16_to_cpu(*(__le16 *)resp_data); + + return qid_in_pf; +} + +static int hclgevf_get_pf_media_type(struct hclgevf_dev *hdev) +{ + struct hclge_vf_to_pf_msg send_msg; + u8 resp_msg[2]; + int ret; + + hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MEDIA_TYPE, 0); + ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg, + sizeof(resp_msg)); + if (ret) { + dev_err(&hdev->pdev->dev, + "VF request to get the pf port media type failed %d", + ret); + return ret; + } + + hdev->hw.mac.media_type = resp_msg[0]; + hdev->hw.mac.module_type = resp_msg[1]; + + return 0; +} + +static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); + struct hclge_comm_tqp *tqp; + int i; + + hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps, + sizeof(struct hclge_comm_tqp), GFP_KERNEL); + if (!hdev->htqp) + return -ENOMEM; + + tqp = hdev->htqp; + + for (i = 0; i < hdev->num_tqps; i++) { + tqp->dev = &hdev->pdev->dev; + tqp->index = i; + + tqp->q.ae_algo = &ae_algovf; + tqp->q.buf_size = hdev->rx_buf_len; + tqp->q.tx_desc_num = hdev->num_tx_desc; + tqp->q.rx_desc_num = hdev->num_rx_desc; + + /* need an extended offset to configure queues >= + * HCLGEVF_TQP_MAX_SIZE_DEV_V2. 
+ */ + if (i < HCLGEVF_TQP_MAX_SIZE_DEV_V2) + tqp->q.io_base = hdev->hw.hw.io_base + + HCLGEVF_TQP_REG_OFFSET + + i * HCLGEVF_TQP_REG_SIZE; + else + tqp->q.io_base = hdev->hw.hw.io_base + + HCLGEVF_TQP_REG_OFFSET + + HCLGEVF_TQP_EXT_REG_OFFSET + + (i - HCLGEVF_TQP_MAX_SIZE_DEV_V2) * + HCLGEVF_TQP_REG_SIZE; + + /* when device supports tx push and has device memory, + * the queue can execute push mode or doorbell mode on + * device memory. + */ + if (test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, ae_dev->caps)) + tqp->q.mem_base = hdev->hw.hw.mem_base + + HCLGEVF_TQP_MEM_OFFSET(hdev, i); + + tqp++; + } + + return 0; +} + +static int hclgevf_knic_setup(struct hclgevf_dev *hdev) +{ + struct hnae3_handle *nic = &hdev->nic; + struct hnae3_knic_private_info *kinfo; + u16 new_tqps = hdev->num_tqps; + unsigned int i; + u8 num_tc = 0; + + kinfo = &nic->kinfo; + kinfo->num_tx_desc = hdev->num_tx_desc; + kinfo->num_rx_desc = hdev->num_rx_desc; + kinfo->rx_buf_len = hdev->rx_buf_len; + for (i = 0; i < HCLGE_COMM_MAX_TC_NUM; i++) + if (hdev->hw_tc_map & BIT(i)) + num_tc++; + + num_tc = num_tc ? num_tc : 1; + kinfo->tc_info.num_tc = num_tc; + kinfo->rss_size = min_t(u16, hdev->rss_size_max, new_tqps / num_tc); + new_tqps = kinfo->rss_size * num_tc; + kinfo->num_tqps = min(new_tqps, hdev->num_tqps); + + kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps, + sizeof(struct hnae3_queue *), GFP_KERNEL); + if (!kinfo->tqp) + return -ENOMEM; + + for (i = 0; i < kinfo->num_tqps; i++) { + hdev->htqp[i].q.handle = &hdev->nic; + hdev->htqp[i].q.tqp_index = i; + kinfo->tqp[i] = &hdev->htqp[i].q; + } + + /* after init the max rss_size and tqps, adjust the default tqp numbers + * and rss size with the actual vector numbers + */ + kinfo->num_tqps = min_t(u16, hdev->num_nic_msix - 1, kinfo->num_tqps); + kinfo->rss_size = min_t(u16, kinfo->num_tqps / num_tc, + kinfo->rss_size); + + return 0; +} + +static void hclgevf_request_link_info(struct hclgevf_dev *hdev) +{ + struct hclge_vf_to_pf_msg send_msg; + int status; + + hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_STATUS, 0); + status = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); + if (status) + dev_err(&hdev->pdev->dev, + "VF failed to fetch link status(%d) from PF", status); +} + +void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state) +{ + struct hnae3_handle *rhandle = &hdev->roce; + struct hnae3_handle *handle = &hdev->nic; + struct hnae3_client *rclient; + struct hnae3_client *client; + + if (test_and_set_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state)) + return; + + client = handle->client; + rclient = hdev->roce_client; + + link_state = + test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 
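Note: the per-queue doorbell address computed in hclgevf_alloc_tqps() splits into two windows once the queue index passes HCLGEVF_TQP_MAX_SIZE_DEV_V2: higher queues are addressed through an extended window, re-based at index zero. The sketch below reproduces only that arithmetic; the constant values are placeholders, the real ones come from hclgevf_main.h.

#include <stddef.h>

/* Placeholder values -- the real constants live in hclgevf_main.h. */
#define TQP_REG_OFFSET		0x10000UL
#define TQP_REG_SIZE		0x200UL
#define TQP_EXT_REG_OFFSET	0x30000UL
#define TQP_MAX_SIZE_DEV_V2	1024U

/* Queues below TQP_MAX_SIZE_DEV_V2 sit in the original register window;
 * anything above it goes through the extended window with the same
 * per-queue stride, re-based so index TQP_MAX_SIZE_DEV_V2 maps to 0.
 */
static ptrdiff_t tqp_reg_offset(unsigned int i)
{
	if (i < TQP_MAX_SIZE_DEV_V2)
		return (ptrdiff_t)(TQP_REG_OFFSET + i * TQP_REG_SIZE);

	return (ptrdiff_t)(TQP_REG_OFFSET + TQP_EXT_REG_OFFSET +
			   (i - TQP_MAX_SIZE_DEV_V2) * TQP_REG_SIZE);
}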
0 : link_state; + if (link_state != hdev->hw.mac.link) { + hdev->hw.mac.link = link_state; + client->ops->link_status_change(handle, !!link_state); + if (rclient && rclient->ops->link_status_change) + rclient->ops->link_status_change(rhandle, !!link_state); + } + + clear_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state); +} + +static void hclgevf_update_link_mode(struct hclgevf_dev *hdev) +{ +#define HCLGEVF_ADVERTISING 0 +#define HCLGEVF_SUPPORTED 1 + + struct hclge_vf_to_pf_msg send_msg; + + hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_MODE, 0); + send_msg.data[0] = HCLGEVF_ADVERTISING; + hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); + send_msg.data[0] = HCLGEVF_SUPPORTED; + hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); +} + +static int hclgevf_set_handle_info(struct hclgevf_dev *hdev) +{ + struct hnae3_handle *nic = &hdev->nic; + int ret; + + nic->ae_algo = &ae_algovf; + nic->pdev = hdev->pdev; + nic->numa_node_mask = hdev->numa_node_mask; + nic->flags |= HNAE3_SUPPORT_VF; + nic->kinfo.io_base = hdev->hw.hw.io_base; + + ret = hclgevf_knic_setup(hdev); + if (ret) + dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n", + ret); + return ret; +} + +static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id) +{ + if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) { + dev_warn(&hdev->pdev->dev, + "vector(vector_id %d) has been freed.\n", vector_id); + return; + } + + hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT; + hdev->num_msi_left += 1; + hdev->num_msi_used -= 1; +} + +static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num, + struct hnae3_vector_info *vector_info) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + struct hnae3_vector_info *vector = vector_info; + int alloc = 0; + int i, j; + + vector_num = min_t(u16, hdev->num_nic_msix - 1, vector_num); + vector_num = min(hdev->num_msi_left, vector_num); + + for (j = 0; j < vector_num; j++) { + for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) { + if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) { + vector->vector = pci_irq_vector(hdev->pdev, i); + vector->io_addr = hdev->hw.hw.io_base + + HCLGEVF_VECTOR_REG_BASE + + (i - 1) * HCLGEVF_VECTOR_REG_OFFSET; + hdev->vector_status[i] = 0; + hdev->vector_irq[i] = vector->vector; + + vector++; + alloc++; + + break; + } + } + } + hdev->num_msi_left -= alloc; + hdev->num_msi_used += alloc; + + return alloc; +} + +static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector) +{ + int i; + + for (i = 0; i < hdev->num_msi; i++) + if (vector == hdev->vector_irq[i]) + return i; + + return -EINVAL; +} + +/* for revision 0x20, vf shared the same rss config with pf */ +static int hclgevf_get_rss_hash_key(struct hclgevf_dev *hdev) +{ +#define HCLGEVF_RSS_MBX_RESP_LEN 8 + struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg; + u8 resp_msg[HCLGEVF_RSS_MBX_RESP_LEN]; + struct hclge_vf_to_pf_msg send_msg; + u16 msg_num, hash_key_index; + u8 index; + int ret; + + hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_RSS_KEY, 0); + msg_num = (HCLGE_COMM_RSS_KEY_SIZE + HCLGEVF_RSS_MBX_RESP_LEN - 1) / + HCLGEVF_RSS_MBX_RESP_LEN; + for (index = 0; index < msg_num; index++) { + send_msg.data[0] = index; + ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg, + HCLGEVF_RSS_MBX_RESP_LEN); + if (ret) { + dev_err(&hdev->pdev->dev, + "VF get rss hash key from PF failed, ret=%d", + ret); + return ret; + } + + hash_key_index = HCLGEVF_RSS_MBX_RESP_LEN * index; + if (index == msg_num - 1) + 
memcpy(&rss_cfg->rss_hash_key[hash_key_index], + &resp_msg[0], + HCLGE_COMM_RSS_KEY_SIZE - hash_key_index); + else + memcpy(&rss_cfg->rss_hash_key[hash_key_index], + &resp_msg[0], HCLGEVF_RSS_MBX_RESP_LEN); + } + + return 0; +} + +static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg; + int ret; + + if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { + hclge_comm_get_rss_hash_info(rss_cfg, key, hfunc); + } else { + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + if (key) { + ret = hclgevf_get_rss_hash_key(hdev); + if (ret) + return ret; + memcpy(key, rss_cfg->rss_hash_key, + HCLGE_COMM_RSS_KEY_SIZE); + } + } + + hclge_comm_get_rss_indir_tbl(rss_cfg, indir, + hdev->ae_dev->dev_specs.rss_ind_tbl_size); + + return 0; +} + +static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg; + int ret, i; + + if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { + ret = hclge_comm_set_rss_hash_key(rss_cfg, &hdev->hw.hw, key, + hfunc); + if (ret) + return ret; + } + + /* update the shadow RSS table with user specified qids */ + for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++) + rss_cfg->rss_indirection_tbl[i] = indir[i]; + + /* update the hardware */ + return hclge_comm_set_rss_indir_table(hdev->ae_dev, &hdev->hw.hw, + rss_cfg->rss_indirection_tbl); +} + +static int hclgevf_set_rss_tuple(struct hnae3_handle *handle, + struct ethtool_rxnfc *nfc) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + int ret; + + if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) + return -EOPNOTSUPP; + + ret = hclge_comm_set_rss_tuple(hdev->ae_dev, &hdev->hw.hw, + &hdev->rss_cfg, nfc); + if (ret) + dev_err(&hdev->pdev->dev, + "failed to set rss tuple, ret = %d.\n", ret); + + return ret; +} + +static int hclgevf_get_rss_tuple(struct hnae3_handle *handle, + struct ethtool_rxnfc *nfc) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + u8 tuple_sets; + int ret; + + if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) + return -EOPNOTSUPP; + + nfc->data = 0; + + ret = hclge_comm_get_rss_tuple(&hdev->rss_cfg, nfc->flow_type, + &tuple_sets); + if (ret || !tuple_sets) + return ret; + + nfc->data = hclge_comm_convert_rss_tuple(tuple_sets); + + return 0; +} + +static int hclgevf_get_tc_size(struct hnae3_handle *handle) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg; + + return rss_cfg->rss_size; +} + +static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en, + int vector_id, + struct hnae3_ring_chain_node *ring_chain) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + struct hclge_vf_to_pf_msg send_msg; + struct hnae3_ring_chain_node *node; + int status; + int i = 0; + + memset(&send_msg, 0, sizeof(send_msg)); + send_msg.code = en ? 
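Note: hclgevf_get_rss_hash_key() pulls the RSS key from the PF in fixed 8-byte mailbox replies, with a partial copy for the final chunk. The loop below reproduces just that chunking arithmetic over a local buffer; the key size of 40 bytes mirrors the common RSS code but is marked illustrative here.

#include <stdint.h>
#include <string.h>

#define RSS_KEY_SIZE	40	/* illustrative; HCLGE_COMM_RSS_KEY_SIZE */
#define RSS_CHUNK_LEN	8	/* illustrative; HCLGEVF_RSS_MBX_RESP_LEN */

/* Copy a key that arrives in fixed-size chunks; the final chunk only
 * carries the remaining bytes, exactly as in the mailbox loop above.
 */
static void copy_rss_key(uint8_t *dst, const uint8_t *src)
{
	unsigned int msg_num = (RSS_KEY_SIZE + RSS_CHUNK_LEN - 1) /
			       RSS_CHUNK_LEN;
	unsigned int index;

	for (index = 0; index < msg_num; index++) {
		unsigned int off = index * RSS_CHUNK_LEN;
		size_t len = (index == msg_num - 1) ?
			     RSS_KEY_SIZE - off : RSS_CHUNK_LEN;

		memcpy(dst + off, src + off, len);
	}
}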
HCLGE_MBX_MAP_RING_TO_VECTOR : + HCLGE_MBX_UNMAP_RING_TO_VECTOR; + send_msg.vector_id = vector_id; + + for (node = ring_chain; node; node = node->next) { + send_msg.param[i].ring_type = + hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B); + + send_msg.param[i].tqp_index = node->tqp_index; + send_msg.param[i].int_gl_index = + hnae3_get_field(node->int_gl_idx, + HNAE3_RING_GL_IDX_M, + HNAE3_RING_GL_IDX_S); + + i++; + if (i == HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM || !node->next) { + send_msg.ring_num = i; + + status = hclgevf_send_mbx_msg(hdev, &send_msg, false, + NULL, 0); + if (status) { + dev_err(&hdev->pdev->dev, + "Map TQP fail, status is %d.\n", + status); + return status; + } + i = 0; + } + } + + return 0; +} + +static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector, + struct hnae3_ring_chain_node *ring_chain) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + int vector_id; + + vector_id = hclgevf_get_vector_index(hdev, vector); + if (vector_id < 0) { + dev_err(&handle->pdev->dev, + "Get vector index fail. ret =%d\n", vector_id); + return vector_id; + } + + return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain); +} + +static int hclgevf_unmap_ring_from_vector( + struct hnae3_handle *handle, + int vector, + struct hnae3_ring_chain_node *ring_chain) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + int ret, vector_id; + + if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) + return 0; + + vector_id = hclgevf_get_vector_index(hdev, vector); + if (vector_id < 0) { + dev_err(&handle->pdev->dev, + "Get vector index fail. ret =%d\n", vector_id); + return vector_id; + } + + ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain); + if (ret) + dev_err(&handle->pdev->dev, + "Unmap ring from vector fail. vector=%d, ret =%d\n", + vector_id, + ret); + + return ret; +} + +static int hclgevf_put_vector(struct hnae3_handle *handle, int vector) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + int vector_id; + + vector_id = hclgevf_get_vector_index(hdev, vector); + if (vector_id < 0) { + dev_err(&handle->pdev->dev, + "hclgevf_put_vector get vector index fail. ret =%d\n", + vector_id); + return vector_id; + } + + hclgevf_free_vector(hdev, vector_id); + + return 0; +} + +static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev, + bool en_uc_pmc, bool en_mc_pmc, + bool en_bc_pmc) +{ + struct hnae3_handle *handle = &hdev->nic; + struct hclge_vf_to_pf_msg send_msg; + int ret; + + memset(&send_msg, 0, sizeof(send_msg)); + send_msg.code = HCLGE_MBX_SET_PROMISC_MODE; + send_msg.en_bc = en_bc_pmc ? 1 : 0; + send_msg.en_uc = en_uc_pmc ? 1 : 0; + send_msg.en_mc = en_mc_pmc ? 1 : 0; + send_msg.en_limit_promisc = test_bit(HNAE3_PFLAG_LIMIT_PROMISC, + &handle->priv_flags) ? 
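Note: hclgevf_bind_ring_to_vector() walks the ring chain and flushes one mailbox message every HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM entries, or sooner when the chain ends. The same batching shape in a generic, self-contained form; the batch size of 4 and the flush() callback are stand-ins, not the driver's real values or API.

#include <stddef.h>

#define MAX_PARAMS_PER_MSG 4	/* stand-in for the mailbox param limit */

struct chain_node {
	int tqp_index;
	struct chain_node *next;
};

/* Walk a linked chain and emit it in fixed-size batches, flushing a
 * partially filled batch when the chain runs out -- the same shape as
 * the ring-to-vector mapping loop above.
 */
static int send_in_batches(struct chain_node *chain,
			   int (*flush)(const int *tqps, size_t num))
{
	int tqps[MAX_PARAMS_PER_MSG];
	struct chain_node *node;
	size_t i = 0;

	for (node = chain; node; node = node->next) {
		tqps[i++] = node->tqp_index;
		if (i == MAX_PARAMS_PER_MSG || !node->next) {
			int ret = flush(tqps, i);

			if (ret)
				return ret;
			i = 0;
		}
	}
	return 0;
}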
1 : 0; + + ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); + if (ret) + dev_err(&hdev->pdev->dev, + "Set promisc mode fail, status is %d.\n", ret); + + return ret; +} + +static int hclgevf_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc, + bool en_mc_pmc) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + bool en_bc_pmc; + + en_bc_pmc = hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2; + + return hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc, + en_bc_pmc); +} + +static void hclgevf_request_update_promisc_mode(struct hnae3_handle *handle) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + + set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state); + hclgevf_task_schedule(hdev, 0); +} + +static void hclgevf_sync_promisc_mode(struct hclgevf_dev *hdev) +{ + struct hnae3_handle *handle = &hdev->nic; + bool en_uc_pmc = handle->netdev_flags & HNAE3_UPE; + bool en_mc_pmc = handle->netdev_flags & HNAE3_MPE; + int ret; + + if (test_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state)) { + ret = hclgevf_set_promisc_mode(handle, en_uc_pmc, en_mc_pmc); + if (!ret) + clear_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state); + } +} + +static int hclgevf_tqp_enable_cmd_send(struct hclgevf_dev *hdev, u16 tqp_id, + u16 stream_id, bool enable) +{ + struct hclgevf_cfg_com_tqp_queue_cmd *req; + struct hclge_desc desc; + + req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data; + + hclgevf_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false); + req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK); + req->stream_id = cpu_to_le16(stream_id); + if (enable) + req->enable |= 1U << HCLGEVF_TQP_ENABLE_B; + + return hclgevf_cmd_send(&hdev->hw, &desc, 1); +} + +static int hclgevf_tqp_enable(struct hnae3_handle *handle, bool enable) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + int ret; + u16 i; + + for (i = 0; i < handle->kinfo.num_tqps; i++) { + ret = hclgevf_tqp_enable_cmd_send(hdev, i, 0, enable); + if (ret) + return ret; + } + + return 0; +} + +static int hclgevf_get_host_mac_addr(struct hclgevf_dev *hdev, u8 *p) +{ + struct hclge_vf_to_pf_msg send_msg; + u8 host_mac[ETH_ALEN]; + int status; + + hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MAC_ADDR, 0); + status = hclgevf_send_mbx_msg(hdev, &send_msg, true, host_mac, + ETH_ALEN); + if (status) { + dev_err(&hdev->pdev->dev, + "fail to get VF MAC from host %d", status); + return status; + } + + ether_addr_copy(p, host_mac); + + return 0; +} + +static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + u8 host_mac_addr[ETH_ALEN]; + + if (hclgevf_get_host_mac_addr(hdev, host_mac_addr)) + return; + + hdev->has_pf_mac = !is_zero_ether_addr(host_mac_addr); + if (hdev->has_pf_mac) + ether_addr_copy(p, host_mac_addr); + else + ether_addr_copy(p, hdev->hw.mac.mac_addr); +} + +static int hclgevf_set_mac_addr(struct hnae3_handle *handle, const void *p, + bool is_first) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr; + struct hclge_vf_to_pf_msg send_msg; + u8 *new_mac_addr = (u8 *)p; + int status; + + hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_UNICAST, 0); + send_msg.subcode = HCLGE_MBX_MAC_VLAN_UC_MODIFY; + ether_addr_copy(send_msg.data, new_mac_addr); + if (is_first && !hdev->has_pf_mac) + eth_zero_addr(&send_msg.data[ETH_ALEN]); + else + ether_addr_copy(&send_msg.data[ETH_ALEN], old_mac_addr); + status = hclgevf_send_mbx_msg(hdev, &send_msg, true, 
NULL, 0); + if (!status) + ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr); + + return status; +} + +static struct hclgevf_mac_addr_node * +hclgevf_find_mac_node(struct list_head *list, const u8 *mac_addr) +{ + struct hclgevf_mac_addr_node *mac_node, *tmp; + + list_for_each_entry_safe(mac_node, tmp, list, node) + if (ether_addr_equal(mac_addr, mac_node->mac_addr)) + return mac_node; + + return NULL; +} + +static void hclgevf_update_mac_node(struct hclgevf_mac_addr_node *mac_node, + enum HCLGEVF_MAC_NODE_STATE state) +{ + switch (state) { + /* from set_rx_mode or tmp_add_list */ + case HCLGEVF_MAC_TO_ADD: + if (mac_node->state == HCLGEVF_MAC_TO_DEL) + mac_node->state = HCLGEVF_MAC_ACTIVE; + break; + /* only from set_rx_mode */ + case HCLGEVF_MAC_TO_DEL: + if (mac_node->state == HCLGEVF_MAC_TO_ADD) { + list_del(&mac_node->node); + kfree(mac_node); + } else { + mac_node->state = HCLGEVF_MAC_TO_DEL; + } + break; + /* only from tmp_add_list, the mac_node->state won't be + * HCLGEVF_MAC_ACTIVE + */ + case HCLGEVF_MAC_ACTIVE: + if (mac_node->state == HCLGEVF_MAC_TO_ADD) + mac_node->state = HCLGEVF_MAC_ACTIVE; + break; + } +} + +static int hclgevf_update_mac_list(struct hnae3_handle *handle, + enum HCLGEVF_MAC_NODE_STATE state, + enum HCLGEVF_MAC_ADDR_TYPE mac_type, + const unsigned char *addr) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + struct hclgevf_mac_addr_node *mac_node; + struct list_head *list; + + list = (mac_type == HCLGEVF_MAC_ADDR_UC) ? + &hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list; + + spin_lock_bh(&hdev->mac_table.mac_list_lock); + + /* if the mac addr is already in the mac list, no need to add a new + * one into it, just check the mac addr state, convert it to a new + * state, or just remove it, or do nothing. 
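Note: hclgevf_update_mac_node() below is a small three-state machine over TO_ADD, TO_DEL and ACTIVE. The sketch restates those transitions without the list bookkeeping; returning true stands in for "drop the node from the list" (a TO_ADD entry deleted before it was ever pushed to the PF).

#include <stdbool.h>

enum mac_node_state { MAC_TO_ADD, MAC_TO_DEL, MAC_ACTIVE };

/* Apply an incoming request to an existing node's state. */
static bool update_mac_state(enum mac_node_state *cur,
			     enum mac_node_state req)
{
	switch (req) {
	case MAC_TO_ADD:	/* from set_rx_mode or tmp_add_list */
		if (*cur == MAC_TO_DEL)
			*cur = MAC_ACTIVE;
		return false;
	case MAC_TO_DEL:	/* only from set_rx_mode */
		if (*cur == MAC_TO_ADD)
			return true;	/* never sent to PF: just drop it */
		*cur = MAC_TO_DEL;
		return false;
	case MAC_ACTIVE:	/* only from tmp_add_list */
		if (*cur == MAC_TO_ADD)
			*cur = MAC_ACTIVE;
		return false;
	}
	return false;
}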
+ */ + mac_node = hclgevf_find_mac_node(list, addr); + if (mac_node) { + hclgevf_update_mac_node(mac_node, state); + spin_unlock_bh(&hdev->mac_table.mac_list_lock); + return 0; + } + /* if this address is never added, unnecessary to delete */ + if (state == HCLGEVF_MAC_TO_DEL) { + spin_unlock_bh(&hdev->mac_table.mac_list_lock); + return -ENOENT; + } + + mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC); + if (!mac_node) { + spin_unlock_bh(&hdev->mac_table.mac_list_lock); + return -ENOMEM; + } + + mac_node->state = state; + ether_addr_copy(mac_node->mac_addr, addr); + list_add_tail(&mac_node->node, list); + + spin_unlock_bh(&hdev->mac_table.mac_list_lock); + return 0; +} + +static int hclgevf_add_uc_addr(struct hnae3_handle *handle, + const unsigned char *addr) +{ + return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD, + HCLGEVF_MAC_ADDR_UC, addr); +} + +static int hclgevf_rm_uc_addr(struct hnae3_handle *handle, + const unsigned char *addr) +{ + return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL, + HCLGEVF_MAC_ADDR_UC, addr); +} + +static int hclgevf_add_mc_addr(struct hnae3_handle *handle, + const unsigned char *addr) +{ + return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD, + HCLGEVF_MAC_ADDR_MC, addr); +} + +static int hclgevf_rm_mc_addr(struct hnae3_handle *handle, + const unsigned char *addr) +{ + return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL, + HCLGEVF_MAC_ADDR_MC, addr); +} + +static int hclgevf_add_del_mac_addr(struct hclgevf_dev *hdev, + struct hclgevf_mac_addr_node *mac_node, + enum HCLGEVF_MAC_ADDR_TYPE mac_type) +{ + struct hclge_vf_to_pf_msg send_msg; + u8 code, subcode; + + if (mac_type == HCLGEVF_MAC_ADDR_UC) { + code = HCLGE_MBX_SET_UNICAST; + if (mac_node->state == HCLGEVF_MAC_TO_ADD) + subcode = HCLGE_MBX_MAC_VLAN_UC_ADD; + else + subcode = HCLGE_MBX_MAC_VLAN_UC_REMOVE; + } else { + code = HCLGE_MBX_SET_MULTICAST; + if (mac_node->state == HCLGEVF_MAC_TO_ADD) + subcode = HCLGE_MBX_MAC_VLAN_MC_ADD; + else + subcode = HCLGE_MBX_MAC_VLAN_MC_REMOVE; + } + + hclgevf_build_send_msg(&send_msg, code, subcode); + ether_addr_copy(send_msg.data, mac_node->mac_addr); + return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); +} + +static void hclgevf_config_mac_list(struct hclgevf_dev *hdev, + struct list_head *list, + enum HCLGEVF_MAC_ADDR_TYPE mac_type) +{ + char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; + struct hclgevf_mac_addr_node *mac_node, *tmp; + int ret; + + list_for_each_entry_safe(mac_node, tmp, list, node) { + ret = hclgevf_add_del_mac_addr(hdev, mac_node, mac_type); + if (ret) { + hnae3_format_mac_addr(format_mac_addr, + mac_node->mac_addr); + dev_err(&hdev->pdev->dev, + "failed to configure mac %s, state = %d, ret = %d\n", + format_mac_addr, mac_node->state, ret); + return; + } + if (mac_node->state == HCLGEVF_MAC_TO_ADD) { + mac_node->state = HCLGEVF_MAC_ACTIVE; + } else { + list_del(&mac_node->node); + kfree(mac_node); + } + } +} + +static void hclgevf_sync_from_add_list(struct list_head *add_list, + struct list_head *mac_list) +{ + struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node; + + list_for_each_entry_safe(mac_node, tmp, add_list, node) { + /* if the mac address from tmp_add_list is not in the + * uc/mc_mac_list, it means have received a TO_DEL request + * during the time window of sending mac config request to PF + * If mac_node state is ACTIVE, then change its state to TO_DEL, + * then it will be removed at next time. If is TO_ADD, it means + * send TO_ADD request failed, so just remove the mac node. 
+ */ + new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr); + if (new_node) { + hclgevf_update_mac_node(new_node, mac_node->state); + list_del(&mac_node->node); + kfree(mac_node); + } else if (mac_node->state == HCLGEVF_MAC_ACTIVE) { + mac_node->state = HCLGEVF_MAC_TO_DEL; + list_move_tail(&mac_node->node, mac_list); + } else { + list_del(&mac_node->node); + kfree(mac_node); + } + } +} + +static void hclgevf_sync_from_del_list(struct list_head *del_list, + struct list_head *mac_list) +{ + struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node; + + list_for_each_entry_safe(mac_node, tmp, del_list, node) { + new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr); + if (new_node) { + /* If the mac addr is exist in the mac list, it means + * received a new request TO_ADD during the time window + * of sending mac addr configurrequest to PF, so just + * change the mac state to ACTIVE. + */ + new_node->state = HCLGEVF_MAC_ACTIVE; + list_del(&mac_node->node); + kfree(mac_node); + } else { + list_move_tail(&mac_node->node, mac_list); + } + } +} + +static void hclgevf_clear_list(struct list_head *list) +{ + struct hclgevf_mac_addr_node *mac_node, *tmp; + + list_for_each_entry_safe(mac_node, tmp, list, node) { + list_del(&mac_node->node); + kfree(mac_node); + } +} + +static void hclgevf_sync_mac_list(struct hclgevf_dev *hdev, + enum HCLGEVF_MAC_ADDR_TYPE mac_type) +{ + struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node; + struct list_head tmp_add_list, tmp_del_list; + struct list_head *list; + + INIT_LIST_HEAD(&tmp_add_list); + INIT_LIST_HEAD(&tmp_del_list); + + /* move the mac addr to the tmp_add_list and tmp_del_list, then + * we can add/delete these mac addr outside the spin lock + */ + list = (mac_type == HCLGEVF_MAC_ADDR_UC) ? + &hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list; + + spin_lock_bh(&hdev->mac_table.mac_list_lock); + + list_for_each_entry_safe(mac_node, tmp, list, node) { + switch (mac_node->state) { + case HCLGEVF_MAC_TO_DEL: + list_move_tail(&mac_node->node, &tmp_del_list); + break; + case HCLGEVF_MAC_TO_ADD: + new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC); + if (!new_node) + goto stop_traverse; + + ether_addr_copy(new_node->mac_addr, mac_node->mac_addr); + new_node->state = mac_node->state; + list_add_tail(&new_node->node, &tmp_add_list); + break; + default: + break; + } + } + +stop_traverse: + spin_unlock_bh(&hdev->mac_table.mac_list_lock); + + /* delete first, in order to get max mac table space for adding */ + hclgevf_config_mac_list(hdev, &tmp_del_list, mac_type); + hclgevf_config_mac_list(hdev, &tmp_add_list, mac_type); + + /* if some mac addresses were added/deleted fail, move back to the + * mac_list, and retry at next time. 
+ */ + spin_lock_bh(&hdev->mac_table.mac_list_lock); + + hclgevf_sync_from_del_list(&tmp_del_list, list); + hclgevf_sync_from_add_list(&tmp_add_list, list); + + spin_unlock_bh(&hdev->mac_table.mac_list_lock); +} + +static void hclgevf_sync_mac_table(struct hclgevf_dev *hdev) +{ + hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_UC); + hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_MC); +} + +static void hclgevf_uninit_mac_list(struct hclgevf_dev *hdev) +{ + spin_lock_bh(&hdev->mac_table.mac_list_lock); + + hclgevf_clear_list(&hdev->mac_table.uc_mac_list); + hclgevf_clear_list(&hdev->mac_table.mc_mac_list); + + spin_unlock_bh(&hdev->mac_table.mac_list_lock); +} + +static int hclgevf_enable_vlan_filter(struct hnae3_handle *handle, bool enable) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + struct hnae3_ae_dev *ae_dev = hdev->ae_dev; + struct hclge_vf_to_pf_msg send_msg; + + if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps)) + return -EOPNOTSUPP; + + hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN, + HCLGE_MBX_ENABLE_VLAN_FILTER); + send_msg.data[0] = enable ? 1 : 0; + + return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); +} + +static int hclgevf_set_vlan_filter(struct hnae3_handle *handle, + __be16 proto, u16 vlan_id, + bool is_kill) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + struct hclge_mbx_vlan_filter *vlan_filter; + struct hclge_vf_to_pf_msg send_msg; + int ret; + + if (vlan_id > HCLGEVF_MAX_VLAN_ID) + return -EINVAL; + + if (proto != htons(ETH_P_8021Q)) + return -EPROTONOSUPPORT; + + /* When device is resetting or reset failed, firmware is unable to + * handle mailbox. Just record the vlan id, and remove it after + * reset finished. + */ + if ((test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) || + test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) && is_kill) { + set_bit(vlan_id, hdev->vlan_del_fail_bmap); + return -EBUSY; + } else if (!is_kill && test_bit(vlan_id, hdev->vlan_del_fail_bmap)) { + clear_bit(vlan_id, hdev->vlan_del_fail_bmap); + } + + hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN, + HCLGE_MBX_VLAN_FILTER); + vlan_filter = (struct hclge_mbx_vlan_filter *)send_msg.data; + vlan_filter->is_kill = is_kill; + vlan_filter->vlan_id = cpu_to_le16(vlan_id); + vlan_filter->proto = cpu_to_le16(be16_to_cpu(proto)); + + /* when remove hw vlan filter failed, record the vlan id, + * and try to remove it from hw later, to be consistence + * with stack. 
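Note: hclgevf_sync_mac_list() above is a two-phase sync: snapshot the pending work under the spinlock, talk to the PF with the lock dropped (the mailbox call can sleep), then merge the outcome back under the lock, deleting before adding to free MAC table space. The sketch below is a deliberately simplified model using a fixed array and a pthread mutex; the real driver uses list_head helpers whose merge step also copes with requests that arrived while the lock was dropped.

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

#define TABLE_SIZE 8

enum entry_state { ENT_EMPTY, ENT_TO_ADD, ENT_TO_DEL, ENT_ACTIVE };

struct mac_table {
	pthread_mutex_t lock;
	enum entry_state state[TABLE_SIZE];
};

/* push_to_pf() is a placeholder for the mailbox round trip. */
static void sync_table(struct mac_table *t,
		       bool (*push_to_pf)(size_t idx, bool add))
{
	enum entry_state snap[TABLE_SIZE];
	size_t i;

	pthread_mutex_lock(&t->lock);
	for (i = 0; i < TABLE_SIZE; i++)
		snap[i] = t->state[i];
	pthread_mutex_unlock(&t->lock);

	/* delete first, to free table space for the adds that follow */
	for (i = 0; i < TABLE_SIZE; i++)
		if (snap[i] == ENT_TO_DEL && push_to_pf(i, false))
			snap[i] = ENT_EMPTY;
	for (i = 0; i < TABLE_SIZE; i++)
		if (snap[i] == ENT_TO_ADD && push_to_pf(i, true))
			snap[i] = ENT_ACTIVE;

	pthread_mutex_lock(&t->lock);
	for (i = 0; i < TABLE_SIZE; i++)
		if (snap[i] == ENT_EMPTY || snap[i] == ENT_ACTIVE)
			t->state[i] = snap[i];
	pthread_mutex_unlock(&t->lock);
}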
+ */ + ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); + if (is_kill && ret) + set_bit(vlan_id, hdev->vlan_del_fail_bmap); + + return ret; +} + +static void hclgevf_sync_vlan_filter(struct hclgevf_dev *hdev) +{ +#define HCLGEVF_MAX_SYNC_COUNT 60 + struct hnae3_handle *handle = &hdev->nic; + int ret, sync_cnt = 0; + u16 vlan_id; + + if (bitmap_empty(hdev->vlan_del_fail_bmap, VLAN_N_VID)) + return; + + rtnl_lock(); + vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID); + while (vlan_id != VLAN_N_VID) { + ret = hclgevf_set_vlan_filter(handle, htons(ETH_P_8021Q), + vlan_id, true); + if (ret) + break; + + clear_bit(vlan_id, hdev->vlan_del_fail_bmap); + sync_cnt++; + if (sync_cnt >= HCLGEVF_MAX_SYNC_COUNT) + break; + + vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID); + } + rtnl_unlock(); +} + +static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + struct hclge_vf_to_pf_msg send_msg; + + hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN, + HCLGE_MBX_VLAN_RX_OFF_CFG); + send_msg.data[0] = enable ? 1 : 0; + return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); +} + +static int hclgevf_reset_tqp(struct hnae3_handle *handle) +{ +#define HCLGEVF_RESET_ALL_QUEUE_DONE 1U + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + struct hclge_vf_to_pf_msg send_msg; + u8 return_status = 0; + int ret; + u16 i; + + /* disable vf queue before send queue reset msg to PF */ + ret = hclgevf_tqp_enable(handle, false); + if (ret) { + dev_err(&hdev->pdev->dev, "failed to disable tqp, ret = %d\n", + ret); + return ret; + } + + hclgevf_build_send_msg(&send_msg, HCLGE_MBX_QUEUE_RESET, 0); + + ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, &return_status, + sizeof(return_status)); + if (ret || return_status == HCLGEVF_RESET_ALL_QUEUE_DONE) + return ret; + + for (i = 1; i < handle->kinfo.num_tqps; i++) { + hclgevf_build_send_msg(&send_msg, HCLGE_MBX_QUEUE_RESET, 0); + *(__le16 *)send_msg.data = cpu_to_le16(i); + ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); + if (ret) + return ret; + } + + return 0; +} + +static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + struct hclge_mbx_mtu_info *mtu_info; + struct hclge_vf_to_pf_msg send_msg; + + hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_MTU, 0); + mtu_info = (struct hclge_mbx_mtu_info *)send_msg.data; + mtu_info->mtu = cpu_to_le32(new_mtu); + + return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); +} + +static int hclgevf_notify_client(struct hclgevf_dev *hdev, + enum hnae3_reset_notify_type type) +{ + struct hnae3_client *client = hdev->nic_client; + struct hnae3_handle *handle = &hdev->nic; + int ret; + + if (!test_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state) || + !client) + return 0; + + if (!client->ops->reset_notify) + return -EOPNOTSUPP; + + ret = client->ops->reset_notify(handle, type); + if (ret) + dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n", + type, ret); + + return ret; +} + +static int hclgevf_notify_roce_client(struct hclgevf_dev *hdev, + enum hnae3_reset_notify_type type) +{ + struct hnae3_client *client = hdev->roce_client; + struct hnae3_handle *handle = &hdev->roce; + int ret; + + if (!test_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state) || !client) + return 0; + + if (!client->ops->reset_notify) + return -EOPNOTSUPP; + + ret = client->ops->reset_notify(handle, type); + if (ret) + 
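Note: when a VLAN delete fails (typically because the firmware cannot service the mailbox during a reset), hclgevf_set_vlan_filter() records the VLAN id in vlan_del_fail_bmap and hclgevf_sync_vlan_filter() retries it later, capped per pass. A self-contained model of that retry loop; the bool array and kill_vlan() callback are stand-ins for the driver's bitmap and mailbox call.

#include <stdbool.h>

#define VLAN_N_VID	  4096
#define MAX_SYNC_PER_PASS 60	/* as HCLGEVF_MAX_SYNC_COUNT above */

/* Retry VLAN deletions that failed earlier; stop early if the PF is
 * still unreachable or the per-pass budget is spent.
 */
static void sync_failed_vlan_dels(bool *del_failed,
				  int (*kill_vlan)(unsigned short vid))
{
	unsigned int vid, done = 0;

	for (vid = 0; vid < VLAN_N_VID && done < MAX_SYNC_PER_PASS; vid++) {
		if (!del_failed[vid])
			continue;
		if (kill_vlan((unsigned short)vid))
			break;		/* still failing, try next pass */
		del_failed[vid] = false;
		done++;
	}
}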
dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)", + type, ret); + return ret; +} + +static int hclgevf_reset_wait(struct hclgevf_dev *hdev) +{ +#define HCLGEVF_RESET_WAIT_US 20000 +#define HCLGEVF_RESET_WAIT_CNT 2000 +#define HCLGEVF_RESET_WAIT_TIMEOUT_US \ + (HCLGEVF_RESET_WAIT_US * HCLGEVF_RESET_WAIT_CNT) + + u32 val; + int ret; + + if (hdev->reset_type == HNAE3_VF_RESET) + ret = readl_poll_timeout(hdev->hw.hw.io_base + + HCLGEVF_VF_RST_ING, val, + !(val & HCLGEVF_VF_RST_ING_BIT), + HCLGEVF_RESET_WAIT_US, + HCLGEVF_RESET_WAIT_TIMEOUT_US); + else + ret = readl_poll_timeout(hdev->hw.hw.io_base + + HCLGEVF_RST_ING, val, + !(val & HCLGEVF_RST_ING_BITS), + HCLGEVF_RESET_WAIT_US, + HCLGEVF_RESET_WAIT_TIMEOUT_US); + + /* hardware completion status should be available by this time */ + if (ret) { + dev_err(&hdev->pdev->dev, + "couldn't get reset done status from h/w, timeout!\n"); + return ret; + } + + /* we will wait a bit more to let reset of the stack to complete. This + * might happen in case reset assertion was made by PF. Yes, this also + * means we might end up waiting bit more even for VF reset. + */ + if (hdev->reset_type == HNAE3_VF_FULL_RESET) + msleep(5000); + else + msleep(500); + + return 0; +} + +static void hclgevf_reset_handshake(struct hclgevf_dev *hdev, bool enable) +{ + u32 reg_val; + + reg_val = hclgevf_read_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG); + if (enable) + reg_val |= HCLGEVF_NIC_SW_RST_RDY; + else + reg_val &= ~HCLGEVF_NIC_SW_RST_RDY; + + hclgevf_write_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG, + reg_val); +} + +static int hclgevf_reset_stack(struct hclgevf_dev *hdev) +{ + int ret; + + /* uninitialize the nic client */ + ret = hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT); + if (ret) + return ret; + + /* re-initialize the hclge device */ + ret = hclgevf_reset_hdev(hdev); + if (ret) { + dev_err(&hdev->pdev->dev, + "hclge device re-init failed, VF is disabled!\n"); + return ret; + } + + /* bring up the nic client again */ + ret = hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT); + if (ret) + return ret; + + /* clear handshake status with IMP */ + hclgevf_reset_handshake(hdev, false); + + /* bring up the nic to enable TX/RX again */ + return hclgevf_notify_client(hdev, HNAE3_UP_CLIENT); +} + +static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev) +{ +#define HCLGEVF_RESET_SYNC_TIME 100 + + if (hdev->reset_type == HNAE3_VF_FUNC_RESET) { + struct hclge_vf_to_pf_msg send_msg; + int ret; + + hclgevf_build_send_msg(&send_msg, HCLGE_MBX_RESET, 0); + ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to assert VF reset, ret = %d\n", ret); + return ret; + } + hdev->rst_stats.vf_func_rst_cnt++; + } + + set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); + /* inform hardware that preparatory work is done */ + msleep(HCLGEVF_RESET_SYNC_TIME); + hclgevf_reset_handshake(hdev, true); + dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done\n", + hdev->reset_type); + + return 0; +} + +static void hclgevf_dump_rst_info(struct hclgevf_dev *hdev) +{ + dev_info(&hdev->pdev->dev, "VF function reset count: %u\n", + hdev->rst_stats.vf_func_rst_cnt); + dev_info(&hdev->pdev->dev, "FLR reset count: %u\n", + hdev->rst_stats.flr_rst_cnt); + dev_info(&hdev->pdev->dev, "VF reset count: %u\n", + hdev->rst_stats.vf_rst_cnt); + dev_info(&hdev->pdev->dev, "reset done count: %u\n", + hdev->rst_stats.rst_done_cnt); + dev_info(&hdev->pdev->dev, "HW reset done count: %u\n", + 
hdev->rst_stats.hw_rst_done_cnt); + dev_info(&hdev->pdev->dev, "reset count: %u\n", + hdev->rst_stats.rst_cnt); + dev_info(&hdev->pdev->dev, "reset fail count: %u\n", + hdev->rst_stats.rst_fail_cnt); + dev_info(&hdev->pdev->dev, "vector0 interrupt enable status: 0x%x\n", + hclgevf_read_dev(&hdev->hw, HCLGEVF_MISC_VECTOR_REG_BASE)); + dev_info(&hdev->pdev->dev, "vector0 interrupt status: 0x%x\n", + hclgevf_read_dev(&hdev->hw, HCLGE_COMM_VECTOR0_CMDQ_STATE_REG)); + dev_info(&hdev->pdev->dev, "handshake status: 0x%x\n", + hclgevf_read_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG)); + dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n", + hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING)); + dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state); +} + +static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev) +{ + /* recover handshake status with IMP when reset fail */ + hclgevf_reset_handshake(hdev, true); + hdev->rst_stats.rst_fail_cnt++; + dev_err(&hdev->pdev->dev, "failed to reset VF(%u)\n", + hdev->rst_stats.rst_fail_cnt); + + if (hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT) + set_bit(hdev->reset_type, &hdev->reset_pending); + + if (hclgevf_is_reset_pending(hdev)) { + set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); + hclgevf_reset_task_schedule(hdev); + } else { + set_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state); + hclgevf_dump_rst_info(hdev); + } +} + +static int hclgevf_reset_prepare(struct hclgevf_dev *hdev) +{ + int ret; + + hdev->rst_stats.rst_cnt++; + + /* perform reset of the stack & ae device for a client */ + ret = hclgevf_notify_roce_client(hdev, HNAE3_DOWN_CLIENT); + if (ret) + return ret; + + rtnl_lock(); + /* bring down the nic to stop any ongoing TX/RX */ + ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT); + rtnl_unlock(); + if (ret) + return ret; + + return hclgevf_reset_prepare_wait(hdev); +} + +static int hclgevf_reset_rebuild(struct hclgevf_dev *hdev) +{ + int ret; + + hdev->rst_stats.hw_rst_done_cnt++; + ret = hclgevf_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT); + if (ret) + return ret; + + rtnl_lock(); + /* now, re-initialize the nic client and ae device */ + ret = hclgevf_reset_stack(hdev); + rtnl_unlock(); + if (ret) { + dev_err(&hdev->pdev->dev, "failed to reset VF stack\n"); + return ret; + } + + ret = hclgevf_notify_roce_client(hdev, HNAE3_INIT_CLIENT); + /* ignore RoCE notify error if it fails HCLGEVF_RESET_MAX_FAIL_CNT - 1 + * times + */ + if (ret && + hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT - 1) + return ret; + + ret = hclgevf_notify_roce_client(hdev, HNAE3_UP_CLIENT); + if (ret) + return ret; + + hdev->last_reset_time = jiffies; + hdev->rst_stats.rst_done_cnt++; + hdev->rst_stats.rst_fail_cnt = 0; + clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state); + + return 0; +} + +static void hclgevf_reset(struct hclgevf_dev *hdev) +{ + if (hclgevf_reset_prepare(hdev)) + goto err_reset; + + /* check if VF could successfully fetch the hardware reset completion + * status from the hardware + */ + if (hclgevf_reset_wait(hdev)) { + /* can't do much in this situation, will disable VF */ + dev_err(&hdev->pdev->dev, + "failed to fetch H/W reset completion status\n"); + goto err_reset; + } + + if (hclgevf_reset_rebuild(hdev)) + goto err_reset; + + return; + +err_reset: + hclgevf_reset_err_handle(hdev); +} + +static enum hnae3_reset_type hclgevf_get_reset_level(struct hclgevf_dev *hdev, + unsigned long *addr) +{ + enum hnae3_reset_type rst_level = HNAE3_NONE_RESET; + + /* return the highest priority reset level amongst all */ + if 
(test_bit(HNAE3_VF_RESET, addr)) { + rst_level = HNAE3_VF_RESET; + clear_bit(HNAE3_VF_RESET, addr); + clear_bit(HNAE3_VF_PF_FUNC_RESET, addr); + clear_bit(HNAE3_VF_FUNC_RESET, addr); + } else if (test_bit(HNAE3_VF_FULL_RESET, addr)) { + rst_level = HNAE3_VF_FULL_RESET; + clear_bit(HNAE3_VF_FULL_RESET, addr); + clear_bit(HNAE3_VF_FUNC_RESET, addr); + } else if (test_bit(HNAE3_VF_PF_FUNC_RESET, addr)) { + rst_level = HNAE3_VF_PF_FUNC_RESET; + clear_bit(HNAE3_VF_PF_FUNC_RESET, addr); + clear_bit(HNAE3_VF_FUNC_RESET, addr); + } else if (test_bit(HNAE3_VF_FUNC_RESET, addr)) { + rst_level = HNAE3_VF_FUNC_RESET; + clear_bit(HNAE3_VF_FUNC_RESET, addr); + } else if (test_bit(HNAE3_FLR_RESET, addr)) { + rst_level = HNAE3_FLR_RESET; + clear_bit(HNAE3_FLR_RESET, addr); + } + + return rst_level; +} + +static void hclgevf_reset_event(struct pci_dev *pdev, + struct hnae3_handle *handle) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); + struct hclgevf_dev *hdev = ae_dev->priv; + + dev_info(&hdev->pdev->dev, "received reset request from VF enet\n"); + + if (hdev->default_reset_request) + hdev->reset_level = + hclgevf_get_reset_level(hdev, + &hdev->default_reset_request); + else + hdev->reset_level = HNAE3_VF_FUNC_RESET; + + /* reset of this VF requested */ + set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state); + hclgevf_reset_task_schedule(hdev); + + hdev->last_reset_time = jiffies; +} + +static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev, + enum hnae3_reset_type rst_type) +{ + struct hclgevf_dev *hdev = ae_dev->priv; + + set_bit(rst_type, &hdev->default_reset_request); +} + +static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en) +{ + writel(en ? 1 : 0, vector->addr); +} + +static void hclgevf_reset_prepare_general(struct hnae3_ae_dev *ae_dev, + enum hnae3_reset_type rst_type) +{ +#define HCLGEVF_RESET_RETRY_WAIT_MS 500 +#define HCLGEVF_RESET_RETRY_CNT 5 + + struct hclgevf_dev *hdev = ae_dev->priv; + int retry_cnt = 0; + int ret; + + while (retry_cnt++ < HCLGEVF_RESET_RETRY_CNT) { + down(&hdev->reset_sem); + set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); + hdev->reset_type = rst_type; + ret = hclgevf_reset_prepare(hdev); + if (!ret && !hdev->reset_pending) + break; + + dev_err(&hdev->pdev->dev, + "failed to prepare to reset, ret=%d, reset_pending:0x%lx, retry_cnt:%d\n", + ret, hdev->reset_pending, retry_cnt); + clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); + up(&hdev->reset_sem); + msleep(HCLGEVF_RESET_RETRY_WAIT_MS); + } + + /* disable misc vector before reset done */ + hclgevf_enable_vector(&hdev->misc_vector, false); + + if (hdev->reset_type == HNAE3_FLR_RESET) + hdev->rst_stats.flr_rst_cnt++; +} + +static void hclgevf_reset_done(struct hnae3_ae_dev *ae_dev) +{ + struct hclgevf_dev *hdev = ae_dev->priv; + int ret; + + hclgevf_enable_vector(&hdev->misc_vector, true); + + ret = hclgevf_reset_rebuild(hdev); + if (ret) + dev_warn(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", + ret); + + hdev->reset_type = HNAE3_NONE_RESET; + clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); + up(&hdev->reset_sem); +} + +static u32 hclgevf_get_fw_version(struct hnae3_handle *handle) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + + return hdev->fw_version; +} + +static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev) +{ + struct hclgevf_misc_vector *vector = &hdev->misc_vector; + + vector->vector_irq = pci_irq_vector(hdev->pdev, + HCLGEVF_MISC_VECTOR_NUM); + vector->addr = hdev->hw.hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE; + /* vector 
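Note: hclgevf_get_reset_level() above picks the highest-priority pending reset and clears the bits it subsumes, so a stronger reset swallows the weaker requests queued behind it. The sketch mirrors that priority order with plain bit operations; the bit positions are illustrative, the real ones are the hnae3_reset_type enumerators.

enum vf_reset { RST_NONE, RST_FUNC, RST_PF_FUNC, RST_FULL, RST_VF, RST_FLR };

/* Highest priority first: VF > VF full > VF/PF func > VF func > FLR. */
static enum vf_reset pick_reset_level(unsigned long *pending)
{
	if (*pending & (1UL << RST_VF)) {
		*pending &= ~((1UL << RST_VF) | (1UL << RST_PF_FUNC) |
			      (1UL << RST_FUNC));
		return RST_VF;
	}
	if (*pending & (1UL << RST_FULL)) {
		*pending &= ~((1UL << RST_FULL) | (1UL << RST_FUNC));
		return RST_FULL;
	}
	if (*pending & (1UL << RST_PF_FUNC)) {
		*pending &= ~((1UL << RST_PF_FUNC) | (1UL << RST_FUNC));
		return RST_PF_FUNC;
	}
	if (*pending & (1UL << RST_FUNC)) {
		*pending &= ~(1UL << RST_FUNC);
		return RST_FUNC;
	}
	if (*pending & (1UL << RST_FLR)) {
		*pending &= ~(1UL << RST_FLR);
		return RST_FLR;
	}
	return RST_NONE;
}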
status always valid for Vector 0 */ + hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0; + hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq; + + hdev->num_msi_left -= 1; + hdev->num_msi_used += 1; +} + +void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev) +{ + if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) && + test_bit(HCLGEVF_STATE_SERVICE_INITED, &hdev->state) && + !test_and_set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, + &hdev->state)) + mod_delayed_work(hclgevf_wq, &hdev->service_task, 0); +} + +void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev) +{ + if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) && + !test_and_set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, + &hdev->state)) + mod_delayed_work(hclgevf_wq, &hdev->service_task, 0); +} + +static void hclgevf_task_schedule(struct hclgevf_dev *hdev, + unsigned long delay) +{ + if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) && + !test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) + mod_delayed_work(hclgevf_wq, &hdev->service_task, delay); +} + +static void hclgevf_reset_service_task(struct hclgevf_dev *hdev) +{ +#define HCLGEVF_MAX_RESET_ATTEMPTS_CNT 3 + + if (!test_and_clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state)) + return; + + down(&hdev->reset_sem); + set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); + + if (test_and_clear_bit(HCLGEVF_RESET_PENDING, + &hdev->reset_state)) { + /* PF has intimated that it is about to reset the hardware. + * We now have to poll & check if hardware has actually + * completed the reset sequence. On hardware reset completion, + * VF needs to reset the client and ae device. + */ + hdev->reset_attempts = 0; + + hdev->last_reset_time = jiffies; + hdev->reset_type = + hclgevf_get_reset_level(hdev, &hdev->reset_pending); + if (hdev->reset_type != HNAE3_NONE_RESET) + hclgevf_reset(hdev); + } else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED, + &hdev->reset_state)) { + /* we could be here when either of below happens: + * 1. reset was initiated due to watchdog timeout caused by + * a. IMP was earlier reset and our TX got choked down and + * which resulted in watchdog reacting and inducing VF + * reset. This also means our cmdq would be unreliable. + * b. problem in TX due to other lower layer(example link + * layer not functioning properly etc.) + * 2. VF reset might have been initiated due to some config + * change. + * + * NOTE: Theres no clear way to detect above cases than to react + * to the response of PF for this reset request. PF will ack the + * 1b and 2. cases but we will not get any intimation about 1a + * from PF as cmdq would be in unreliable state i.e. mailbox + * communication between PF and VF would be broken. + * + * if we are never geting into pending state it means either: + * 1. PF is not receiving our request which could be due to IMP + * reset + * 2. PF is screwed + * We cannot do much for 2. but to check first we can try reset + * our PCIe + stack and see if it alleviates the problem. 
+ */ + if (hdev->reset_attempts > HCLGEVF_MAX_RESET_ATTEMPTS_CNT) { + /* prepare for full reset of stack + pcie interface */ + set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending); + + /* "defer" schedule the reset task again */ + set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); + } else { + hdev->reset_attempts++; + + set_bit(hdev->reset_level, &hdev->reset_pending); + set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); + } + hclgevf_reset_task_schedule(hdev); + } + + hdev->reset_type = HNAE3_NONE_RESET; + clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); + up(&hdev->reset_sem); +} + +static void hclgevf_mailbox_service_task(struct hclgevf_dev *hdev) +{ + if (!test_and_clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state)) + return; + + if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) + return; + + hclgevf_mbx_async_handler(hdev); + + clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state); +} + +static void hclgevf_keep_alive(struct hclgevf_dev *hdev) +{ + struct hclge_vf_to_pf_msg send_msg; + int ret; + + if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state)) + return; + + hclgevf_build_send_msg(&send_msg, HCLGE_MBX_KEEP_ALIVE, 0); + ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); + if (ret) + dev_err(&hdev->pdev->dev, + "VF sends keep alive cmd failed(=%d)\n", ret); +} + +static void hclgevf_periodic_service_task(struct hclgevf_dev *hdev) +{ + unsigned long delta = round_jiffies_relative(HZ); + struct hnae3_handle *handle = &hdev->nic; + + if (test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state) || + test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state)) + return; + + if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) { + delta = jiffies - hdev->last_serv_processed; + + if (delta < round_jiffies_relative(HZ)) { + delta = round_jiffies_relative(HZ) - delta; + goto out; + } + } + + hdev->serv_processed_cnt++; + if (!(hdev->serv_processed_cnt % HCLGEVF_KEEP_ALIVE_TASK_INTERVAL)) + hclgevf_keep_alive(hdev); + + if (test_bit(HCLGEVF_STATE_DOWN, &hdev->state)) { + hdev->last_serv_processed = jiffies; + goto out; + } + + if (!(hdev->serv_processed_cnt % HCLGEVF_STATS_TIMER_INTERVAL)) + hclge_comm_tqps_update_stats(handle, &hdev->hw.hw); + + /* VF does not need to request link status when this bit is set, because + * PF will push its link status to VFs when link status changed. 
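Note: hclgevf_periodic_service_task() above does cheap bookkeeping on every pass and only runs the heavier work -- the keep-alive message and the TQP stats refresh -- every Nth pass, using modulo checks on a pass counter. A minimal model of that cadence; the interval values and callbacks are placeholders, not the driver's real constants.

#define KEEP_ALIVE_INTERVAL 2	/* placeholder for the keep-alive interval */
#define STATS_INTERVAL	    36	/* placeholder for the stats interval */

struct periodic_ctx {
	unsigned long processed_cnt;
};

/* One pass of a periodic service task. */
static void periodic_pass(struct periodic_ctx *ctx,
			  void (*keep_alive)(void),
			  void (*update_stats)(void))
{
	ctx->processed_cnt++;

	if (!(ctx->processed_cnt % KEEP_ALIVE_INTERVAL))
		keep_alive();
	if (!(ctx->processed_cnt % STATS_INTERVAL))
		update_stats();
}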
+ */ + if (!test_bit(HCLGEVF_STATE_PF_PUSH_LINK_STATUS, &hdev->state)) + hclgevf_request_link_info(hdev); + + hclgevf_update_link_mode(hdev); + + hclgevf_sync_vlan_filter(hdev); + + hclgevf_sync_mac_table(hdev); + + hclgevf_sync_promisc_mode(hdev); + + hdev->last_serv_processed = jiffies; + +out: + hclgevf_task_schedule(hdev, delta); +} + +static void hclgevf_service_task(struct work_struct *work) +{ + struct hclgevf_dev *hdev = container_of(work, struct hclgevf_dev, + service_task.work); + + hclgevf_reset_service_task(hdev); + hclgevf_mailbox_service_task(hdev); + hclgevf_periodic_service_task(hdev); + + /* Handle reset and mbx again in case periodical task delays the + * handling by calling hclgevf_task_schedule() in + * hclgevf_periodic_service_task() + */ + hclgevf_reset_service_task(hdev); + hclgevf_mailbox_service_task(hdev); +} + +static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr) +{ + hclgevf_write_dev(&hdev->hw, HCLGE_COMM_VECTOR0_CMDQ_SRC_REG, regclr); +} + +static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev, + u32 *clearval) +{ + u32 val, cmdq_stat_reg, rst_ing_reg; + + /* fetch the events from their corresponding regs */ + cmdq_stat_reg = hclgevf_read_dev(&hdev->hw, + HCLGE_COMM_VECTOR0_CMDQ_STATE_REG); + if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_stat_reg) { + rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING); + dev_info(&hdev->pdev->dev, + "receive reset interrupt 0x%x!\n", rst_ing_reg); + set_bit(HNAE3_VF_RESET, &hdev->reset_pending); + set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); + set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); + *clearval = ~(1U << HCLGEVF_VECTOR0_RST_INT_B); + hdev->rst_stats.vf_rst_cnt++; + /* set up VF hardware reset status, its PF will clear + * this status when PF has initialized done. + */ + val = hclgevf_read_dev(&hdev->hw, HCLGEVF_VF_RST_ING); + hclgevf_write_dev(&hdev->hw, HCLGEVF_VF_RST_ING, + val | HCLGEVF_VF_RST_ING_BIT); + return HCLGEVF_VECTOR0_EVENT_RST; + } + + /* check for vector0 mailbox(=CMDQ RX) event source */ + if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_stat_reg) { + /* for revision 0x21, clearing interrupt is writing bit 0 + * to the clear register, writing bit 1 means to keep the + * old value. + * for revision 0x20, the clear register is a read & write + * register, so we should just write 0 to the bit we are + * handling, and keep other bits as cmdq_stat_reg. 
+ */ + if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) + *clearval = ~(1U << HCLGEVF_VECTOR0_RX_CMDQ_INT_B); + else + *clearval = cmdq_stat_reg & + ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B); + + return HCLGEVF_VECTOR0_EVENT_MBX; + } + + /* print other vector0 event source */ + dev_info(&hdev->pdev->dev, + "vector 0 interrupt from unknown source, cmdq_src = %#x\n", + cmdq_stat_reg); + + return HCLGEVF_VECTOR0_EVENT_OTHER; +} + +static void hclgevf_reset_timer(struct timer_list *t) +{ + struct hclgevf_dev *hdev = from_timer(hdev, t, reset_timer); + + hclgevf_clear_event_cause(hdev, HCLGEVF_VECTOR0_EVENT_RST); + hclgevf_reset_task_schedule(hdev); +} + +static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data) +{ +#define HCLGEVF_RESET_DELAY 5 + + enum hclgevf_evt_cause event_cause; + struct hclgevf_dev *hdev = data; + u32 clearval; + + hclgevf_enable_vector(&hdev->misc_vector, false); + event_cause = hclgevf_check_evt_cause(hdev, &clearval); + if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) + hclgevf_clear_event_cause(hdev, clearval); + + switch (event_cause) { + case HCLGEVF_VECTOR0_EVENT_RST: + mod_timer(&hdev->reset_timer, + jiffies + msecs_to_jiffies(HCLGEVF_RESET_DELAY)); + break; + case HCLGEVF_VECTOR0_EVENT_MBX: + hclgevf_mbx_handler(hdev); + break; + default: + break; + } + + hclgevf_enable_vector(&hdev->misc_vector, true); + + return IRQ_HANDLED; +} + +static int hclgevf_configure(struct hclgevf_dev *hdev) +{ + int ret; + + hdev->gro_en = true; + + ret = hclgevf_get_basic_info(hdev); + if (ret) + return ret; + + /* get current port based vlan state from PF */ + ret = hclgevf_get_port_base_vlan_filter_state(hdev); + if (ret) + return ret; + + /* get queue configuration from PF */ + ret = hclgevf_get_queue_info(hdev); + if (ret) + return ret; + + /* get queue depth info from PF */ + ret = hclgevf_get_queue_depth(hdev); + if (ret) + return ret; + + return hclgevf_get_pf_media_type(hdev); +} + +static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev) +{ + struct pci_dev *pdev = ae_dev->pdev; + struct hclgevf_dev *hdev; + + hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL); + if (!hdev) + return -ENOMEM; + + hdev->pdev = pdev; + hdev->ae_dev = ae_dev; + ae_dev->priv = hdev; + + return 0; +} + +static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev) +{ + struct hnae3_handle *roce = &hdev->roce; + struct hnae3_handle *nic = &hdev->nic; + + roce->rinfo.num_vectors = hdev->num_roce_msix; + + if (hdev->num_msi_left < roce->rinfo.num_vectors || + hdev->num_msi_left == 0) + return -EINVAL; + + roce->rinfo.base_vector = hdev->roce_base_msix_offset; + + roce->rinfo.netdev = nic->kinfo.netdev; + roce->rinfo.roce_io_base = hdev->hw.hw.io_base; + roce->rinfo.roce_mem_base = hdev->hw.hw.mem_base; + + roce->pdev = nic->pdev; + roce->ae_algo = nic->ae_algo; + roce->numa_node_mask = nic->numa_node_mask; + + return 0; +} + +static int hclgevf_config_gro(struct hclgevf_dev *hdev) +{ + struct hclgevf_cfg_gro_status_cmd *req; + struct hclge_desc desc; + int ret; + + if (!hnae3_ae_dev_gro_supported(hdev->ae_dev)) + return 0; + + hclgevf_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, + false); + req = (struct hclgevf_cfg_gro_status_cmd *)desc.data; + + req->gro_en = hdev->gro_en ? 
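Note: the clear value written back after a CMDQ RX (mailbox) event differs by hardware revision, as the comment in hclgevf_check_evt_cause() explains: on the newer revision a 0 bit clears and a 1 bit preserves, so every other bit is left set; on the old revision the register is plain read/write, so the current status is written back with only the handled bit dropped. A compact restatement of that computation; the bit position is illustrative.

#include <stdbool.h>
#include <stdint.h>

#define RX_CMDQ_INT_B 1U	/* illustrative bit position */

static uint32_t mbx_clearval(bool new_revision, uint32_t cmdq_stat_reg)
{
	if (new_revision)
		return ~(1U << RX_CMDQ_INT_B);	/* 1 = keep, 0 = clear */

	return cmdq_stat_reg & ~(1U << RX_CMDQ_INT_B);
}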
1 : 0; + + ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "VF GRO hardware config cmd failed, ret = %d.\n", ret); + + return ret; +} + +static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev) +{ + struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg; + u16 tc_offset[HCLGE_COMM_MAX_TC_NUM]; + u16 tc_valid[HCLGE_COMM_MAX_TC_NUM]; + u16 tc_size[HCLGE_COMM_MAX_TC_NUM]; + int ret; + + if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { + ret = hclge_comm_set_rss_algo_key(&hdev->hw.hw, + rss_cfg->rss_algo, + rss_cfg->rss_hash_key); + if (ret) + return ret; + + ret = hclge_comm_set_rss_input_tuple(&hdev->nic, &hdev->hw.hw, + false, rss_cfg); + if (ret) + return ret; + } + + ret = hclge_comm_set_rss_indir_table(hdev->ae_dev, &hdev->hw.hw, + rss_cfg->rss_indirection_tbl); + if (ret) + return ret; + + hclge_comm_get_rss_tc_info(rss_cfg->rss_size, hdev->hw_tc_map, + tc_offset, tc_valid, tc_size); + + return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset, + tc_valid, tc_size); +} + +static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev) +{ + struct hnae3_handle *nic = &hdev->nic; + int ret; + + ret = hclgevf_en_hw_strip_rxvtag(nic, true); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to enable rx vlan offload, ret = %d\n", ret); + return ret; + } + + return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0, + false); +} + +static void hclgevf_flush_link_update(struct hclgevf_dev *hdev) +{ +#define HCLGEVF_FLUSH_LINK_TIMEOUT 100000 + + unsigned long last = hdev->serv_processed_cnt; + int i = 0; + + while (test_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state) && + i++ < HCLGEVF_FLUSH_LINK_TIMEOUT && + last == hdev->serv_processed_cnt) + usleep_range(1, 1); +} + +static void hclgevf_set_timer_task(struct hnae3_handle *handle, bool enable) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + + if (enable) { + hclgevf_task_schedule(hdev, 0); + } else { + set_bit(HCLGEVF_STATE_DOWN, &hdev->state); + + /* flush memory to make sure DOWN is seen by service task */ + smp_mb__before_atomic(); + hclgevf_flush_link_update(hdev); + } +} + +static int hclgevf_ae_start(struct hnae3_handle *handle) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + + clear_bit(HCLGEVF_STATE_DOWN, &hdev->state); + clear_bit(HCLGEVF_STATE_PF_PUSH_LINK_STATUS, &hdev->state); + + hclge_comm_reset_tqp_stats(handle); + + hclgevf_request_link_info(hdev); + + hclgevf_update_link_mode(hdev); + + return 0; +} + +static void hclgevf_ae_stop(struct hnae3_handle *handle) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + + set_bit(HCLGEVF_STATE_DOWN, &hdev->state); + + if (hdev->reset_type != HNAE3_VF_RESET) + hclgevf_reset_tqp(handle); + + hclge_comm_reset_tqp_stats(handle); + hclgevf_update_link_status(hdev, 0); +} + +static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive) +{ +#define HCLGEVF_STATE_ALIVE 1 +#define HCLGEVF_STATE_NOT_ALIVE 0 + + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + struct hclge_vf_to_pf_msg send_msg; + + hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_ALIVE, 0); + send_msg.data[0] = alive ? 
HCLGEVF_STATE_ALIVE : + HCLGEVF_STATE_NOT_ALIVE; + return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); +} + +static int hclgevf_client_start(struct hnae3_handle *handle) +{ + return hclgevf_set_alive(handle, true); +} + +static void hclgevf_client_stop(struct hnae3_handle *handle) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + int ret; + + ret = hclgevf_set_alive(handle, false); + if (ret) + dev_warn(&hdev->pdev->dev, + "%s failed %d\n", __func__, ret); +} + +static void hclgevf_state_init(struct hclgevf_dev *hdev) +{ + clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state); + clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state); + clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state); + + INIT_DELAYED_WORK(&hdev->service_task, hclgevf_service_task); + + mutex_init(&hdev->mbx_resp.mbx_mutex); + sema_init(&hdev->reset_sem, 1); + + spin_lock_init(&hdev->mac_table.mac_list_lock); + INIT_LIST_HEAD(&hdev->mac_table.uc_mac_list); + INIT_LIST_HEAD(&hdev->mac_table.mc_mac_list); + + /* bring the device down */ + set_bit(HCLGEVF_STATE_DOWN, &hdev->state); +} + +static void hclgevf_state_uninit(struct hclgevf_dev *hdev) +{ + set_bit(HCLGEVF_STATE_DOWN, &hdev->state); + set_bit(HCLGEVF_STATE_REMOVING, &hdev->state); + + if (hdev->service_task.work.func) + cancel_delayed_work_sync(&hdev->service_task); + + mutex_destroy(&hdev->mbx_resp.mbx_mutex); +} + +static int hclgevf_init_msi(struct hclgevf_dev *hdev) +{ + struct pci_dev *pdev = hdev->pdev; + int vectors; + int i; + + if (hnae3_dev_roce_supported(hdev)) + vectors = pci_alloc_irq_vectors(pdev, + hdev->roce_base_msix_offset + 1, + hdev->num_msi, + PCI_IRQ_MSIX); + else + vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM, + hdev->num_msi, + PCI_IRQ_MSI | PCI_IRQ_MSIX); + + if (vectors < 0) { + dev_err(&pdev->dev, + "failed(%d) to allocate MSI/MSI-X vectors\n", + vectors); + return vectors; + } + if (vectors < hdev->num_msi) + dev_warn(&hdev->pdev->dev, + "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n", + hdev->num_msi, vectors); + + hdev->num_msi = vectors; + hdev->num_msi_left = vectors; + + hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi, + sizeof(u16), GFP_KERNEL); + if (!hdev->vector_status) { + pci_free_irq_vectors(pdev); + return -ENOMEM; + } + + for (i = 0; i < hdev->num_msi; i++) + hdev->vector_status[i] = HCLGEVF_INVALID_VPORT; + + hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi, + sizeof(int), GFP_KERNEL); + if (!hdev->vector_irq) { + devm_kfree(&pdev->dev, hdev->vector_status); + pci_free_irq_vectors(pdev); + return -ENOMEM; + } + + return 0; +} + +static void hclgevf_uninit_msi(struct hclgevf_dev *hdev) +{ + struct pci_dev *pdev = hdev->pdev; + + devm_kfree(&pdev->dev, hdev->vector_status); + devm_kfree(&pdev->dev, hdev->vector_irq); + pci_free_irq_vectors(pdev); +} + +static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev) +{ + int ret; + + hclgevf_get_misc_vector(hdev); + + snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s", + HCLGEVF_NAME, pci_name(hdev->pdev)); + ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle, + 0, hdev->misc_vector.name, hdev); + if (ret) { + dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n", + hdev->misc_vector.vector_irq); + return ret; + } + + hclgevf_clear_event_cause(hdev, 0); + + /* enable misc. 
vector(vector 0) */ + hclgevf_enable_vector(&hdev->misc_vector, true); + + return ret; +} + +static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev) +{ + /* disable misc vector(vector 0) */ + hclgevf_enable_vector(&hdev->misc_vector, false); + synchronize_irq(hdev->misc_vector.vector_irq); + free_irq(hdev->misc_vector.vector_irq, hdev); + hclgevf_free_vector(hdev, 0); +} + +static void hclgevf_info_show(struct hclgevf_dev *hdev) +{ + struct device *dev = &hdev->pdev->dev; + + dev_info(dev, "VF info begin:\n"); + + dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps); + dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc); + dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc); + dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport); + dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map); + dev_info(dev, "PF media type of this VF: %u\n", + hdev->hw.mac.media_type); + + dev_info(dev, "VF info end.\n"); +} + +static int hclgevf_init_nic_client_instance(struct hnae3_ae_dev *ae_dev, + struct hnae3_client *client) +{ + struct hclgevf_dev *hdev = ae_dev->priv; + int rst_cnt = hdev->rst_stats.rst_cnt; + int ret; + + ret = client->ops->init_instance(&hdev->nic); + if (ret) + return ret; + + set_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state); + if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) || + rst_cnt != hdev->rst_stats.rst_cnt) { + clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state); + + client->ops->uninit_instance(&hdev->nic, 0); + return -EBUSY; + } + + hnae3_set_client_init_flag(client, ae_dev, 1); + + if (netif_msg_drv(&hdev->nic)) + hclgevf_info_show(hdev); + + return 0; +} + +static int hclgevf_init_roce_client_instance(struct hnae3_ae_dev *ae_dev, + struct hnae3_client *client) +{ + struct hclgevf_dev *hdev = ae_dev->priv; + int ret; + + if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client || + !hdev->nic_client) + return 0; + + ret = hclgevf_init_roce_base_info(hdev); + if (ret) + return ret; + + ret = client->ops->init_instance(&hdev->roce); + if (ret) + return ret; + + set_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state); + hnae3_set_client_init_flag(client, ae_dev, 1); + + return 0; +} + +static int hclgevf_init_client_instance(struct hnae3_client *client, + struct hnae3_ae_dev *ae_dev) +{ + struct hclgevf_dev *hdev = ae_dev->priv; + int ret; + + switch (client->type) { + case HNAE3_CLIENT_KNIC: + hdev->nic_client = client; + hdev->nic.client = client; + + ret = hclgevf_init_nic_client_instance(ae_dev, client); + if (ret) + goto clear_nic; + + ret = hclgevf_init_roce_client_instance(ae_dev, + hdev->roce_client); + if (ret) + goto clear_roce; + + break; + case HNAE3_CLIENT_ROCE: + if (hnae3_dev_roce_supported(hdev)) { + hdev->roce_client = client; + hdev->roce.client = client; + } + + ret = hclgevf_init_roce_client_instance(ae_dev, client); + if (ret) + goto clear_roce; + + break; + default: + return -EINVAL; + } + + return 0; + +clear_nic: + hdev->nic_client = NULL; + hdev->nic.client = NULL; + return ret; +clear_roce: + hdev->roce_client = NULL; + hdev->roce.client = NULL; + return ret; +} + +static void hclgevf_uninit_client_instance(struct hnae3_client *client, + struct hnae3_ae_dev *ae_dev) +{ + struct hclgevf_dev *hdev = ae_dev->priv; + + /* un-init roce, if it exists */ + if (hdev->roce_client) { + while (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) + msleep(HCLGEVF_WAIT_RESET_DONE); + clear_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state); + + hdev->roce_client->ops->uninit_instance(&hdev->roce, 0); + 
hdev->roce_client = NULL; + hdev->roce.client = NULL; + } + + /* un-init nic/unic, if this was not called by roce client */ + if (client->ops->uninit_instance && hdev->nic_client && + client->type != HNAE3_CLIENT_ROCE) { + while (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) + msleep(HCLGEVF_WAIT_RESET_DONE); + clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state); + + client->ops->uninit_instance(&hdev->nic, 0); + hdev->nic_client = NULL; + hdev->nic.client = NULL; + } +} + +static int hclgevf_dev_mem_map(struct hclgevf_dev *hdev) +{ + struct pci_dev *pdev = hdev->pdev; + struct hclgevf_hw *hw = &hdev->hw; + + /* for device does not have device memory, return directly */ + if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGEVF_MEM_BAR))) + return 0; + + hw->hw.mem_base = + devm_ioremap_wc(&pdev->dev, + pci_resource_start(pdev, HCLGEVF_MEM_BAR), + pci_resource_len(pdev, HCLGEVF_MEM_BAR)); + if (!hw->hw.mem_base) { + dev_err(&pdev->dev, "failed to map device memory\n"); + return -EFAULT; + } + + return 0; +} + +static int hclgevf_pci_init(struct hclgevf_dev *hdev) +{ + struct pci_dev *pdev = hdev->pdev; + struct hclgevf_hw *hw; + int ret; + + ret = pci_enable_device(pdev); + if (ret) { + dev_err(&pdev->dev, "failed to enable PCI device\n"); + return ret; + } + + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (ret) { + dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting"); + goto err_disable_device; + } + + ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME); + if (ret) { + dev_err(&pdev->dev, "PCI request regions failed %d\n", ret); + goto err_disable_device; + } + + pci_set_master(pdev); + hw = &hdev->hw; + hw->hw.io_base = pci_iomap(pdev, 2, 0); + if (!hw->hw.io_base) { + dev_err(&pdev->dev, "can't map configuration register space\n"); + ret = -ENOMEM; + goto err_clr_master; + } + + ret = hclgevf_dev_mem_map(hdev); + if (ret) + goto err_unmap_io_base; + + return 0; + +err_unmap_io_base: + pci_iounmap(pdev, hdev->hw.hw.io_base); +err_clr_master: + pci_clear_master(pdev); + pci_release_regions(pdev); +err_disable_device: + pci_disable_device(pdev); + + return ret; +} + +static void hclgevf_pci_uninit(struct hclgevf_dev *hdev) +{ + struct pci_dev *pdev = hdev->pdev; + + if (hdev->hw.hw.mem_base) + devm_iounmap(&pdev->dev, hdev->hw.hw.mem_base); + + pci_iounmap(pdev, hdev->hw.hw.io_base); + pci_clear_master(pdev); + pci_release_regions(pdev); + pci_disable_device(pdev); +} + +static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev) +{ + struct hclgevf_query_res_cmd *req; + struct hclge_desc desc; + int ret; + + hclgevf_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RSRC, true); + ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "query vf resource failed, ret = %d.\n", ret); + return ret; + } + + req = (struct hclgevf_query_res_cmd *)desc.data; + + if (hnae3_dev_roce_supported(hdev)) { + hdev->roce_base_msix_offset = + hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee), + HCLGEVF_MSIX_OFT_ROCEE_M, + HCLGEVF_MSIX_OFT_ROCEE_S); + hdev->num_roce_msix = + hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number), + HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S); + + /* nic's msix numbers is always equals to the roce's. */ + hdev->num_nic_msix = hdev->num_roce_msix; + + /* VF should have NIC vectors and Roce vectors, NIC vectors + * are queued before Roce vectors. The offset is fixed to 64. 
+ */ + hdev->num_msi = hdev->num_roce_msix + + hdev->roce_base_msix_offset; + } else { + hdev->num_msi = + hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number), + HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S); + + hdev->num_nic_msix = hdev->num_msi; + } + + if (hdev->num_nic_msix < HNAE3_MIN_VECTOR_NUM) { + dev_err(&hdev->pdev->dev, + "Just %u msi resources, not enough for vf(min:2).\n", + hdev->num_nic_msix); + return -EINVAL; + } + + return 0; +} + +static void hclgevf_set_default_dev_specs(struct hclgevf_dev *hdev) +{ +#define HCLGEVF_MAX_NON_TSO_BD_NUM 8U + + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); + + ae_dev->dev_specs.max_non_tso_bd_num = + HCLGEVF_MAX_NON_TSO_BD_NUM; + ae_dev->dev_specs.rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE; + ae_dev->dev_specs.rss_key_size = HCLGE_COMM_RSS_KEY_SIZE; + ae_dev->dev_specs.max_int_gl = HCLGEVF_DEF_MAX_INT_GL; + ae_dev->dev_specs.max_frm_size = HCLGEVF_MAC_MAX_FRAME; +} + +static void hclgevf_parse_dev_specs(struct hclgevf_dev *hdev, + struct hclge_desc *desc) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); + struct hclgevf_dev_specs_0_cmd *req0; + struct hclgevf_dev_specs_1_cmd *req1; + + req0 = (struct hclgevf_dev_specs_0_cmd *)desc[0].data; + req1 = (struct hclgevf_dev_specs_1_cmd *)desc[1].data; + + ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num; + ae_dev->dev_specs.rss_ind_tbl_size = + le16_to_cpu(req0->rss_ind_tbl_size); + ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max); + ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size); + ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl); + ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size); +} + +static void hclgevf_check_dev_specs(struct hclgevf_dev *hdev) +{ + struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs; + + if (!dev_specs->max_non_tso_bd_num) + dev_specs->max_non_tso_bd_num = HCLGEVF_MAX_NON_TSO_BD_NUM; + if (!dev_specs->rss_ind_tbl_size) + dev_specs->rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE; + if (!dev_specs->rss_key_size) + dev_specs->rss_key_size = HCLGE_COMM_RSS_KEY_SIZE; + if (!dev_specs->max_int_gl) + dev_specs->max_int_gl = HCLGEVF_DEF_MAX_INT_GL; + if (!dev_specs->max_frm_size) + dev_specs->max_frm_size = HCLGEVF_MAC_MAX_FRAME; +} + +static int hclgevf_query_dev_specs(struct hclgevf_dev *hdev) +{ + struct hclge_desc desc[HCLGEVF_QUERY_DEV_SPECS_BD_NUM]; + int ret; + int i; + + /* set default specifications as devices lower than version V3 do not + * support querying specifications from firmware. 
+ */ + if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) { + hclgevf_set_default_dev_specs(hdev); + return 0; + } + + for (i = 0; i < HCLGEVF_QUERY_DEV_SPECS_BD_NUM - 1; i++) { + hclgevf_cmd_setup_basic_desc(&desc[i], + HCLGE_OPC_QUERY_DEV_SPECS, true); + desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); + } + hclgevf_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true); + + ret = hclgevf_cmd_send(&hdev->hw, desc, HCLGEVF_QUERY_DEV_SPECS_BD_NUM); + if (ret) + return ret; + + hclgevf_parse_dev_specs(hdev, desc); + hclgevf_check_dev_specs(hdev); + + return 0; +} + +static int hclgevf_pci_reset(struct hclgevf_dev *hdev) +{ + struct pci_dev *pdev = hdev->pdev; + int ret = 0; + + if ((hdev->reset_type == HNAE3_VF_FULL_RESET || + hdev->reset_type == HNAE3_FLR_RESET) && + test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { + hclgevf_misc_irq_uninit(hdev); + hclgevf_uninit_msi(hdev); + clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); + } + + if (!test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { + pci_set_master(pdev); + ret = hclgevf_init_msi(hdev); + if (ret) { + dev_err(&pdev->dev, + "failed(%d) to init MSI/MSI-X\n", ret); + return ret; + } + + ret = hclgevf_misc_irq_init(hdev); + if (ret) { + hclgevf_uninit_msi(hdev); + dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n", + ret); + return ret; + } + + set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); + } + + return ret; +} + +static int hclgevf_clear_vport_list(struct hclgevf_dev *hdev) +{ + struct hclge_vf_to_pf_msg send_msg; + + hclgevf_build_send_msg(&send_msg, HCLGE_MBX_HANDLE_VF_TBL, + HCLGE_MBX_VPORT_LIST_CLEAR); + return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); +} + +static void hclgevf_init_rxd_adv_layout(struct hclgevf_dev *hdev) +{ + if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev)) + hclgevf_write_dev(&hdev->hw, HCLGEVF_RXD_ADV_LAYOUT_EN_REG, 1); +} + +static void hclgevf_uninit_rxd_adv_layout(struct hclgevf_dev *hdev) +{ + if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev)) + hclgevf_write_dev(&hdev->hw, HCLGEVF_RXD_ADV_LAYOUT_EN_REG, 0); +} + +static int hclgevf_reset_hdev(struct hclgevf_dev *hdev) +{ + struct pci_dev *pdev = hdev->pdev; + int ret; + + ret = hclgevf_pci_reset(hdev); + if (ret) { + dev_err(&pdev->dev, "pci reset failed %d\n", ret); + return ret; + } + + hclgevf_arq_init(hdev); + ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, + &hdev->fw_version, false, + hdev->reset_pending); + if (ret) { + dev_err(&pdev->dev, "cmd failed %d\n", ret); + return ret; + } + + ret = hclgevf_rss_init_hw(hdev); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed(%d) to initialize RSS\n", ret); + return ret; + } + + ret = hclgevf_config_gro(hdev); + if (ret) + return ret; + + ret = hclgevf_init_vlan_config(hdev); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed(%d) to initialize VLAN config\n", ret); + return ret; + } + + /* get current port based vlan state from PF */ + ret = hclgevf_get_port_base_vlan_filter_state(hdev); + if (ret) + return ret; + + set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state); + + hclgevf_init_rxd_adv_layout(hdev); + + dev_info(&hdev->pdev->dev, "Reset done\n"); + + return 0; +} + +static int hclgevf_init_hdev(struct hclgevf_dev *hdev) +{ + struct pci_dev *pdev = hdev->pdev; + int ret; + + ret = hclgevf_pci_init(hdev); + if (ret) + return ret; + + ret = hclgevf_devlink_init(hdev); + if (ret) + goto err_devlink_init; + + ret = hclge_comm_cmd_queue_init(hdev->pdev, &hdev->hw.hw); + if (ret) + goto err_cmd_queue_init; + + hclgevf_arq_init(hdev); 
+ ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, + &hdev->fw_version, false, + hdev->reset_pending); + if (ret) + goto err_cmd_init; + + /* Get vf resource */ + ret = hclgevf_query_vf_resource(hdev); + if (ret) + goto err_cmd_init; + + ret = hclgevf_query_dev_specs(hdev); + if (ret) { + dev_err(&pdev->dev, + "failed to query dev specifications, ret = %d\n", ret); + goto err_cmd_init; + } + + ret = hclgevf_init_msi(hdev); + if (ret) { + dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret); + goto err_cmd_init; + } + + hclgevf_state_init(hdev); + hdev->reset_level = HNAE3_VF_FUNC_RESET; + hdev->reset_type = HNAE3_NONE_RESET; + + ret = hclgevf_misc_irq_init(hdev); + if (ret) + goto err_misc_irq_init; + + set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); + + ret = hclgevf_configure(hdev); + if (ret) { + dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret); + goto err_config; + } + + ret = hclgevf_alloc_tqps(hdev); + if (ret) { + dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret); + goto err_config; + } + + ret = hclgevf_set_handle_info(hdev); + if (ret) + goto err_config; + + ret = hclgevf_config_gro(hdev); + if (ret) + goto err_config; + + /* Initialize RSS for this VF */ + ret = hclge_comm_rss_init_cfg(&hdev->nic, hdev->ae_dev, + &hdev->rss_cfg); + if (ret) { + dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret); + goto err_config; + } + + ret = hclgevf_rss_init_hw(hdev); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed(%d) to initialize RSS\n", ret); + goto err_config; + } + + /* ensure vf tbl list as empty before init */ + ret = hclgevf_clear_vport_list(hdev); + if (ret) { + dev_err(&pdev->dev, + "failed to clear tbl list configuration, ret = %d.\n", + ret); + goto err_config; + } + + ret = hclgevf_init_vlan_config(hdev); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed(%d) to initialize VLAN config\n", ret); + goto err_config; + } + + hclgevf_init_rxd_adv_layout(hdev); + + set_bit(HCLGEVF_STATE_SERVICE_INITED, &hdev->state); + + hdev->last_reset_time = jiffies; + dev_info(&hdev->pdev->dev, "finished initializing %s driver\n", + HCLGEVF_DRIVER_NAME); + + hclgevf_task_schedule(hdev, round_jiffies_relative(HZ)); + timer_setup(&hdev->reset_timer, hclgevf_reset_timer, 0); + + return 0; + +err_config: + hclgevf_misc_irq_uninit(hdev); +err_misc_irq_init: + hclgevf_state_uninit(hdev); + hclgevf_uninit_msi(hdev); +err_cmd_init: + hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw); +err_cmd_queue_init: + hclgevf_devlink_uninit(hdev); +err_devlink_init: + hclgevf_pci_uninit(hdev); + clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); + return ret; +} + +static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev) +{ + struct hclge_vf_to_pf_msg send_msg; + + hclgevf_state_uninit(hdev); + hclgevf_uninit_rxd_adv_layout(hdev); + + hclgevf_build_send_msg(&send_msg, HCLGE_MBX_VF_UNINIT, 0); + hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); + + if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { + hclgevf_misc_irq_uninit(hdev); + hclgevf_uninit_msi(hdev); + } + + hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw); + hclgevf_devlink_uninit(hdev); + hclgevf_pci_uninit(hdev); + hclgevf_uninit_mac_list(hdev); +} + +static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev) +{ + struct pci_dev *pdev = ae_dev->pdev; + int ret; + + ret = hclgevf_alloc_hdev(ae_dev); + if (ret) { + dev_err(&pdev->dev, "hclge device allocation failed\n"); + return ret; + } + + ret = hclgevf_init_hdev(ae_dev->priv); + if (ret) { + dev_err(&pdev->dev, "hclge device initialization 
failed\n"); + return ret; + } + + return 0; +} + +static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) +{ + struct hclgevf_dev *hdev = ae_dev->priv; + + hclgevf_uninit_hdev(hdev); + ae_dev->priv = NULL; +} + +static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev) +{ + struct hnae3_handle *nic = &hdev->nic; + struct hnae3_knic_private_info *kinfo = &nic->kinfo; + + return min_t(u32, hdev->rss_size_max, + hdev->num_tqps / kinfo->tc_info.num_tc); +} + +/** + * hclgevf_get_channels - Get the current channels enabled and max supported. + * @handle: hardware information for network interface + * @ch: ethtool channels structure + * + * We don't support separate tx and rx queues as channels. The other count + * represents how many queues are being used for control. max_combined counts + * how many queue pairs we can support. They may not be mapped 1 to 1 with + * q_vectors since we support a lot more queue pairs than q_vectors. + **/ +static void hclgevf_get_channels(struct hnae3_handle *handle, + struct ethtool_channels *ch) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + + ch->max_combined = hclgevf_get_max_channels(hdev); + ch->other_count = 0; + ch->max_other = 0; + ch->combined_count = handle->kinfo.rss_size; +} + +static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle, + u16 *alloc_tqps, u16 *max_rss_size) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + + *alloc_tqps = hdev->num_tqps; + *max_rss_size = hdev->rss_size_max; +} + +static void hclgevf_update_rss_size(struct hnae3_handle *handle, + u32 new_tqps_num) +{ + struct hnae3_knic_private_info *kinfo = &handle->kinfo; + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + u16 max_rss_size; + + kinfo->req_rss_size = new_tqps_num; + + max_rss_size = min_t(u16, hdev->rss_size_max, + hdev->num_tqps / kinfo->tc_info.num_tc); + + /* Use the user's configuration when it is not larger than + * max_rss_size, otherwise, use the maximum specification value. 
+ */ + if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size && + kinfo->req_rss_size <= max_rss_size) + kinfo->rss_size = kinfo->req_rss_size; + else if (kinfo->rss_size > max_rss_size || + (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) + kinfo->rss_size = max_rss_size; + + kinfo->num_tqps = kinfo->tc_info.num_tc * kinfo->rss_size; +} + +static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num, + bool rxfh_configured) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + struct hnae3_knic_private_info *kinfo = &handle->kinfo; + u16 tc_offset[HCLGE_COMM_MAX_TC_NUM]; + u16 tc_valid[HCLGE_COMM_MAX_TC_NUM]; + u16 tc_size[HCLGE_COMM_MAX_TC_NUM]; + u16 cur_rss_size = kinfo->rss_size; + u16 cur_tqps = kinfo->num_tqps; + u32 *rss_indir; + unsigned int i; + int ret; + + hclgevf_update_rss_size(handle, new_tqps_num); + + hclge_comm_get_rss_tc_info(kinfo->rss_size, hdev->hw_tc_map, + tc_offset, tc_valid, tc_size); + ret = hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset, + tc_valid, tc_size); + if (ret) + return ret; + + /* RSS indirection table has been configured by user */ + if (rxfh_configured) + goto out; + + /* Reinitializes the rss indirect table according to the new RSS size */ + rss_indir = kcalloc(hdev->ae_dev->dev_specs.rss_ind_tbl_size, + sizeof(u32), GFP_KERNEL); + if (!rss_indir) + return -ENOMEM; + + for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++) + rss_indir[i] = i % kinfo->rss_size; + + hdev->rss_cfg.rss_size = kinfo->rss_size; + + ret = hclgevf_set_rss(handle, rss_indir, NULL, 0); + if (ret) + dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n", + ret); + + kfree(rss_indir); + +out: + if (!ret) + dev_info(&hdev->pdev->dev, + "Channels changed, rss_size from %u to %u, tqps from %u to %u", + cur_rss_size, kinfo->rss_size, + cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc); + + return ret; +} + +static int hclgevf_get_status(struct hnae3_handle *handle) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + + return hdev->hw.mac.link; +} + +static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle, + u8 *auto_neg, u32 *speed, + u8 *duplex, u32 *lane_num) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + + if (speed) + *speed = hdev->hw.mac.speed; + if (duplex) + *duplex = hdev->hw.mac.duplex; + if (auto_neg) + *auto_neg = AUTONEG_DISABLE; +} + +void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed, + u8 duplex) +{ + hdev->hw.mac.speed = speed; + hdev->hw.mac.duplex = duplex; +} + +static int hclgevf_gro_en(struct hnae3_handle *handle, bool enable) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + bool gro_en_old = hdev->gro_en; + int ret; + + hdev->gro_en = enable; + ret = hclgevf_config_gro(hdev); + if (ret) + hdev->gro_en = gro_en_old; + + return ret; +} + +static void hclgevf_get_media_type(struct hnae3_handle *handle, u8 *media_type, + u8 *module_type) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + + if (media_type) + *media_type = hdev->hw.mac.media_type; + + if (module_type) + *module_type = hdev->hw.mac.module_type; +} + +static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + + return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING); +} + +static bool hclgevf_get_cmdq_stat(struct hnae3_handle *handle) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + + return test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); +} + 
+static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + + return test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); +} + +static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + + return hdev->rst_stats.hw_rst_done_cnt; +} + +static void hclgevf_get_link_mode(struct hnae3_handle *handle, + unsigned long *supported, + unsigned long *advertising) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + + *supported = hdev->hw.mac.supported; + *advertising = hdev->hw.mac.advertising; +} + +#define MAX_SEPARATE_NUM 4 +#define SEPARATOR_VALUE 0xFDFCFBFA +#define REG_NUM_PER_LINE 4 +#define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32)) + +static int hclgevf_get_regs_len(struct hnae3_handle *handle) +{ + int cmdq_lines, common_lines, ring_lines, tqp_intr_lines; + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + + cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1; + common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1; + ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1; + tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1; + + return (cmdq_lines + common_lines + ring_lines * hdev->num_tqps + + tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE; +} + +static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version, + void *data) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + int i, j, reg_um, separator_num; + u32 *reg = data; + + *version = hdev->fw_version; + + /* fetching per-VF registers values from VF PCIe register space */ + reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32); + separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; + for (i = 0; i < reg_um; i++) + *reg++ = hclgevf_read_dev(&hdev->hw, cmdq_reg_addr_list[i]); + for (i = 0; i < separator_num; i++) + *reg++ = SEPARATOR_VALUE; + + reg_um = sizeof(common_reg_addr_list) / sizeof(u32); + separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; + for (i = 0; i < reg_um; i++) + *reg++ = hclgevf_read_dev(&hdev->hw, common_reg_addr_list[i]); + for (i = 0; i < separator_num; i++) + *reg++ = SEPARATOR_VALUE; + + reg_um = sizeof(ring_reg_addr_list) / sizeof(u32); + separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; + for (j = 0; j < hdev->num_tqps; j++) { + for (i = 0; i < reg_um; i++) + *reg++ = hclgevf_read_dev(&hdev->hw, + ring_reg_addr_list[i] + + HCLGEVF_TQP_REG_SIZE * j); + for (i = 0; i < separator_num; i++) + *reg++ = SEPARATOR_VALUE; + } + + reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32); + separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; + for (j = 0; j < hdev->num_msi_used - 1; j++) { + for (i = 0; i < reg_um; i++) + *reg++ = hclgevf_read_dev(&hdev->hw, + tqp_intr_reg_addr_list[i] + + 4 * j); + for (i = 0; i < separator_num; i++) + *reg++ = SEPARATOR_VALUE; + } +} + +void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state, + struct hclge_mbx_port_base_vlan *port_base_vlan) +{ + struct hnae3_handle *nic = &hdev->nic; + struct hclge_vf_to_pf_msg send_msg; + int ret; + + rtnl_lock(); + + if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) || + test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) { + dev_warn(&hdev->pdev->dev, + "is resetting when updating port based vlan info\n"); + rtnl_unlock(); + return; + } + + ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT); + if (ret) { + rtnl_unlock(); + return; + 
} + + /* send msg to PF and wait update port based vlan info */ + hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN, + HCLGE_MBX_PORT_BASE_VLAN_CFG); + memcpy(send_msg.data, port_base_vlan, sizeof(*port_base_vlan)); + ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); + if (!ret) { + if (state == HNAE3_PORT_BASE_VLAN_DISABLE) + nic->port_base_vlan_state = state; + else + nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE; + } + + hclgevf_notify_client(hdev, HNAE3_UP_CLIENT); + rtnl_unlock(); +} + +static const struct hnae3_ae_ops hclgevf_ops = { + .init_ae_dev = hclgevf_init_ae_dev, + .uninit_ae_dev = hclgevf_uninit_ae_dev, + .reset_prepare = hclgevf_reset_prepare_general, + .reset_done = hclgevf_reset_done, + .init_client_instance = hclgevf_init_client_instance, + .uninit_client_instance = hclgevf_uninit_client_instance, + .start = hclgevf_ae_start, + .stop = hclgevf_ae_stop, + .client_start = hclgevf_client_start, + .client_stop = hclgevf_client_stop, + .map_ring_to_vector = hclgevf_map_ring_to_vector, + .unmap_ring_from_vector = hclgevf_unmap_ring_from_vector, + .get_vector = hclgevf_get_vector, + .put_vector = hclgevf_put_vector, + .reset_queue = hclgevf_reset_tqp, + .get_mac_addr = hclgevf_get_mac_addr, + .set_mac_addr = hclgevf_set_mac_addr, + .add_uc_addr = hclgevf_add_uc_addr, + .rm_uc_addr = hclgevf_rm_uc_addr, + .add_mc_addr = hclgevf_add_mc_addr, + .rm_mc_addr = hclgevf_rm_mc_addr, + .get_stats = hclgevf_get_stats, + .update_stats = hclgevf_update_stats, + .get_strings = hclgevf_get_strings, + .get_sset_count = hclgevf_get_sset_count, + .get_rss_key_size = hclge_comm_get_rss_key_size, + .get_rss = hclgevf_get_rss, + .set_rss = hclgevf_set_rss, + .get_rss_tuple = hclgevf_get_rss_tuple, + .set_rss_tuple = hclgevf_set_rss_tuple, + .get_tc_size = hclgevf_get_tc_size, + .get_fw_version = hclgevf_get_fw_version, + .set_vlan_filter = hclgevf_set_vlan_filter, + .enable_vlan_filter = hclgevf_enable_vlan_filter, + .enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag, + .reset_event = hclgevf_reset_event, + .set_default_reset_request = hclgevf_set_def_reset_request, + .set_channels = hclgevf_set_channels, + .get_channels = hclgevf_get_channels, + .get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info, + .get_regs_len = hclgevf_get_regs_len, + .get_regs = hclgevf_get_regs, + .get_status = hclgevf_get_status, + .get_ksettings_an_result = hclgevf_get_ksettings_an_result, + .get_media_type = hclgevf_get_media_type, + .get_hw_reset_stat = hclgevf_get_hw_reset_stat, + .ae_dev_resetting = hclgevf_ae_dev_resetting, + .ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt, + .set_gro_en = hclgevf_gro_en, + .set_mtu = hclgevf_set_mtu, + .get_global_queue_id = hclgevf_get_qid_global, + .set_timer_task = hclgevf_set_timer_task, + .get_link_mode = hclgevf_get_link_mode, + .set_promisc_mode = hclgevf_set_promisc_mode, + .request_update_promisc_mode = hclgevf_request_update_promisc_mode, + .get_cmdq_stat = hclgevf_get_cmdq_stat, +}; + +static struct hnae3_ae_algo ae_algovf = { + .ops = &hclgevf_ops, + .pdev_id_table = ae_algovf_pci_tbl, +}; + +static int __init hclgevf_init(void) +{ + pr_info("%s is initializing\n", HCLGEVF_NAME); + + hclgevf_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGEVF_NAME); + if (!hclgevf_wq) { + pr_err("%s: failed to create workqueue\n", HCLGEVF_NAME); + return -ENOMEM; + } + + hnae3_register_ae_algo(&ae_algovf); + + return 0; +} + +static void __exit hclgevf_exit(void) +{ + hnae3_unregister_ae_algo(&ae_algovf); + destroy_workqueue(hclgevf_wq); +} 
+module_init(hclgevf_init); +module_exit(hclgevf_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Huawei Tech. Co., Ltd."); +MODULE_DESCRIPTION("HCLGEVF Driver"); +MODULE_VERSION(HCLGEVF_MOD_VERSION); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h new file mode 100644 index 000000000..d65ace07b --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h @@ -0,0 +1,298 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2016-2017 Hisilicon Limited. */ + +#ifndef __HCLGEVF_MAIN_H +#define __HCLGEVF_MAIN_H +#include +#include +#include +#include +#include "hclge_mbx.h" +#include "hclgevf_cmd.h" +#include "hnae3.h" +#include "hclge_comm_rss.h" +#include "hclge_comm_tqp_stats.h" + +#define HCLGEVF_MOD_VERSION "1.0" +#define HCLGEVF_DRIVER_NAME "hclgevf" + +#define HCLGEVF_MAX_VLAN_ID 4095 +#define HCLGEVF_MISC_VECTOR_NUM 0 + +#define HCLGEVF_INVALID_VPORT 0xffff +#define HCLGEVF_GENERAL_TASK_INTERVAL 5 +#define HCLGEVF_KEEP_ALIVE_TASK_INTERVAL 2 + +/* This number in actual depends upon the total number of VFs + * created by physical function. But the maximum number of + * possible vector-per-VF is {VFn(1-32), VECTn(32 + 1)}. + */ +#define HCLGEVF_MAX_VF_VECTOR_NUM (32 + 1) + +#define HCLGEVF_VECTOR_REG_BASE 0x20000 +#define HCLGEVF_MISC_VECTOR_REG_BASE 0x20400 +#define HCLGEVF_VECTOR_REG_OFFSET 0x4 +#define HCLGEVF_VECTOR_VF_OFFSET 0x100000 + +/* bar registers for common func */ +#define HCLGEVF_GRO_EN_REG 0x28000 +#define HCLGEVF_RXD_ADV_LAYOUT_EN_REG 0x28008 + +/* bar registers for rcb */ +#define HCLGEVF_RING_RX_ADDR_L_REG 0x80000 +#define HCLGEVF_RING_RX_ADDR_H_REG 0x80004 +#define HCLGEVF_RING_RX_BD_NUM_REG 0x80008 +#define HCLGEVF_RING_RX_BD_LENGTH_REG 0x8000C +#define HCLGEVF_RING_RX_MERGE_EN_REG 0x80014 +#define HCLGEVF_RING_RX_TAIL_REG 0x80018 +#define HCLGEVF_RING_RX_HEAD_REG 0x8001C +#define HCLGEVF_RING_RX_FBD_NUM_REG 0x80020 +#define HCLGEVF_RING_RX_OFFSET_REG 0x80024 +#define HCLGEVF_RING_RX_FBD_OFFSET_REG 0x80028 +#define HCLGEVF_RING_RX_STASH_REG 0x80030 +#define HCLGEVF_RING_RX_BD_ERR_REG 0x80034 +#define HCLGEVF_RING_TX_ADDR_L_REG 0x80040 +#define HCLGEVF_RING_TX_ADDR_H_REG 0x80044 +#define HCLGEVF_RING_TX_BD_NUM_REG 0x80048 +#define HCLGEVF_RING_TX_PRIORITY_REG 0x8004C +#define HCLGEVF_RING_TX_TC_REG 0x80050 +#define HCLGEVF_RING_TX_MERGE_EN_REG 0x80054 +#define HCLGEVF_RING_TX_TAIL_REG 0x80058 +#define HCLGEVF_RING_TX_HEAD_REG 0x8005C +#define HCLGEVF_RING_TX_FBD_NUM_REG 0x80060 +#define HCLGEVF_RING_TX_OFFSET_REG 0x80064 +#define HCLGEVF_RING_TX_EBD_NUM_REG 0x80068 +#define HCLGEVF_RING_TX_EBD_OFFSET_REG 0x80070 +#define HCLGEVF_RING_TX_BD_ERR_REG 0x80074 +#define HCLGEVF_RING_EN_REG 0x80090 + +/* bar registers for tqp interrupt */ +#define HCLGEVF_TQP_INTR_CTRL_REG 0x20000 +#define HCLGEVF_TQP_INTR_GL0_REG 0x20100 +#define HCLGEVF_TQP_INTR_GL1_REG 0x20200 +#define HCLGEVF_TQP_INTR_GL2_REG 0x20300 +#define HCLGEVF_TQP_INTR_RL_REG 0x20900 + +/* CMDQ register bits for RX event(=MBX event) */ +#define HCLGEVF_VECTOR0_RX_CMDQ_INT_B 1 +/* RST register bits for RESET event */ +#define HCLGEVF_VECTOR0_RST_INT_B 2 + +#define HCLGEVF_TQP_RESET_TRY_TIMES 10 +/* Reset related Registers */ +#define HCLGEVF_RST_ING 0x20C00 +#define HCLGEVF_FUN_RST_ING_BIT BIT(0) +#define HCLGEVF_GLOBAL_RST_ING_BIT BIT(5) +#define HCLGEVF_CORE_RST_ING_BIT BIT(6) +#define HCLGEVF_IMP_RST_ING_BIT BIT(7) +#define HCLGEVF_RST_ING_BITS \ + (HCLGEVF_FUN_RST_ING_BIT | HCLGEVF_GLOBAL_RST_ING_BIT | \ + 
HCLGEVF_CORE_RST_ING_BIT | HCLGEVF_IMP_RST_ING_BIT) + +#define HCLGEVF_VF_RST_ING 0x07008 +#define HCLGEVF_VF_RST_ING_BIT BIT(16) + +#define HCLGEVF_WAIT_RESET_DONE 100 + +#define HCLGEVF_RSS_IND_TBL_SIZE 512 + +#define HCLGEVF_TQP_MEM_SIZE 0x10000 +#define HCLGEVF_MEM_BAR 4 +/* in the bar4, the first half is for roce, and the second half is for nic */ +#define HCLGEVF_NIC_MEM_OFFSET(hdev) \ + (pci_resource_len((hdev)->pdev, HCLGEVF_MEM_BAR) >> 1) +#define HCLGEVF_TQP_MEM_OFFSET(hdev, i) \ + (HCLGEVF_NIC_MEM_OFFSET(hdev) + HCLGEVF_TQP_MEM_SIZE * (i)) + +#define HCLGEVF_MAC_MAX_FRAME 9728 + +#define HCLGEVF_STATS_TIMER_INTERVAL 36U + +#define hclgevf_read_dev(a, reg) \ + hclge_comm_read_reg((a)->hw.io_base, reg) +#define hclgevf_write_dev(a, reg, value) \ + hclge_comm_write_reg((a)->hw.io_base, reg, value) + +enum hclgevf_evt_cause { + HCLGEVF_VECTOR0_EVENT_RST, + HCLGEVF_VECTOR0_EVENT_MBX, + HCLGEVF_VECTOR0_EVENT_OTHER, +}; + +/* states of hclgevf device & tasks */ +enum hclgevf_states { + /* device states */ + HCLGEVF_STATE_DOWN, + HCLGEVF_STATE_DISABLED, + HCLGEVF_STATE_IRQ_INITED, + HCLGEVF_STATE_REMOVING, + HCLGEVF_STATE_NIC_REGISTERED, + HCLGEVF_STATE_ROCE_REGISTERED, + HCLGEVF_STATE_SERVICE_INITED, + /* task states */ + HCLGEVF_STATE_RST_SERVICE_SCHED, + HCLGEVF_STATE_RST_HANDLING, + HCLGEVF_STATE_MBX_SERVICE_SCHED, + HCLGEVF_STATE_MBX_HANDLING, + HCLGEVF_STATE_LINK_UPDATING, + HCLGEVF_STATE_PROMISC_CHANGED, + HCLGEVF_STATE_RST_FAIL, + HCLGEVF_STATE_PF_PUSH_LINK_STATUS, +}; + +struct hclgevf_mac { + u8 media_type; + u8 module_type; + u8 mac_addr[ETH_ALEN]; + int link; + u8 duplex; + u32 speed; + u64 supported; + u64 advertising; +}; + +struct hclgevf_hw { + struct hclge_comm_hw hw; + int num_vec; + struct hclgevf_mac mac; +}; + +struct hclgevf_cfg { + u8 tc_num; + u16 tqp_desc_num; + u16 rx_buf_len; + u8 phy_addr; + u8 media_type; + u8 mac_addr[ETH_ALEN]; + u32 numa_node_map; +}; + +struct hclgevf_misc_vector { + u8 __iomem *addr; + int vector_irq; + char name[HNAE3_INT_NAME_LEN]; +}; + +struct hclgevf_rst_stats { + u32 rst_cnt; /* the number of reset */ + u32 vf_func_rst_cnt; /* the number of VF function reset */ + u32 flr_rst_cnt; /* the number of FLR */ + u32 vf_rst_cnt; /* the number of VF reset */ + u32 rst_done_cnt; /* the number of reset completed */ + u32 hw_rst_done_cnt; /* the number of HW reset completed */ + u32 rst_fail_cnt; /* the number of VF reset fail */ +}; + +enum HCLGEVF_MAC_ADDR_TYPE { + HCLGEVF_MAC_ADDR_UC, + HCLGEVF_MAC_ADDR_MC +}; + +enum HCLGEVF_MAC_NODE_STATE { + HCLGEVF_MAC_TO_ADD, + HCLGEVF_MAC_TO_DEL, + HCLGEVF_MAC_ACTIVE +}; + +struct hclgevf_mac_addr_node { + struct list_head node; + enum HCLGEVF_MAC_NODE_STATE state; + u8 mac_addr[ETH_ALEN]; +}; + +struct hclgevf_mac_table_cfg { + spinlock_t mac_list_lock; /* protect mac address need to add/detele */ + struct list_head uc_mac_list; + struct list_head mc_mac_list; +}; + +struct hclgevf_dev { + struct pci_dev *pdev; + struct hnae3_ae_dev *ae_dev; + struct hclgevf_hw hw; + struct hclgevf_misc_vector misc_vector; + struct hclge_comm_rss_cfg rss_cfg; + unsigned long state; + unsigned long flr_state; + unsigned long default_reset_request; + unsigned long last_reset_time; + enum hnae3_reset_type reset_level; + unsigned long reset_pending; + enum hnae3_reset_type reset_type; + struct timer_list reset_timer; + +#define HCLGEVF_RESET_REQUESTED 0 +#define HCLGEVF_RESET_PENDING 1 + unsigned long reset_state; /* requested, pending */ + struct hclgevf_rst_stats rst_stats; + u32 reset_attempts; + struct semaphore 
reset_sem; /* protect reset process */ + + u32 fw_version; + u16 mbx_api_version; + u16 num_tqps; /* num task queue pairs of this VF */ + + u16 alloc_rss_size; /* allocated RSS task queue */ + u16 rss_size_max; /* HW defined max RSS task queue */ + + u16 num_alloc_vport; /* num vports this driver supports */ + u32 numa_node_mask; + u16 rx_buf_len; + u16 num_tx_desc; /* desc num of per tx queue */ + u16 num_rx_desc; /* desc num of per rx queue */ + u8 hw_tc_map; + u8 has_pf_mac; + + u16 num_msi; + u16 num_msi_left; + u16 num_msi_used; + u16 num_nic_msix; /* Num of nic vectors for this VF */ + u16 num_roce_msix; /* Num of roce vectors for this VF */ + u16 roce_base_msix_offset; + u16 *vector_status; + int *vector_irq; + + bool gro_en; + + unsigned long vlan_del_fail_bmap[BITS_TO_LONGS(VLAN_N_VID)]; + + struct hclgevf_mac_table_cfg mac_table; + + struct hclgevf_mbx_resp_status mbx_resp; /* mailbox response */ + struct hclgevf_mbx_arq_ring arq; /* mailbox async rx queue */ + + struct delayed_work service_task; + + struct hclge_comm_tqp *htqp; + + struct hnae3_handle nic; + struct hnae3_handle roce; + + struct hnae3_client *nic_client; + struct hnae3_client *roce_client; + u32 flag; + unsigned long serv_processed_cnt; + unsigned long last_serv_processed; + + struct devlink *devlink; +}; + +static inline bool hclgevf_is_reset_pending(struct hclgevf_dev *hdev) +{ + return !!hdev->reset_pending; +} + +int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, + struct hclge_vf_to_pf_msg *send_msg, bool need_resp, + u8 *resp_data, u16 resp_len); +void hclgevf_mbx_handler(struct hclgevf_dev *hdev); +void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev); + +void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state); +void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed, + u8 duplex); +void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev); +void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev); +void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state, + struct hclge_mbx_port_base_vlan *port_base_vlan); +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c new file mode 100644 index 000000000..85c2a634c --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c @@ -0,0 +1,388 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. + +#include "hclge_mbx.h" +#include "hclgevf_main.h" +#include "hnae3.h" + +#define CREATE_TRACE_POINTS +#include "hclgevf_trace.h" + +static int hclgevf_resp_to_errno(u16 resp_code) +{ + return resp_code ? -resp_code : 0; +} + +#define HCLGEVF_MBX_MATCH_ID_START 1 +static void hclgevf_reset_mbx_resp_status(struct hclgevf_dev *hdev) +{ + /* this function should be called with mbx_resp.mbx_mutex held + * to protect the received_response from race condition + */ + hdev->mbx_resp.received_resp = false; + hdev->mbx_resp.origin_mbx_msg = 0; + hdev->mbx_resp.resp_status = 0; + hdev->mbx_resp.match_id++; + /* Update match_id and ensure the value of match_id is not zero */ + if (hdev->mbx_resp.match_id == 0) + hdev->mbx_resp.match_id = HCLGEVF_MBX_MATCH_ID_START; + memset(hdev->mbx_resp.additional_info, 0, HCLGE_MBX_MAX_RESP_DATA_SIZE); +} + +/* hclgevf_get_mbx_resp: used to get a response from PF after VF sends a mailbox + * message to PF. + * @hdev: pointer to struct hclgevf_dev + * @code0: the message opcode VF send to PF. + * @code1: the message sub-opcode VF send to PF. 
+ * @resp_data: pointer to store response data from PF to VF. + * @resp_len: the length of resp_data from PF to VF. + */ +static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1, + u8 *resp_data, u16 resp_len) +{ +#define HCLGEVF_MAX_TRY_TIMES 500 +#define HCLGEVF_SLEEP_USECOND 1000 + struct hclgevf_mbx_resp_status *mbx_resp; + u16 r_code0, r_code1; + int i = 0; + + if (resp_len > HCLGE_MBX_MAX_RESP_DATA_SIZE) { + dev_err(&hdev->pdev->dev, + "VF mbx response len(=%u) exceeds maximum(=%u)\n", + resp_len, + HCLGE_MBX_MAX_RESP_DATA_SIZE); + return -EINVAL; + } + + while ((!hdev->mbx_resp.received_resp) && (i < HCLGEVF_MAX_TRY_TIMES)) { + if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, + &hdev->hw.hw.comm_state)) + return -EIO; + + usleep_range(HCLGEVF_SLEEP_USECOND, HCLGEVF_SLEEP_USECOND * 2); + i++; + } + + /* ensure additional_info will be seen after received_resp */ + smp_rmb(); + + if (i >= HCLGEVF_MAX_TRY_TIMES) { + dev_err(&hdev->pdev->dev, + "VF could not get mbx(%u,%u) resp(=%d) from PF in %d tries\n", + code0, code1, hdev->mbx_resp.received_resp, i); + return -EIO; + } + + mbx_resp = &hdev->mbx_resp; + r_code0 = (u16)(mbx_resp->origin_mbx_msg >> 16); + r_code1 = (u16)(mbx_resp->origin_mbx_msg & 0xff); + + if (mbx_resp->resp_status) + return mbx_resp->resp_status; + + if (resp_data) + memcpy(resp_data, &mbx_resp->additional_info[0], resp_len); + + hclgevf_reset_mbx_resp_status(hdev); + + if (!(r_code0 == code0 && r_code1 == code1 && !mbx_resp->resp_status)) { + dev_err(&hdev->pdev->dev, + "VF could not match resp code(code0=%u,code1=%u), %d\n", + code0, code1, mbx_resp->resp_status); + dev_err(&hdev->pdev->dev, + "VF could not match resp r_code(r_code0=%u,r_code1=%u)\n", + r_code0, r_code1); + return -EIO; + } + + return 0; +} + +int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, + struct hclge_vf_to_pf_msg *send_msg, bool need_resp, + u8 *resp_data, u16 resp_len) +{ + struct hclge_mbx_vf_to_pf_cmd *req; + struct hclge_desc desc; + int status; + + req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data; + + if (!send_msg) { + dev_err(&hdev->pdev->dev, + "failed to send mbx, msg is NULL\n"); + return -EINVAL; + } + + hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false); + if (need_resp) + hnae3_set_bit(req->mbx_need_resp, HCLGE_MBX_NEED_RESP_B, 1); + + memcpy(&req->msg, send_msg, sizeof(struct hclge_vf_to_pf_msg)); + + if (test_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state)) + trace_hclge_vf_mbx_send(hdev, req); + + /* synchronous send */ + if (need_resp) { + mutex_lock(&hdev->mbx_resp.mbx_mutex); + hclgevf_reset_mbx_resp_status(hdev); + req->match_id = cpu_to_le16(hdev->mbx_resp.match_id); + status = hclgevf_cmd_send(&hdev->hw, &desc, 1); + if (status) { + dev_err(&hdev->pdev->dev, + "VF failed(=%d) to send mbx message to PF\n", + status); + mutex_unlock(&hdev->mbx_resp.mbx_mutex); + return status; + } + + status = hclgevf_get_mbx_resp(hdev, send_msg->code, + send_msg->subcode, resp_data, + resp_len); + mutex_unlock(&hdev->mbx_resp.mbx_mutex); + } else { + /* asynchronous send */ + status = hclgevf_cmd_send(&hdev->hw, &desc, 1); + if (status) { + dev_err(&hdev->pdev->dev, + "VF failed(=%d) to send mbx message to PF\n", + status); + return status; + } + } + + return status; +} + +static bool hclgevf_cmd_crq_empty(struct hclgevf_hw *hw) +{ + u32 tail = hclgevf_read_dev(hw, HCLGE_COMM_NIC_CRQ_TAIL_REG); + + return tail == hw->hw.cmq.crq.next_to_use; +} + +static void hclgevf_handle_mbx_response(struct hclgevf_dev *hdev, + struct hclge_mbx_pf_to_vf_cmd *req) +{ 
+ u16 vf_mbx_msg_subcode = le16_to_cpu(req->msg.vf_mbx_msg_subcode); + u16 vf_mbx_msg_code = le16_to_cpu(req->msg.vf_mbx_msg_code); + struct hclgevf_mbx_resp_status *resp = &hdev->mbx_resp; + u16 resp_status = le16_to_cpu(req->msg.resp_status); + u16 match_id = le16_to_cpu(req->match_id); + + if (resp->received_resp) + dev_warn(&hdev->pdev->dev, + "VF mbx resp flag not clear(%u)\n", + vf_mbx_msg_code); + + resp->origin_mbx_msg = (vf_mbx_msg_code << 16); + resp->origin_mbx_msg |= vf_mbx_msg_subcode; + resp->resp_status = hclgevf_resp_to_errno(resp_status); + memcpy(resp->additional_info, req->msg.resp_data, + HCLGE_MBX_MAX_RESP_DATA_SIZE * sizeof(u8)); + + /* ensure additional_info will be seen before setting received_resp */ + smp_wmb(); + + if (match_id) { + /* If match_id is not zero, it means PF support match_id. + * if the match_id is right, VF get the right response, or + * ignore the response. and driver will clear hdev->mbx_resp + * when send next message which need response. + */ + if (match_id == resp->match_id) + resp->received_resp = true; + } else { + resp->received_resp = true; + } +} + +static void hclgevf_handle_mbx_msg(struct hclgevf_dev *hdev, + struct hclge_mbx_pf_to_vf_cmd *req) +{ + /* we will drop the async msg if we find ARQ as full + * and continue with next message + */ + if (atomic_read(&hdev->arq.count) >= + HCLGE_MBX_MAX_ARQ_MSG_NUM) { + dev_warn(&hdev->pdev->dev, + "Async Q full, dropping msg(%u)\n", + le16_to_cpu(req->msg.code)); + return; + } + + /* tail the async message in arq */ + memcpy(hdev->arq.msg_q[hdev->arq.tail], &req->msg, + HCLGE_MBX_MAX_ARQ_MSG_SIZE * sizeof(u16)); + hclge_mbx_tail_ptr_move_arq(hdev->arq); + atomic_inc(&hdev->arq.count); + + hclgevf_mbx_task_schedule(hdev); +} + +void hclgevf_mbx_handler(struct hclgevf_dev *hdev) +{ + struct hclge_mbx_pf_to_vf_cmd *req; + struct hclge_comm_cmq_ring *crq; + struct hclge_desc *desc; + u16 flag; + u16 code; + + crq = &hdev->hw.hw.cmq.crq; + + while (!hclgevf_cmd_crq_empty(&hdev->hw)) { + if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, + &hdev->hw.hw.comm_state)) { + dev_info(&hdev->pdev->dev, "vf crq need init\n"); + return; + } + + desc = &crq->desc[crq->next_to_use]; + req = (struct hclge_mbx_pf_to_vf_cmd *)desc->data; + + flag = le16_to_cpu(crq->desc[crq->next_to_use].flag); + code = le16_to_cpu(req->msg.code); + if (unlikely(!hnae3_get_bit(flag, HCLGEVF_CMDQ_RX_OUTVLD_B))) { + dev_warn(&hdev->pdev->dev, + "dropped invalid mailbox message, code = %u\n", + code); + + /* dropping/not processing this invalid message */ + crq->desc[crq->next_to_use].flag = 0; + hclge_mbx_ring_ptr_move_crq(crq); + continue; + } + + trace_hclge_vf_mbx_get(hdev, req); + + /* synchronous messages are time critical and need preferential + * treatment. Therefore, we need to acknowledge all the sync + * responses as quickly as possible so that waiting tasks do not + * timeout and simultaneously queue the async messages for later + * prcessing in context of mailbox task i.e. the slow path. 
+ */ + switch (code) { + case HCLGE_MBX_PF_VF_RESP: + hclgevf_handle_mbx_response(hdev, req); + break; + case HCLGE_MBX_LINK_STAT_CHANGE: + case HCLGE_MBX_ASSERTING_RESET: + case HCLGE_MBX_LINK_STAT_MODE: + case HCLGE_MBX_PUSH_VLAN_INFO: + case HCLGE_MBX_PUSH_PROMISC_INFO: + hclgevf_handle_mbx_msg(hdev, req); + break; + default: + dev_err(&hdev->pdev->dev, + "VF received unsupported(%u) mbx msg from PF\n", + code); + break; + } + crq->desc[crq->next_to_use].flag = 0; + hclge_mbx_ring_ptr_move_crq(crq); + } + + /* Write back CMDQ_RQ header pointer, M7 need this pointer */ + hclgevf_write_dev(&hdev->hw, HCLGE_COMM_NIC_CRQ_HEAD_REG, + crq->next_to_use); +} + +static void hclgevf_parse_promisc_info(struct hclgevf_dev *hdev, + u16 promisc_info) +{ + if (!promisc_info) + dev_info(&hdev->pdev->dev, + "Promisc mode is closed by host for being untrusted.\n"); +} + +void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev) +{ + struct hclge_mbx_port_base_vlan *vlan_info; + struct hclge_mbx_link_status *link_info; + struct hclge_mbx_link_mode *link_mode; + enum hnae3_reset_type reset_type; + u16 link_status, state; + __le16 *msg_q; + u16 opcode; + u8 duplex; + u32 speed; + u32 tail; + u8 flag; + u16 idx; + + tail = hdev->arq.tail; + + /* process all the async queue messages */ + while (tail != hdev->arq.head) { + if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, + &hdev->hw.hw.comm_state)) { + dev_info(&hdev->pdev->dev, + "vf crq need init in async\n"); + return; + } + + msg_q = hdev->arq.msg_q[hdev->arq.head]; + opcode = le16_to_cpu(msg_q[0]); + switch (opcode) { + case HCLGE_MBX_LINK_STAT_CHANGE: + link_info = (struct hclge_mbx_link_status *)(msg_q + 1); + link_status = le16_to_cpu(link_info->link_status); + speed = le32_to_cpu(link_info->speed); + duplex = (u8)le16_to_cpu(link_info->duplex); + flag = link_info->flag; + + /* update upper layer with new link link status */ + hclgevf_update_speed_duplex(hdev, speed, duplex); + hclgevf_update_link_status(hdev, link_status); + + if (flag & HCLGE_MBX_PUSH_LINK_STATUS_EN) + set_bit(HCLGEVF_STATE_PF_PUSH_LINK_STATUS, + &hdev->state); + + break; + case HCLGE_MBX_LINK_STAT_MODE: + link_mode = (struct hclge_mbx_link_mode *)(msg_q + 1); + idx = le16_to_cpu(link_mode->idx); + if (idx) + hdev->hw.mac.supported = + le64_to_cpu(link_mode->link_mode); + else + hdev->hw.mac.advertising = + le64_to_cpu(link_mode->link_mode); + break; + case HCLGE_MBX_ASSERTING_RESET: + /* PF has asserted reset hence VF should go in pending + * state and poll for the hardware reset status till it + * has been completely reset. After this stack should + * eventually be re-initialized. 
+ */ + reset_type = + (enum hnae3_reset_type)le16_to_cpu(msg_q[1]); + set_bit(reset_type, &hdev->reset_pending); + set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); + hclgevf_reset_task_schedule(hdev); + + break; + case HCLGE_MBX_PUSH_VLAN_INFO: + vlan_info = + (struct hclge_mbx_port_base_vlan *)(msg_q + 1); + state = le16_to_cpu(vlan_info->state); + hclgevf_update_port_base_vlan_info(hdev, state, + vlan_info); + break; + case HCLGE_MBX_PUSH_PROMISC_INFO: + hclgevf_parse_promisc_info(hdev, le16_to_cpu(msg_q[1])); + break; + default: + dev_err(&hdev->pdev->dev, + "fetched unsupported(%u) message from arq\n", + opcode); + break; + } + + hclge_mbx_head_ptr_move_arq(hdev->arq); + atomic_dec(&hdev->arq.count); + msg_q = hdev->arq.msg_q[hdev->arq.head]; + } +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h new file mode 100644 index 000000000..5d4895bb5 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h @@ -0,0 +1,87 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2018-2019 Hisilicon Limited. */ + +/* This must be outside ifdef _HCLGEVF_TRACE_H */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM hns3 + +#if !defined(_HCLGEVF_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ) +#define _HCLGEVF_TRACE_H_ + +#include + +#define VF_GET_MBX_LEN (sizeof(struct hclge_mbx_pf_to_vf_cmd) / sizeof(u32)) +#define VF_SEND_MBX_LEN (sizeof(struct hclge_mbx_vf_to_pf_cmd) / sizeof(u32)) + +TRACE_EVENT(hclge_vf_mbx_get, + TP_PROTO( + struct hclgevf_dev *hdev, + struct hclge_mbx_pf_to_vf_cmd *req), + TP_ARGS(hdev, req), + + TP_STRUCT__entry( + __field(u8, vfid) + __field(u16, code) + __string(pciname, pci_name(hdev->pdev)) + __string(devname, &hdev->nic.kinfo.netdev->name) + __array(u32, mbx_data, VF_GET_MBX_LEN) + ), + + TP_fast_assign( + __entry->vfid = req->dest_vfid; + __entry->code = le16_to_cpu(req->msg.code); + __assign_str(pciname, pci_name(hdev->pdev)); + __assign_str(devname, &hdev->nic.kinfo.netdev->name); + memcpy(__entry->mbx_data, req, + sizeof(struct hclge_mbx_pf_to_vf_cmd)); + ), + + TP_printk( + "%s %s vfid:%u code:%u data:%s", + __get_str(pciname), __get_str(devname), __entry->vfid, + __entry->code, + __print_array(__entry->mbx_data, VF_GET_MBX_LEN, sizeof(u32)) + ) +); + +TRACE_EVENT(hclge_vf_mbx_send, + TP_PROTO( + struct hclgevf_dev *hdev, + struct hclge_mbx_vf_to_pf_cmd *req), + TP_ARGS(hdev, req), + + TP_STRUCT__entry( + __field(u8, vfid) + __field(u8, code) + __field(u8, subcode) + __string(pciname, pci_name(hdev->pdev)) + __string(devname, &hdev->nic.kinfo.netdev->name) + __array(u32, mbx_data, VF_SEND_MBX_LEN) + ), + + TP_fast_assign( + __entry->vfid = req->mbx_src_vfid; + __entry->code = req->msg.code; + __entry->subcode = req->msg.subcode; + __assign_str(pciname, pci_name(hdev->pdev)); + __assign_str(devname, &hdev->nic.kinfo.netdev->name); + memcpy(__entry->mbx_data, req, + sizeof(struct hclge_mbx_vf_to_pf_cmd)); + ), + + TP_printk( + "%s %s vfid:%u code:%u subcode:%u data:%s", + __get_str(pciname), __get_str(devname), __entry->vfid, + __entry->code, __entry->subcode, + __print_array(__entry->mbx_data, VF_SEND_MBX_LEN, sizeof(u32)) + ) +); + +#endif /* _HCLGEVF_TRACE_H_ */ + +/* This must be outside ifdef _HCLGEVF_TRACE_H */ +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE hclgevf_trace +#include -- cgit v1.2.3
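
The synchronous VF-to-PF mailbox path in hclgevf_mbx.c above hinges on a small match_id handshake: hclgevf_reset_mbx_resp_status() clears the pending-response state and advances match_id (skipping zero) before every request that needs a reply, and hclgevf_handle_mbx_response() accepts an incoming PF reply only when the PF either echoes no id at all (a PF that does not support matching) or echoes the id of the request currently being waited on, so a late reply to an earlier request cannot be mistaken for the current one. The stand-alone C sketch below models only that accept/reject decision; the type and function names and the user-space main() are illustrative assumptions for this note, not driver code.

	/*
	 * Minimal user-space model of the match_id handling in
	 * hclgevf_reset_mbx_resp_status() and hclgevf_handle_mbx_response().
	 * All names here are hypothetical; only the accept/reject logic is
	 * taken from the driver above.
	 */
	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define MBX_MATCH_ID_START 1

	struct mbx_resp_status {
		bool received_resp;
		uint16_t match_id;	/* id of the request being waited on */
	};

	/* Called before every synchronous request: drop any stale response
	 * and advance match_id, skipping 0 (0 means "PF echoes no id").
	 */
	static void reset_resp_status(struct mbx_resp_status *resp)
	{
		resp->received_resp = false;
		resp->match_id++;
		if (resp->match_id == 0)
			resp->match_id = MBX_MATCH_ID_START;
	}

	/* Called when a PF->VF reply arrives: accept it only if the PF echoed
	 * no id at all or echoed the id of the outstanding request.
	 */
	static void handle_resp(struct mbx_resp_status *resp, uint16_t echoed_id)
	{
		if (echoed_id == 0 || echoed_id == resp->match_id)
			resp->received_resp = true;
		/* otherwise: stale reply to an earlier request, ignore it */
	}

	int main(void)
	{
		struct mbx_resp_status resp = { 0 };

		reset_resp_status(&resp);	/* request #1, match_id = 1 */
		reset_resp_status(&resp);	/* request #2, match_id = 2 */

		handle_resp(&resp, 1);		/* late reply to request #1 */
		printf("stale reply accepted: %d\n", resp.received_resp);

		handle_resp(&resp, 2);		/* reply to request #2 */
		printf("matching reply accepted: %d\n", resp.received_resp);

		return 0;
	}

Built on its own (for example with gcc -Wall), the sketch prints 0 for the stale reply and 1 for the matching one, which is the same guarantee hclgevf_get_mbx_resp() relies on while it polls received_resp under mbx_mutex.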