author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
commit    ace9429bb58fd418f0c81d4c2835699bddf6bde6 (patch)
tree      b2d64bc10158fdd5497876388cd68142ca374ed3 /drivers/net/ethernet/hisilicon
parent    Initial commit. (diff)
Adding upstream version 6.6.15. (upstream/6.6.15)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/net/ethernet/hisilicon')
-rw-r--r--  drivers/net/ethernet/hisilicon/Kconfig  144
-rw-r--r--  drivers/net/ethernet/hisilicon/Makefile  11
-rw-r--r--  drivers/net/ethernet/hisilicon/hip04_eth.c  1060
-rw-r--r--  drivers/net/ethernet/hisilicon/hisi_femac.c  976
-rw-r--r--  drivers/net/ethernet/hisilicon/hix5hd2_gmac.c  1329
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/Makefile  13
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hnae.c  467
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hnae.h  707
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c  1002
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c  739
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.h  41
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c  1259
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h  467
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c  3158
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h  468
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c  771
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.h  35
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c  630
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h  119
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c  1088
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h  164
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h  1095
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c  819
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.h  12
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_enet.c  2451
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_enet.h  96
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_ethtool.c  1283
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/Makefile  29
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h  256
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hnae3.c  392
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hnae3.h  949
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c  654
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h  476
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c  504
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h  133
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c  115
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.h  39
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c  130
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c  1452
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h  67
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.c  6013
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.h  757
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c  2180
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.h  37
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_trace.h  138
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h  891
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c  680
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.h  15
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c  2589
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h  774
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c  128
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.h  15
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c  2942
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h  231
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c  12793
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h  1149
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c  1152
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c  311
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h  19
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c  552
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h  143
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.c  668
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.h  17
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c  2145
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h  280
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h  87
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h  136
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c  130
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.h  15
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c  3359
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h  299
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c  388
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c  164
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.h  13
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h  87
-rw-r--r--  drivers/net/ethernet/hisilicon/hns_mdio.c  652
76 files changed, 67549 insertions, 0 deletions
diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig
new file mode 100644
index 0000000000..3312e1d93c
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/Kconfig
@@ -0,0 +1,144 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# HISILICON device configuration
+#
+
+config NET_VENDOR_HISILICON
+ bool "Hisilicon devices"
+ default y
+ depends on OF || ACPI
+ depends on ARM || ARM64 || COMPILE_TEST
+ help
+ If you have a network (Ethernet) card belonging to this class, say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Hisilicon devices. If you say Y, you will be asked
+ for your specific card in the following questions.
+
+if NET_VENDOR_HISILICON
+
+config HIX5HD2_GMAC
+ tristate "Hisilicon HIX5HD2 Family Network Device Support"
+ select PHYLIB
+ help
+ This selects the hix5hd2 MAC family network device driver.
+
+config HISI_FEMAC
+ tristate "Hisilicon Fast Ethernet MAC device support"
+ depends on HAS_IOMEM
+ select PHYLIB
+ select RESET_CONTROLLER
+ help
+ This selects the Hisilicon Fast Ethernet MAC device (FEMAC).
+ The FEMAC receives and transmits data over Ethernet
+ ports at 10/100 Mbps in full-duplex or half-duplex mode.
+ The FEMAC exchanges data with the CPU, and supports
+ the energy efficient Ethernet (EEE).
+
+config HIP04_ETH
+ tristate "HISILICON P04 Ethernet support"
+ depends on HAS_IOMEM # For MFD_SYSCON
+ select MARVELL_PHY
+ select MFD_SYSCON
+ select HNS_MDIO
+ help
+ If you wish to compile a kernel for hardware with the Hisilicon P04 SoC and
+ want to use the internal Ethernet, then you should answer Y here.
+
+config HI13X1_GMAC
+ bool "Hisilicon HI13X1 Network Device Support"
+ depends on HIP04_ETH
+ help
+ If you wish to compile a kernel for hardware with the Hisilicon hi13x1_gmac,
+ then you should answer Y here. This makes the driver suitable for use
+ on certain boards such as the HI13X1.
+
+ If you are unsure, say N.
+
+config HNS_MDIO
+ tristate
+ select PHYLIB
+ help
+ This selects the HNS MDIO support. It is needed by HNS_DSAF to access
+ the PHY.
+
+config HNS
+ tristate
+ help
+ This selects the framework support for Hisilicon Network Subsystem. It
+ is needed by any driver which provides an HNS acceleration engine or makes
+ use of the engine.
+
+config HNS_DSAF
+ tristate "Hisilicon HNS DSAF device Support"
+ select HNS
+ select HNS_MDIO
+ help
+ This selects the DSAF (Distributed System Area Fabric) network
+ acceleration engine support. The engine is used in Hisilicon hip05,
+ Hi1610 and later ICT SoCs.
+
+config HNS_ENET
+ tristate "Hisilicon HNS Ethernet Device Support"
+ select PHYLIB
+ select HNS
+ help
+ This selects the general Ethernet driver for HNS. This module makes
+ use of any HNS AE driver, such as HNS_DSAF.
+
+config HNS3
+ tristate "Hisilicon Network Subsystem Support HNS3 (Framework)"
+ depends on PCI
+ select NET_DEVLINK
+ select PAGE_POOL
+ help
+ This selects the framework support for Hisilicon Network Subsystem 3.
+ This layer facilitates clients like ENET, RoCE and user-space ethernet
+ drivers (like ODP) to register with HNAE devices and their associated
+ operations.
+
+if HNS3
+
+config HNS3_HCLGE
+ tristate "Hisilicon HNS3 HCLGE Acceleration Engine & Compatibility Layer Support"
+ default m
+ depends on PCI_MSI
+ depends on PTP_1588_CLOCK_OPTIONAL
+ help
+ This selects the HNS3_HCLGE network acceleration engine & its hardware
+ compatibility layer. The engine is used in the Hisilicon hip08 family of
+ SoCs and upcoming SoCs.
+
+config HNS3_DCB
+ bool "Hisilicon HNS3 Data Center Bridge Support"
+ default n
+ depends on HNS3_HCLGE && DCB
+ help
+ Say Y here if you want to use Data Center Bridging (DCB) in the HNS3 driver.
+
+ If unsure, say N.
+
+config HNS3_HCLGEVF
+ tristate "Hisilicon HNS3VF Acceleration Engine & Compatibility Layer Support"
+ depends on PCI_MSI
+ depends on HNS3_HCLGE
+ help
+ This selects the HNS3 VF driver's network acceleration engine & its hardware
+ compatibility layer. The engine is used in the Hisilicon hip08 family of
+ SoCs and upcoming SoCs.
+
+config HNS3_ENET
+ tristate "Hisilicon HNS3 Ethernet Device Support"
+ default m
+ depends on 64BIT && PCI
+ depends on INET
+ select DIMLIB
+ help
+ This selects the Ethernet driver for Hisilicon Network Subsystem 3 for the
+ hip08 family of SoCs. This module depends upon the HNAE3 driver to access
+ HNAE3 devices and their associated operations.
+
+endif #HNS3
+
+endif # NET_VENDOR_HISILICON
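+# A minimal, hypothetical .config fragment for bringing up the HNS3 PF/VF
+# stack as modules on a hip08-class system (the exact selections depend on the
+# rest of the kernel configuration, e.g. PCI_MSI, DCB and PTP support):
+#
+#   CONFIG_NET_VENDOR_HISILICON=y
+#   CONFIG_HNS3=m
+#   CONFIG_HNS3_HCLGE=m
+#   CONFIG_HNS3_DCB=y
+#   CONFIG_HNS3_ENET=m
+#   CONFIG_HNS3_HCLGEVF=m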
diff --git a/drivers/net/ethernet/hisilicon/Makefile b/drivers/net/ethernet/hisilicon/Makefile
new file mode 100644
index 0000000000..7f76d41204
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the HISILICON network device drivers.
+#
+
+obj-$(CONFIG_HIX5HD2_GMAC) += hix5hd2_gmac.o
+obj-$(CONFIG_HIP04_ETH) += hip04_eth.o
+obj-$(CONFIG_HNS_MDIO) += hns_mdio.o
+obj-$(CONFIG_HNS) += hns/
+obj-$(CONFIG_HNS3) += hns3/
+obj-$(CONFIG_HISI_FEMAC) += hisi_femac.o
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
new file mode 100644
index 0000000000..ecf92a5d56
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -0,0 +1,1060 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+/* Copyright (c) 2014 Linaro Ltd.
+ * Copyright (c) 2014 Hisilicon Limited.
+ */
+
+#include <linux/module.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/ktime.h>
+#include <linux/of_address.h>
+#include <linux/phy.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+
+#define SC_PPE_RESET_DREQ 0x026C
+
+#define PPE_CFG_RX_ADDR 0x100
+#define PPE_CFG_POOL_GRP 0x300
+#define PPE_CFG_RX_BUF_SIZE 0x400
+#define PPE_CFG_RX_FIFO_SIZE 0x500
+#define PPE_CURR_BUF_CNT 0xa200
+
+#define GE_DUPLEX_TYPE 0x08
+#define GE_MAX_FRM_SIZE_REG 0x3c
+#define GE_PORT_MODE 0x40
+#define GE_PORT_EN 0x44
+#define GE_SHORT_RUNTS_THR_REG 0x50
+#define GE_TX_LOCAL_PAGE_REG 0x5c
+#define GE_TRANSMIT_CONTROL_REG 0x60
+#define GE_CF_CRC_STRIP_REG 0x1b0
+#define GE_MODE_CHANGE_REG 0x1b4
+#define GE_RECV_CONTROL_REG 0x1e0
+#define GE_STATION_MAC_ADDRESS 0x210
+
+#define PPE_CFG_BUS_CTRL_REG 0x424
+#define PPE_CFG_RX_CTRL_REG 0x428
+
+#if defined(CONFIG_HI13X1_GMAC)
+#define PPE_CFG_CPU_ADD_ADDR 0x6D0
+#define PPE_CFG_MAX_FRAME_LEN_REG 0x500
+#define PPE_CFG_RX_PKT_MODE_REG 0x504
+#define PPE_CFG_QOS_VMID_GEN 0x520
+#define PPE_CFG_RX_PKT_INT 0x740
+#define PPE_INTEN 0x700
+#define PPE_INTSTS 0x708
+#define PPE_RINT 0x704
+#define PPE_CFG_STS_MODE 0x880
+#else
+#define PPE_CFG_CPU_ADD_ADDR 0x580
+#define PPE_CFG_MAX_FRAME_LEN_REG 0x408
+#define PPE_CFG_RX_PKT_MODE_REG 0x438
+#define PPE_CFG_QOS_VMID_GEN 0x500
+#define PPE_CFG_RX_PKT_INT 0x538
+#define PPE_INTEN 0x600
+#define PPE_INTSTS 0x608
+#define PPE_RINT 0x604
+#define PPE_CFG_STS_MODE 0x700
+#endif /* CONFIG_HI13X1_GMAC */
+
+#define PPE_HIS_RX_PKT_CNT 0x804
+
+#define RESET_DREQ_ALL 0xffffffff
+
+/* REG_INTERRUPT */
+#define RCV_INT BIT(10)
+#define RCV_NOBUF BIT(8)
+#define RCV_DROP BIT(7)
+#define TX_DROP BIT(6)
+#define DEF_INT_ERR (RCV_NOBUF | RCV_DROP | TX_DROP)
+#define DEF_INT_MASK (RCV_INT | DEF_INT_ERR)
+
+/* TX descriptor config */
+#define TX_FREE_MEM BIT(0)
+#define TX_READ_ALLOC_L3 BIT(1)
+#if defined(CONFIG_HI13X1_GMAC)
+#define TX_CLEAR_WB BIT(7)
+#define TX_RELEASE_TO_PPE BIT(4)
+#define TX_FINISH_CACHE_INV BIT(6)
+#define TX_POOL_SHIFT 16
+#else
+#define TX_CLEAR_WB BIT(4)
+#define TX_FINISH_CACHE_INV BIT(2)
+#endif
+#define TX_L3_CHECKSUM BIT(5)
+#define TX_LOOP_BACK BIT(11)
+
+/* RX error */
+#define RX_PKT_DROP BIT(0)
+#define RX_L2_ERR BIT(1)
+#define RX_PKT_ERR (RX_PKT_DROP | RX_L2_ERR)
+
+#define SGMII_SPEED_1000 0x08
+#define SGMII_SPEED_100 0x07
+#define SGMII_SPEED_10 0x06
+#define MII_SPEED_100 0x01
+#define MII_SPEED_10 0x00
+
+#define GE_DUPLEX_FULL BIT(0)
+#define GE_DUPLEX_HALF 0x00
+#define GE_MODE_CHANGE_EN BIT(0)
+
+#define GE_TX_AUTO_NEG BIT(5)
+#define GE_TX_ADD_CRC BIT(6)
+#define GE_TX_SHORT_PAD_THROUGH BIT(7)
+
+#define GE_RX_STRIP_CRC BIT(0)
+#define GE_RX_STRIP_PAD BIT(3)
+#define GE_RX_PAD_EN BIT(4)
+
+#define GE_AUTO_NEG_CTL BIT(0)
+
+#define GE_RX_INT_THRESHOLD BIT(6)
+#define GE_RX_TIMEOUT 0x04
+
+#define GE_RX_PORT_EN BIT(1)
+#define GE_TX_PORT_EN BIT(2)
+
+#define PPE_CFG_RX_PKT_ALIGN BIT(18)
+
+#if defined(CONFIG_HI13X1_GMAC)
+#define PPE_CFG_QOS_VMID_GRP_SHIFT 4
+#define PPE_CFG_RX_CTRL_ALIGN_SHIFT 7
+#define PPE_CFG_STS_RX_PKT_CNT_RC BIT(0)
+#define PPE_CFG_QOS_VMID_MODE BIT(15)
+#define PPE_CFG_BUS_LOCAL_REL (BIT(9) | BIT(15) | BIT(19) | BIT(23))
+
+/* buf unit size is cache_line_size, which is 64, so the shift is 6 */
+#define PPE_BUF_SIZE_SHIFT 6
+#define PPE_TX_BUF_HOLD BIT(31)
+#define SOC_CACHE_LINE_MASK 0x3F
+#else
+#define PPE_CFG_QOS_VMID_GRP_SHIFT 8
+#define PPE_CFG_RX_CTRL_ALIGN_SHIFT 11
+#define PPE_CFG_STS_RX_PKT_CNT_RC BIT(12)
+#define PPE_CFG_QOS_VMID_MODE BIT(14)
+#define PPE_CFG_BUS_LOCAL_REL BIT(14)
+
+/* buf unit size is 1, so the shift is 0 */
+#define PPE_BUF_SIZE_SHIFT 0
+#define PPE_TX_BUF_HOLD 0
+#endif /* CONFIG_HI13X1_GMAC */
+
+#define PPE_CFG_RX_FIFO_FSFU BIT(11)
+#define PPE_CFG_RX_DEPTH_SHIFT 16
+#define PPE_CFG_RX_START_SHIFT 0
+
+#define PPE_CFG_BUS_BIG_ENDIEN BIT(0)
+
+#define RX_DESC_NUM 128
+#define TX_DESC_NUM 256
+#define TX_NEXT(N) (((N) + 1) & (TX_DESC_NUM-1))
+#define RX_NEXT(N) (((N) + 1) & (RX_DESC_NUM-1))
+
+#define GMAC_PPE_RX_PKT_MAX_LEN 379
+#define GMAC_MAX_PKT_LEN 1516
+#define GMAC_MIN_PKT_LEN 31
+#define RX_BUF_SIZE 1600
+#define RESET_TIMEOUT 1000
+#define TX_TIMEOUT (6 * HZ)
+
+#define DRV_NAME "hip04-ether"
+#define DRV_VERSION "v1.0"
+
+#define HIP04_MAX_TX_COALESCE_USECS 200
+#define HIP04_MIN_TX_COALESCE_USECS 100
+#define HIP04_MAX_TX_COALESCE_FRAMES 200
+#define HIP04_MIN_TX_COALESCE_FRAMES 100
+
+struct tx_desc {
+#if defined(CONFIG_HI13X1_GMAC)
+ u32 reserved1[2];
+ u32 send_addr;
+ u16 send_size;
+ u16 data_offset;
+ u32 reserved2[7];
+ u32 cfg;
+ u32 wb_addr;
+ u32 reserved3[3];
+#else
+ u32 send_addr;
+ u32 send_size;
+ u32 next_addr;
+ u32 cfg;
+ u32 wb_addr;
+#endif
+} __aligned(64);
+
+struct rx_desc {
+#if defined(CONFIG_HI13X1_GMAC)
+ u32 reserved1[3];
+ u16 pkt_len;
+ u16 reserved_16;
+ u32 reserved2[6];
+ u32 pkt_err;
+ u32 reserved3[5];
+#else
+ u16 reserved_16;
+ u16 pkt_len;
+ u32 reserve1[3];
+ u32 pkt_err;
+ u32 reserve2[4];
+#endif
+};
+
+struct hip04_priv {
+ void __iomem *base;
+#if defined(CONFIG_HI13X1_GMAC)
+ void __iomem *sysctrl_base;
+#endif
+ phy_interface_t phy_mode;
+ int chan;
+ unsigned int port;
+ unsigned int group;
+ unsigned int speed;
+ unsigned int duplex;
+ unsigned int reg_inten;
+
+ struct napi_struct napi;
+ struct device *dev;
+ struct net_device *ndev;
+
+ struct tx_desc *tx_desc;
+ dma_addr_t tx_desc_dma;
+ struct sk_buff *tx_skb[TX_DESC_NUM];
+ dma_addr_t tx_phys[TX_DESC_NUM];
+ unsigned int tx_head;
+
+ int tx_coalesce_frames;
+ int tx_coalesce_usecs;
+ struct hrtimer tx_coalesce_timer;
+
+ unsigned char *rx_buf[RX_DESC_NUM];
+ dma_addr_t rx_phys[RX_DESC_NUM];
+ unsigned int rx_head;
+ unsigned int rx_buf_size;
+ unsigned int rx_cnt_remaining;
+
+ struct device_node *phy_node;
+ struct phy_device *phy;
+ struct regmap *map;
+ struct work_struct tx_timeout_task;
+
+ /* written only by tx cleanup */
+ unsigned int tx_tail ____cacheline_aligned_in_smp;
+};
+
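+/* tx_count() returns how many TX descriptors are currently in flight. The
+ * unsigned subtraction followed by "% TX_DESC_NUM" stays correct across
+ * index wrap-around because TX_DESC_NUM is a power of two.
+ */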
+static inline unsigned int tx_count(unsigned int head, unsigned int tail)
+{
+ return (head - tail) % TX_DESC_NUM;
+}
+
+static void hip04_config_port(struct net_device *ndev, u32 speed, u32 duplex)
+{
+ struct hip04_priv *priv = netdev_priv(ndev);
+ u32 val;
+
+ priv->speed = speed;
+ priv->duplex = duplex;
+
+ switch (priv->phy_mode) {
+ case PHY_INTERFACE_MODE_SGMII:
+ if (speed == SPEED_1000)
+ val = SGMII_SPEED_1000;
+ else if (speed == SPEED_100)
+ val = SGMII_SPEED_100;
+ else
+ val = SGMII_SPEED_10;
+ break;
+ case PHY_INTERFACE_MODE_MII:
+ if (speed == SPEED_100)
+ val = MII_SPEED_100;
+ else
+ val = MII_SPEED_10;
+ break;
+ default:
+ netdev_warn(ndev, "not supported mode\n");
+ val = MII_SPEED_10;
+ break;
+ }
+ writel_relaxed(val, priv->base + GE_PORT_MODE);
+
+ val = duplex ? GE_DUPLEX_FULL : GE_DUPLEX_HALF;
+ writel_relaxed(val, priv->base + GE_DUPLEX_TYPE);
+
+ val = GE_MODE_CHANGE_EN;
+ writel_relaxed(val, priv->base + GE_MODE_CHANGE_REG);
+}
+
+static void hip04_reset_dreq(struct hip04_priv *priv)
+{
+#if defined(CONFIG_HI13X1_GMAC)
+ writel_relaxed(RESET_DREQ_ALL, priv->sysctrl_base + SC_PPE_RESET_DREQ);
+#endif
+}
+
+static void hip04_reset_ppe(struct hip04_priv *priv)
+{
+ u32 val, tmp, timeout = 0;
+
+ do {
+ regmap_read(priv->map, priv->port * 4 + PPE_CURR_BUF_CNT, &val);
+ regmap_read(priv->map, priv->port * 4 + PPE_CFG_RX_ADDR, &tmp);
+ if (timeout++ > RESET_TIMEOUT)
+ break;
+ } while (val & 0xfff);
+}
+
+static void hip04_config_fifo(struct hip04_priv *priv)
+{
+ u32 val;
+
+ val = readl_relaxed(priv->base + PPE_CFG_STS_MODE);
+ val |= PPE_CFG_STS_RX_PKT_CNT_RC;
+ writel_relaxed(val, priv->base + PPE_CFG_STS_MODE);
+
+ val = BIT(priv->group);
+ regmap_write(priv->map, priv->port * 4 + PPE_CFG_POOL_GRP, val);
+
+ val = priv->group << PPE_CFG_QOS_VMID_GRP_SHIFT;
+ val |= PPE_CFG_QOS_VMID_MODE;
+ writel_relaxed(val, priv->base + PPE_CFG_QOS_VMID_GEN);
+
+ val = RX_BUF_SIZE >> PPE_BUF_SIZE_SHIFT;
+ regmap_write(priv->map, priv->port * 4 + PPE_CFG_RX_BUF_SIZE, val);
+
+ val = RX_DESC_NUM << PPE_CFG_RX_DEPTH_SHIFT;
+ val |= PPE_CFG_RX_FIFO_FSFU;
+ val |= priv->chan << PPE_CFG_RX_START_SHIFT;
+ regmap_write(priv->map, priv->port * 4 + PPE_CFG_RX_FIFO_SIZE, val);
+
+ val = NET_IP_ALIGN << PPE_CFG_RX_CTRL_ALIGN_SHIFT;
+ writel_relaxed(val, priv->base + PPE_CFG_RX_CTRL_REG);
+
+ val = PPE_CFG_RX_PKT_ALIGN;
+ writel_relaxed(val, priv->base + PPE_CFG_RX_PKT_MODE_REG);
+
+ val = PPE_CFG_BUS_LOCAL_REL | PPE_CFG_BUS_BIG_ENDIEN;
+ writel_relaxed(val, priv->base + PPE_CFG_BUS_CTRL_REG);
+
+ val = GMAC_PPE_RX_PKT_MAX_LEN;
+ writel_relaxed(val, priv->base + PPE_CFG_MAX_FRAME_LEN_REG);
+
+ val = GMAC_MAX_PKT_LEN;
+ writel_relaxed(val, priv->base + GE_MAX_FRM_SIZE_REG);
+
+ val = GMAC_MIN_PKT_LEN;
+ writel_relaxed(val, priv->base + GE_SHORT_RUNTS_THR_REG);
+
+ val = readl_relaxed(priv->base + GE_TRANSMIT_CONTROL_REG);
+ val |= GE_TX_AUTO_NEG | GE_TX_ADD_CRC | GE_TX_SHORT_PAD_THROUGH;
+ writel_relaxed(val, priv->base + GE_TRANSMIT_CONTROL_REG);
+
+ val = GE_RX_STRIP_CRC;
+ writel_relaxed(val, priv->base + GE_CF_CRC_STRIP_REG);
+
+ val = readl_relaxed(priv->base + GE_RECV_CONTROL_REG);
+ val |= GE_RX_STRIP_PAD | GE_RX_PAD_EN;
+ writel_relaxed(val, priv->base + GE_RECV_CONTROL_REG);
+
+#ifndef CONFIG_HI13X1_GMAC
+ val = GE_AUTO_NEG_CTL;
+ writel_relaxed(val, priv->base + GE_TX_LOCAL_PAGE_REG);
+#endif
+}
+
+static void hip04_mac_enable(struct net_device *ndev)
+{
+ struct hip04_priv *priv = netdev_priv(ndev);
+ u32 val;
+
+ /* enable tx & rx */
+ val = readl_relaxed(priv->base + GE_PORT_EN);
+ val |= GE_RX_PORT_EN | GE_TX_PORT_EN;
+ writel_relaxed(val, priv->base + GE_PORT_EN);
+
+ /* clear rx int */
+ val = RCV_INT;
+ writel_relaxed(val, priv->base + PPE_RINT);
+
+ /* config recv int */
+ val = GE_RX_INT_THRESHOLD | GE_RX_TIMEOUT;
+ writel_relaxed(val, priv->base + PPE_CFG_RX_PKT_INT);
+
+ /* enable interrupt */
+ priv->reg_inten = DEF_INT_MASK;
+ writel_relaxed(priv->reg_inten, priv->base + PPE_INTEN);
+}
+
+static void hip04_mac_disable(struct net_device *ndev)
+{
+ struct hip04_priv *priv = netdev_priv(ndev);
+ u32 val;
+
+ /* disable int */
+ priv->reg_inten &= ~(DEF_INT_MASK);
+ writel_relaxed(priv->reg_inten, priv->base + PPE_INTEN);
+
+ /* disable tx & rx */
+ val = readl_relaxed(priv->base + GE_PORT_EN);
+ val &= ~(GE_RX_PORT_EN | GE_TX_PORT_EN);
+ writel_relaxed(val, priv->base + GE_PORT_EN);
+}
+
+static void hip04_set_xmit_desc(struct hip04_priv *priv, dma_addr_t phys)
+{
+ u32 val;
+
+ val = phys >> PPE_BUF_SIZE_SHIFT | PPE_TX_BUF_HOLD;
+ writel(val, priv->base + PPE_CFG_CPU_ADD_ADDR);
+}
+
+static void hip04_set_recv_desc(struct hip04_priv *priv, dma_addr_t phys)
+{
+ u32 val;
+
+ val = phys >> PPE_BUF_SIZE_SHIFT;
+ regmap_write(priv->map, priv->port * 4 + PPE_CFG_RX_ADDR, val);
+}
+
+static u32 hip04_recv_cnt(struct hip04_priv *priv)
+{
+ return readl(priv->base + PPE_HIS_RX_PKT_CNT);
+}
+
+static void hip04_update_mac_address(struct net_device *ndev)
+{
+ struct hip04_priv *priv = netdev_priv(ndev);
+
+ writel_relaxed(((ndev->dev_addr[0] << 8) | (ndev->dev_addr[1])),
+ priv->base + GE_STATION_MAC_ADDRESS);
+ writel_relaxed(((ndev->dev_addr[2] << 24) | (ndev->dev_addr[3] << 16) |
+ (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5])),
+ priv->base + GE_STATION_MAC_ADDRESS + 4);
+}
+
+static int hip04_set_mac_address(struct net_device *ndev, void *addr)
+{
+ eth_mac_addr(ndev, addr);
+ hip04_update_mac_address(ndev);
+ return 0;
+}
+
+static int hip04_tx_reclaim(struct net_device *ndev, bool force)
+{
+ struct hip04_priv *priv = netdev_priv(ndev);
+ unsigned tx_tail = priv->tx_tail;
+ struct tx_desc *desc;
+ unsigned int bytes_compl = 0, pkts_compl = 0;
+ unsigned int count;
+
+ smp_rmb();
+ count = tx_count(READ_ONCE(priv->tx_head), tx_tail);
+ if (count == 0)
+ goto out;
+
+ while (count) {
+ desc = &priv->tx_desc[tx_tail];
+ if (desc->send_addr != 0) {
+ if (force)
+ desc->send_addr = 0;
+ else
+ break;
+ }
+
+ if (priv->tx_phys[tx_tail]) {
+ dma_unmap_single(priv->dev, priv->tx_phys[tx_tail],
+ priv->tx_skb[tx_tail]->len,
+ DMA_TO_DEVICE);
+ priv->tx_phys[tx_tail] = 0;
+ }
+ pkts_compl++;
+ bytes_compl += priv->tx_skb[tx_tail]->len;
+ dev_kfree_skb(priv->tx_skb[tx_tail]);
+ priv->tx_skb[tx_tail] = NULL;
+ tx_tail = TX_NEXT(tx_tail);
+ count--;
+ }
+
+ priv->tx_tail = tx_tail;
+ smp_wmb(); /* Ensure tx_tail visible to xmit */
+
+out:
+ if (pkts_compl || bytes_compl)
+ netdev_completed_queue(ndev, pkts_compl, bytes_compl);
+
+ if (unlikely(netif_queue_stopped(ndev)) && (count < (TX_DESC_NUM - 1)))
+ netif_wake_queue(ndev);
+
+ return count;
+}
+
+static void hip04_start_tx_timer(struct hip04_priv *priv)
+{
+ unsigned long ns = priv->tx_coalesce_usecs * NSEC_PER_USEC / 2;
+
+ /* allow timer to fire after half the time at the earliest */
+ hrtimer_start_range_ns(&priv->tx_coalesce_timer, ns_to_ktime(ns),
+ ns, HRTIMER_MODE_REL);
+}
+
+static netdev_tx_t
+hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct hip04_priv *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
+ unsigned int tx_head = priv->tx_head, count;
+ struct tx_desc *desc = &priv->tx_desc[tx_head];
+ dma_addr_t phys;
+
+ smp_rmb();
+ count = tx_count(tx_head, READ_ONCE(priv->tx_tail));
+ if (count == (TX_DESC_NUM - 1)) {
+ netif_stop_queue(ndev);
+ return NETDEV_TX_BUSY;
+ }
+
+ phys = dma_map_single(priv->dev, skb->data, skb->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(priv->dev, phys)) {
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ priv->tx_skb[tx_head] = skb;
+ priv->tx_phys[tx_head] = phys;
+
+ desc->send_size = (__force u32)cpu_to_be32(skb->len);
+#if defined(CONFIG_HI13X1_GMAC)
+ desc->cfg = (__force u32)cpu_to_be32(TX_CLEAR_WB | TX_FINISH_CACHE_INV
+ | TX_RELEASE_TO_PPE | priv->port << TX_POOL_SHIFT);
+ desc->data_offset = (__force u32)cpu_to_be32(phys & SOC_CACHE_LINE_MASK);
+ desc->send_addr = (__force u32)cpu_to_be32(phys & ~SOC_CACHE_LINE_MASK);
+#else
+ desc->cfg = (__force u32)cpu_to_be32(TX_CLEAR_WB | TX_FINISH_CACHE_INV);
+ desc->send_addr = (__force u32)cpu_to_be32(phys);
+#endif
+ phys = priv->tx_desc_dma + tx_head * sizeof(struct tx_desc);
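+ /* wb_addr points at this descriptor's own send_addr field: the hardware is
+ * expected to write back there once the frame has been sent, and
+ * hip04_tx_reclaim() treats a cleared send_addr as completion.
+ */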
+ desc->wb_addr = (__force u32)cpu_to_be32(phys +
+ offsetof(struct tx_desc, send_addr));
+ skb_tx_timestamp(skb);
+
+ hip04_set_xmit_desc(priv, phys);
+ count++;
+ netdev_sent_queue(ndev, skb->len);
+ priv->tx_head = TX_NEXT(tx_head);
+
+ stats->tx_bytes += skb->len;
+ stats->tx_packets++;
+
+ /* Ensure tx_head update visible to tx reclaim */
+ smp_wmb();
+
+ /* queue is getting full, better start cleaning up now */
+ if (count >= priv->tx_coalesce_frames) {
+ if (napi_schedule_prep(&priv->napi)) {
+ /* disable rx interrupt and timer */
+ priv->reg_inten &= ~(RCV_INT);
+ writel_relaxed(DEF_INT_MASK & ~RCV_INT,
+ priv->base + PPE_INTEN);
+ hrtimer_cancel(&priv->tx_coalesce_timer);
+ __napi_schedule(&priv->napi);
+ }
+ } else if (!hrtimer_is_queued(&priv->tx_coalesce_timer)) {
+ /* cleanup not pending yet, start a new timer */
+ hip04_start_tx_timer(priv);
+ }
+
+ return NETDEV_TX_OK;
+}
+
+static int hip04_rx_poll(struct napi_struct *napi, int budget)
+{
+ struct hip04_priv *priv = container_of(napi, struct hip04_priv, napi);
+ struct net_device *ndev = priv->ndev;
+ struct net_device_stats *stats = &ndev->stats;
+ struct rx_desc *desc;
+ struct sk_buff *skb;
+ unsigned char *buf;
+ bool last = false;
+ dma_addr_t phys;
+ int rx = 0;
+ int tx_remaining;
+ u16 len;
+ u32 err;
+
+ /* clean up tx descriptors */
+ tx_remaining = hip04_tx_reclaim(ndev, false);
+ priv->rx_cnt_remaining += hip04_recv_cnt(priv);
+ while (priv->rx_cnt_remaining && !last) {
+ buf = priv->rx_buf[priv->rx_head];
+ skb = build_skb(buf, priv->rx_buf_size);
+ if (unlikely(!skb)) {
+ net_dbg_ratelimited("build_skb failed\n");
+ goto refill;
+ }
+
+ dma_unmap_single(priv->dev, priv->rx_phys[priv->rx_head],
+ RX_BUF_SIZE, DMA_FROM_DEVICE);
+ priv->rx_phys[priv->rx_head] = 0;
+
+ desc = (struct rx_desc *)skb->data;
+ len = be16_to_cpu((__force __be16)desc->pkt_len);
+ err = be32_to_cpu((__force __be32)desc->pkt_err);
+
+ if (0 == len) {
+ dev_kfree_skb_any(skb);
+ last = true;
+ } else if ((err & RX_PKT_ERR) || (len >= GMAC_MAX_PKT_LEN)) {
+ dev_kfree_skb_any(skb);
+ stats->rx_dropped++;
+ stats->rx_errors++;
+ } else {
+ skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
+ skb_put(skb, len);
+ skb->protocol = eth_type_trans(skb, ndev);
+ napi_gro_receive(&priv->napi, skb);
+ stats->rx_packets++;
+ stats->rx_bytes += len;
+ rx++;
+ }
+
+refill:
+ buf = netdev_alloc_frag(priv->rx_buf_size);
+ if (!buf)
+ goto done;
+ phys = dma_map_single(priv->dev, buf,
+ RX_BUF_SIZE, DMA_FROM_DEVICE);
+ if (dma_mapping_error(priv->dev, phys))
+ goto done;
+ priv->rx_buf[priv->rx_head] = buf;
+ priv->rx_phys[priv->rx_head] = phys;
+ hip04_set_recv_desc(priv, phys);
+
+ priv->rx_head = RX_NEXT(priv->rx_head);
+ if (rx >= budget) {
+ --priv->rx_cnt_remaining;
+ goto done;
+ }
+
+ if (--priv->rx_cnt_remaining == 0)
+ priv->rx_cnt_remaining += hip04_recv_cnt(priv);
+ }
+
+ if (!(priv->reg_inten & RCV_INT)) {
+ /* enable rx interrupt */
+ priv->reg_inten |= RCV_INT;
+ writel_relaxed(priv->reg_inten, priv->base + PPE_INTEN);
+ }
+ napi_complete_done(napi, rx);
+done:
+ /* start a new timer if necessary */
+ if (rx < budget && tx_remaining)
+ hip04_start_tx_timer(priv);
+
+ return rx;
+}
+
+static irqreturn_t hip04_mac_interrupt(int irq, void *dev_id)
+{
+ struct net_device *ndev = (struct net_device *)dev_id;
+ struct hip04_priv *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
+ u32 ists = readl_relaxed(priv->base + PPE_INTSTS);
+
+ if (!ists)
+ return IRQ_NONE;
+
+ writel_relaxed(DEF_INT_MASK, priv->base + PPE_RINT);
+
+ if (unlikely(ists & DEF_INT_ERR)) {
+ if (ists & (RCV_NOBUF | RCV_DROP)) {
+ stats->rx_errors++;
+ stats->rx_dropped++;
+ netdev_err(ndev, "rx drop\n");
+ }
+ if (ists & TX_DROP) {
+ stats->tx_dropped++;
+ netdev_err(ndev, "tx drop\n");
+ }
+ }
+
+ if (ists & RCV_INT && napi_schedule_prep(&priv->napi)) {
+ /* disable rx interrupt */
+ priv->reg_inten &= ~(RCV_INT);
+ writel_relaxed(DEF_INT_MASK & ~RCV_INT, priv->base + PPE_INTEN);
+ hrtimer_cancel(&priv->tx_coalesce_timer);
+ __napi_schedule(&priv->napi);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static enum hrtimer_restart tx_done(struct hrtimer *hrtimer)
+{
+ struct hip04_priv *priv;
+
+ priv = container_of(hrtimer, struct hip04_priv, tx_coalesce_timer);
+
+ if (napi_schedule_prep(&priv->napi)) {
+ /* disable rx interrupt */
+ priv->reg_inten &= ~(RCV_INT);
+ writel_relaxed(DEF_INT_MASK & ~RCV_INT, priv->base + PPE_INTEN);
+ __napi_schedule(&priv->napi);
+ }
+
+ return HRTIMER_NORESTART;
+}
+
+static void hip04_adjust_link(struct net_device *ndev)
+{
+ struct hip04_priv *priv = netdev_priv(ndev);
+ struct phy_device *phy = priv->phy;
+
+ if ((priv->speed != phy->speed) || (priv->duplex != phy->duplex)) {
+ hip04_config_port(ndev, phy->speed, phy->duplex);
+ phy_print_status(phy);
+ }
+}
+
+static int hip04_mac_open(struct net_device *ndev)
+{
+ struct hip04_priv *priv = netdev_priv(ndev);
+ int i;
+
+ priv->rx_head = 0;
+ priv->rx_cnt_remaining = 0;
+ priv->tx_head = 0;
+ priv->tx_tail = 0;
+ hip04_reset_ppe(priv);
+
+ for (i = 0; i < RX_DESC_NUM; i++) {
+ dma_addr_t phys;
+
+ phys = dma_map_single(priv->dev, priv->rx_buf[i],
+ RX_BUF_SIZE, DMA_FROM_DEVICE);
+ if (dma_mapping_error(priv->dev, phys))
+ return -EIO;
+
+ priv->rx_phys[i] = phys;
+ hip04_set_recv_desc(priv, phys);
+ }
+
+ if (priv->phy)
+ phy_start(priv->phy);
+
+ netdev_reset_queue(ndev);
+ netif_start_queue(ndev);
+ hip04_mac_enable(ndev);
+ napi_enable(&priv->napi);
+
+ return 0;
+}
+
+static int hip04_mac_stop(struct net_device *ndev)
+{
+ struct hip04_priv *priv = netdev_priv(ndev);
+ int i;
+
+ napi_disable(&priv->napi);
+ netif_stop_queue(ndev);
+ hip04_mac_disable(ndev);
+ hip04_tx_reclaim(ndev, true);
+ hip04_reset_ppe(priv);
+
+ if (priv->phy)
+ phy_stop(priv->phy);
+
+ for (i = 0; i < RX_DESC_NUM; i++) {
+ if (priv->rx_phys[i]) {
+ dma_unmap_single(priv->dev, priv->rx_phys[i],
+ RX_BUF_SIZE, DMA_FROM_DEVICE);
+ priv->rx_phys[i] = 0;
+ }
+ }
+
+ return 0;
+}
+
+static void hip04_timeout(struct net_device *ndev, unsigned int txqueue)
+{
+ struct hip04_priv *priv = netdev_priv(ndev);
+
+ schedule_work(&priv->tx_timeout_task);
+}
+
+static void hip04_tx_timeout_task(struct work_struct *work)
+{
+ struct hip04_priv *priv;
+
+ priv = container_of(work, struct hip04_priv, tx_timeout_task);
+ hip04_mac_stop(priv->ndev);
+ hip04_mac_open(priv->ndev);
+}
+
+static int hip04_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct hip04_priv *priv = netdev_priv(netdev);
+
+ ec->tx_coalesce_usecs = priv->tx_coalesce_usecs;
+ ec->tx_max_coalesced_frames = priv->tx_coalesce_frames;
+
+ return 0;
+}
+
+static int hip04_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct hip04_priv *priv = netdev_priv(netdev);
+
+ if ((ec->tx_coalesce_usecs > HIP04_MAX_TX_COALESCE_USECS ||
+ ec->tx_coalesce_usecs < HIP04_MIN_TX_COALESCE_USECS) ||
+ (ec->tx_max_coalesced_frames > HIP04_MAX_TX_COALESCE_FRAMES ||
+ ec->tx_max_coalesced_frames < HIP04_MIN_TX_COALESCE_FRAMES))
+ return -EINVAL;
+
+ priv->tx_coalesce_usecs = ec->tx_coalesce_usecs;
+ priv->tx_coalesce_frames = ec->tx_max_coalesced_frames;
+
+ return 0;
+}
+
+static void hip04_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ strscpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
+}
+
+static const struct ethtool_ops hip04_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_TX_USECS |
+ ETHTOOL_COALESCE_TX_MAX_FRAMES,
+ .get_coalesce = hip04_get_coalesce,
+ .set_coalesce = hip04_set_coalesce,
+ .get_drvinfo = hip04_get_drvinfo,
+};
+
+static const struct net_device_ops hip04_netdev_ops = {
+ .ndo_open = hip04_mac_open,
+ .ndo_stop = hip04_mac_stop,
+ .ndo_start_xmit = hip04_mac_start_xmit,
+ .ndo_set_mac_address = hip04_set_mac_address,
+ .ndo_tx_timeout = hip04_timeout,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static int hip04_alloc_ring(struct net_device *ndev, struct device *d)
+{
+ struct hip04_priv *priv = netdev_priv(ndev);
+ int i;
+
+ priv->tx_desc = dma_alloc_coherent(d,
+ TX_DESC_NUM * sizeof(struct tx_desc),
+ &priv->tx_desc_dma, GFP_KERNEL);
+ if (!priv->tx_desc)
+ return -ENOMEM;
+
+ priv->rx_buf_size = RX_BUF_SIZE +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ for (i = 0; i < RX_DESC_NUM; i++) {
+ priv->rx_buf[i] = netdev_alloc_frag(priv->rx_buf_size);
+ if (!priv->rx_buf[i])
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void hip04_free_ring(struct net_device *ndev, struct device *d)
+{
+ struct hip04_priv *priv = netdev_priv(ndev);
+ int i;
+
+ for (i = 0; i < RX_DESC_NUM; i++)
+ if (priv->rx_buf[i])
+ skb_free_frag(priv->rx_buf[i]);
+
+ for (i = 0; i < TX_DESC_NUM; i++)
+ if (priv->tx_skb[i])
+ dev_kfree_skb_any(priv->tx_skb[i]);
+
+ dma_free_coherent(d, TX_DESC_NUM * sizeof(struct tx_desc),
+ priv->tx_desc, priv->tx_desc_dma);
+}
+
+static int hip04_mac_probe(struct platform_device *pdev)
+{
+ struct device *d = &pdev->dev;
+ struct device_node *node = d->of_node;
+ struct of_phandle_args arg;
+ struct net_device *ndev;
+ struct hip04_priv *priv;
+ int irq;
+ int ret;
+
+ ndev = alloc_etherdev(sizeof(struct hip04_priv));
+ if (!ndev)
+ return -ENOMEM;
+
+ priv = netdev_priv(ndev);
+ priv->dev = d;
+ priv->ndev = ndev;
+ platform_set_drvdata(pdev, ndev);
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base)) {
+ ret = PTR_ERR(priv->base);
+ goto init_fail;
+ }
+
+#if defined(CONFIG_HI13X1_GMAC)
+ priv->sysctrl_base = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(priv->sysctrl_base)) {
+ ret = PTR_ERR(priv->sysctrl_base);
+ goto init_fail;
+ }
+#endif
+
+ ret = of_parse_phandle_with_fixed_args(node, "port-handle", 3, 0, &arg);
+ if (ret < 0) {
+ dev_warn(d, "no port-handle\n");
+ goto init_fail;
+ }
+
+ priv->port = arg.args[0];
+ priv->chan = arg.args[1] * RX_DESC_NUM;
+ priv->group = arg.args[2];
+
+ hrtimer_init(&priv->tx_coalesce_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+
+ /* BQL will try to keep the TX queue as short as possible, but it can't
+ * be faster than tx_coalesce_usecs, so we need a fast timeout here,
+ * but also long enough to gather up enough frames to ensure we don't
+ * get more interrupts than necessary.
+ * 200us is enough for 16 frames of 1500 bytes at gigabit ethernet rate
+ */
+ priv->tx_coalesce_frames = TX_DESC_NUM * 3 / 4;
+ priv->tx_coalesce_usecs = 200;
+ priv->tx_coalesce_timer.function = tx_done;
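+ /* Illustrative arithmetic for the 200us figure above (not driver logic):
+ * a 1500-byte frame plus ~38 bytes of header, FCS, preamble and inter-frame
+ * gap is ~12304 bits on the wire, so 16 frames take roughly
+ * 16 * 12304 bits / 1 Gbit/s ~= 197us.
+ */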
+
+ priv->map = syscon_node_to_regmap(arg.np);
+ if (IS_ERR(priv->map)) {
+ dev_warn(d, "no syscon hisilicon,hip04-ppe\n");
+ ret = PTR_ERR(priv->map);
+ goto init_fail;
+ }
+
+ ret = of_get_phy_mode(node, &priv->phy_mode);
+ if (ret) {
+ dev_warn(d, "not find phy-mode\n");
+ goto init_fail;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = irq;
+ goto init_fail;
+ }
+
+ ret = devm_request_irq(d, irq, hip04_mac_interrupt,
+ 0, pdev->name, ndev);
+ if (ret) {
+ netdev_err(ndev, "devm_request_irq failed\n");
+ goto init_fail;
+ }
+
+ priv->phy_node = of_parse_phandle(node, "phy-handle", 0);
+ if (priv->phy_node) {
+ priv->phy = of_phy_connect(ndev, priv->phy_node,
+ &hip04_adjust_link,
+ 0, priv->phy_mode);
+ if (!priv->phy) {
+ ret = -EPROBE_DEFER;
+ goto init_fail;
+ }
+ }
+
+ INIT_WORK(&priv->tx_timeout_task, hip04_tx_timeout_task);
+
+ ndev->netdev_ops = &hip04_netdev_ops;
+ ndev->ethtool_ops = &hip04_ethtool_ops;
+ ndev->watchdog_timeo = TX_TIMEOUT;
+ ndev->priv_flags |= IFF_UNICAST_FLT;
+ ndev->irq = irq;
+ netif_napi_add(ndev, &priv->napi, hip04_rx_poll);
+
+ hip04_reset_dreq(priv);
+ hip04_reset_ppe(priv);
+ if (priv->phy_mode == PHY_INTERFACE_MODE_MII)
+ hip04_config_port(ndev, SPEED_100, DUPLEX_FULL);
+
+ hip04_config_fifo(priv);
+ eth_hw_addr_random(ndev);
+ hip04_update_mac_address(ndev);
+
+ ret = hip04_alloc_ring(ndev, d);
+ if (ret) {
+ netdev_err(ndev, "alloc ring fail\n");
+ goto alloc_fail;
+ }
+
+ ret = register_netdev(ndev);
+ if (ret)
+ goto alloc_fail;
+
+ return 0;
+
+alloc_fail:
+ hip04_free_ring(ndev, d);
+init_fail:
+ of_node_put(priv->phy_node);
+ free_netdev(ndev);
+ return ret;
+}
+
+static int hip04_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct hip04_priv *priv = netdev_priv(ndev);
+ struct device *d = &pdev->dev;
+
+ if (priv->phy)
+ phy_disconnect(priv->phy);
+
+ hip04_free_ring(ndev, d);
+ unregister_netdev(ndev);
+ of_node_put(priv->phy_node);
+ cancel_work_sync(&priv->tx_timeout_task);
+ free_netdev(ndev);
+
+ return 0;
+}
+
+static const struct of_device_id hip04_mac_match[] = {
+ { .compatible = "hisilicon,hip04-mac" },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, hip04_mac_match);
+
+static struct platform_driver hip04_mac_driver = {
+ .probe = hip04_mac_probe,
+ .remove = hip04_remove,
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = hip04_mac_match,
+ },
+};
+module_platform_driver(hip04_mac_driver);
+
+MODULE_DESCRIPTION("HISILICON P04 Ethernet driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/hisilicon/hisi_femac.c b/drivers/net/ethernet/hisilicon/hisi_femac.c
new file mode 100644
index 0000000000..cb7b0293fe
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hisi_femac.c
@@ -0,0 +1,976 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Hisilicon Fast Ethernet MAC Driver
+ *
+ * Copyright (c) 2016 HiSilicon Technologies Co., Ltd.
+ */
+
+#include <linux/circ_buf.h>
+#include <linux/clk.h>
+#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+/* MAC control register list */
+#define MAC_PORTSEL 0x0200
+#define MAC_PORTSEL_STAT_CPU BIT(0)
+#define MAC_PORTSEL_RMII BIT(1)
+#define MAC_PORTSET 0x0208
+#define MAC_PORTSET_DUPLEX_FULL BIT(0)
+#define MAC_PORTSET_LINKED BIT(1)
+#define MAC_PORTSET_SPEED_100M BIT(2)
+#define MAC_SET 0x0210
+#define MAX_FRAME_SIZE 1600
+#define MAX_FRAME_SIZE_MASK GENMASK(10, 0)
+#define BIT_PAUSE_EN BIT(18)
+#define RX_COALESCE_SET 0x0340
+#define RX_COALESCED_FRAME_OFFSET 24
+#define RX_COALESCED_FRAMES 8
+#define RX_COALESCED_TIMER 0x74
+#define QLEN_SET 0x0344
+#define RX_DEPTH_OFFSET 8
+#define MAX_HW_FIFO_DEPTH 64
+#define HW_TX_FIFO_DEPTH 12
+#define HW_RX_FIFO_DEPTH (MAX_HW_FIFO_DEPTH - HW_TX_FIFO_DEPTH)
+#define IQFRM_DES 0x0354
+#define RX_FRAME_LEN_MASK GENMASK(11, 0)
+#define IQ_ADDR 0x0358
+#define EQ_ADDR 0x0360
+#define EQFRM_LEN 0x0364
+#define ADDRQ_STAT 0x036C
+#define TX_CNT_INUSE_MASK GENMASK(5, 0)
+#define BIT_TX_READY BIT(24)
+#define BIT_RX_READY BIT(25)
+/* global control register list */
+#define GLB_HOSTMAC_L32 0x0000
+#define GLB_HOSTMAC_H16 0x0004
+#define GLB_SOFT_RESET 0x0008
+#define SOFT_RESET_ALL BIT(0)
+#define GLB_FWCTRL 0x0010
+#define FWCTRL_VLAN_ENABLE BIT(0)
+#define FWCTRL_FW2CPU_ENA BIT(5)
+#define FWCTRL_FWALL2CPU BIT(7)
+#define GLB_MACTCTRL 0x0014
+#define MACTCTRL_UNI2CPU BIT(1)
+#define MACTCTRL_MULTI2CPU BIT(3)
+#define MACTCTRL_BROAD2CPU BIT(5)
+#define MACTCTRL_MACT_ENA BIT(7)
+#define GLB_IRQ_STAT 0x0030
+#define GLB_IRQ_ENA 0x0034
+#define IRQ_ENA_PORT0_MASK GENMASK(7, 0)
+#define IRQ_ENA_PORT0 BIT(18)
+#define IRQ_ENA_ALL BIT(19)
+#define GLB_IRQ_RAW 0x0038
+#define IRQ_INT_RX_RDY BIT(0)
+#define IRQ_INT_TX_PER_PACKET BIT(1)
+#define IRQ_INT_TX_FIFO_EMPTY BIT(6)
+#define IRQ_INT_MULTI_RXRDY BIT(7)
+#define DEF_INT_MASK (IRQ_INT_MULTI_RXRDY | \
+ IRQ_INT_TX_PER_PACKET | \
+ IRQ_INT_TX_FIFO_EMPTY)
+#define GLB_MAC_L32_BASE 0x0100
+#define GLB_MAC_H16_BASE 0x0104
+#define MACFLT_HI16_MASK GENMASK(15, 0)
+#define BIT_MACFLT_ENA BIT(17)
+#define BIT_MACFLT_FW2CPU BIT(21)
+#define GLB_MAC_H16(reg) (GLB_MAC_H16_BASE + ((reg) * 0x8))
+#define GLB_MAC_L32(reg) (GLB_MAC_L32_BASE + ((reg) * 0x8))
+#define MAX_MAC_FILTER_NUM 8
+#define MAX_UNICAST_ADDRESSES 2
+#define MAX_MULTICAST_ADDRESSES (MAX_MAC_FILTER_NUM - \
+ MAX_UNICAST_ADDRESSES)
+/* software tx and rx queue number, should be power of 2 */
+#define TXQ_NUM 64
+#define RXQ_NUM 128
+#define FEMAC_POLL_WEIGHT 16
+
+#define PHY_RESET_DELAYS_PROPERTY "hisilicon,phy-reset-delays-us"
+
+enum phy_reset_delays {
+ PRE_DELAY,
+ PULSE,
+ POST_DELAY,
+ DELAYS_NUM,
+};
+
+struct hisi_femac_queue {
+ struct sk_buff **skb;
+ dma_addr_t *dma_phys;
+ int num;
+ unsigned int head;
+ unsigned int tail;
+};
+
+struct hisi_femac_priv {
+ void __iomem *port_base;
+ void __iomem *glb_base;
+ struct clk *clk;
+ struct reset_control *mac_rst;
+ struct reset_control *phy_rst;
+ u32 phy_reset_delays[DELAYS_NUM];
+ u32 link_status;
+
+ struct device *dev;
+ struct net_device *ndev;
+
+ struct hisi_femac_queue txq;
+ struct hisi_femac_queue rxq;
+ u32 tx_fifo_used_cnt;
+ struct napi_struct napi;
+};
+
+static void hisi_femac_irq_enable(struct hisi_femac_priv *priv, int irqs)
+{
+ u32 val;
+
+ val = readl(priv->glb_base + GLB_IRQ_ENA);
+ writel(val | irqs, priv->glb_base + GLB_IRQ_ENA);
+}
+
+static void hisi_femac_irq_disable(struct hisi_femac_priv *priv, int irqs)
+{
+ u32 val;
+
+ val = readl(priv->glb_base + GLB_IRQ_ENA);
+ writel(val & (~irqs), priv->glb_base + GLB_IRQ_ENA);
+}
+
+static void hisi_femac_tx_dma_unmap(struct hisi_femac_priv *priv,
+ struct sk_buff *skb, unsigned int pos)
+{
+ dma_addr_t dma_addr;
+
+ dma_addr = priv->txq.dma_phys[pos];
+ dma_unmap_single(priv->dev, dma_addr, skb->len, DMA_TO_DEVICE);
+}
+
+static void hisi_femac_xmit_reclaim(struct net_device *dev)
+{
+ struct sk_buff *skb;
+ struct hisi_femac_priv *priv = netdev_priv(dev);
+ struct hisi_femac_queue *txq = &priv->txq;
+ unsigned int bytes_compl = 0, pkts_compl = 0;
+ u32 val;
+
+ netif_tx_lock(dev);
+
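+ /* ADDRQ_STAT reports how many frames the hardware TX queue still holds;
+ * the difference between that count and tx_fifo_used_cnt is the number of
+ * frames that have completed and can be unmapped and freed below.
+ */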
+ val = readl(priv->port_base + ADDRQ_STAT) & TX_CNT_INUSE_MASK;
+ while (val < priv->tx_fifo_used_cnt) {
+ skb = txq->skb[txq->tail];
+ if (unlikely(!skb)) {
+ netdev_err(dev, "xmitq_cnt_inuse=%d, tx_fifo_used=%d\n",
+ val, priv->tx_fifo_used_cnt);
+ break;
+ }
+ hisi_femac_tx_dma_unmap(priv, skb, txq->tail);
+ pkts_compl++;
+ bytes_compl += skb->len;
+ dev_kfree_skb_any(skb);
+
+ priv->tx_fifo_used_cnt--;
+
+ val = readl(priv->port_base + ADDRQ_STAT) & TX_CNT_INUSE_MASK;
+ txq->skb[txq->tail] = NULL;
+ txq->tail = (txq->tail + 1) % txq->num;
+ }
+
+ netdev_completed_queue(dev, pkts_compl, bytes_compl);
+
+ if (unlikely(netif_queue_stopped(dev)) && pkts_compl)
+ netif_wake_queue(dev);
+
+ netif_tx_unlock(dev);
+}
+
+static void hisi_femac_adjust_link(struct net_device *dev)
+{
+ struct hisi_femac_priv *priv = netdev_priv(dev);
+ struct phy_device *phy = dev->phydev;
+ u32 status = 0;
+
+ if (phy->link)
+ status |= MAC_PORTSET_LINKED;
+ if (phy->duplex == DUPLEX_FULL)
+ status |= MAC_PORTSET_DUPLEX_FULL;
+ if (phy->speed == SPEED_100)
+ status |= MAC_PORTSET_SPEED_100M;
+
+ if ((status != priv->link_status) &&
+ ((status | priv->link_status) & MAC_PORTSET_LINKED)) {
+ writel(status, priv->port_base + MAC_PORTSET);
+ priv->link_status = status;
+ phy_print_status(phy);
+ }
+}
+
+static void hisi_femac_rx_refill(struct hisi_femac_priv *priv)
+{
+ struct hisi_femac_queue *rxq = &priv->rxq;
+ struct sk_buff *skb;
+ u32 pos;
+ u32 len = MAX_FRAME_SIZE;
+ dma_addr_t addr;
+
+ pos = rxq->head;
+ while (readl(priv->port_base + ADDRQ_STAT) & BIT_RX_READY) {
+ if (!CIRC_SPACE(pos, rxq->tail, rxq->num))
+ break;
+ if (unlikely(rxq->skb[pos])) {
+ netdev_err(priv->ndev, "err skb[%d]=%p\n",
+ pos, rxq->skb[pos]);
+ break;
+ }
+ skb = netdev_alloc_skb_ip_align(priv->ndev, len);
+ if (unlikely(!skb))
+ break;
+
+ addr = dma_map_single(priv->dev, skb->data, len,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(priv->dev, addr)) {
+ dev_kfree_skb_any(skb);
+ break;
+ }
+ rxq->dma_phys[pos] = addr;
+ rxq->skb[pos] = skb;
+ writel(addr, priv->port_base + IQ_ADDR);
+ pos = (pos + 1) % rxq->num;
+ }
+ rxq->head = pos;
+}
+
+static int hisi_femac_rx(struct net_device *dev, int limit)
+{
+ struct hisi_femac_priv *priv = netdev_priv(dev);
+ struct hisi_femac_queue *rxq = &priv->rxq;
+ struct sk_buff *skb;
+ dma_addr_t addr;
+ u32 rx_pkt_info, pos, len, rx_pkts_num = 0;
+
+ pos = rxq->tail;
+ while (readl(priv->glb_base + GLB_IRQ_RAW) & IRQ_INT_RX_RDY) {
+ rx_pkt_info = readl(priv->port_base + IQFRM_DES);
+ len = rx_pkt_info & RX_FRAME_LEN_MASK;
+ len -= ETH_FCS_LEN;
+
+ /* tell hardware we will deal with this packet */
+ writel(IRQ_INT_RX_RDY, priv->glb_base + GLB_IRQ_RAW);
+
+ rx_pkts_num++;
+
+ skb = rxq->skb[pos];
+ if (unlikely(!skb)) {
+ netdev_err(dev, "rx skb NULL. pos=%d\n", pos);
+ break;
+ }
+ rxq->skb[pos] = NULL;
+
+ addr = rxq->dma_phys[pos];
+ dma_unmap_single(priv->dev, addr, MAX_FRAME_SIZE,
+ DMA_FROM_DEVICE);
+ skb_put(skb, len);
+ if (unlikely(skb->len > MAX_FRAME_SIZE)) {
+ netdev_err(dev, "rcv len err, len = %d\n", skb->len);
+ dev->stats.rx_errors++;
+ dev->stats.rx_length_errors++;
+ dev_kfree_skb_any(skb);
+ goto next;
+ }
+
+ skb->protocol = eth_type_trans(skb, dev);
+ napi_gro_receive(&priv->napi, skb);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += len;
+next:
+ pos = (pos + 1) % rxq->num;
+ if (rx_pkts_num >= limit)
+ break;
+ }
+ rxq->tail = pos;
+
+ hisi_femac_rx_refill(priv);
+
+ return rx_pkts_num;
+}
+
+static int hisi_femac_poll(struct napi_struct *napi, int budget)
+{
+ struct hisi_femac_priv *priv = container_of(napi,
+ struct hisi_femac_priv, napi);
+ struct net_device *dev = priv->ndev;
+ int work_done = 0, task = budget;
+ int ints, num;
+
+ do {
+ hisi_femac_xmit_reclaim(dev);
+ num = hisi_femac_rx(dev, task);
+ work_done += num;
+ task -= num;
+ if (work_done >= budget)
+ break;
+
+ ints = readl(priv->glb_base + GLB_IRQ_RAW);
+ writel(ints & DEF_INT_MASK,
+ priv->glb_base + GLB_IRQ_RAW);
+ } while (ints & DEF_INT_MASK);
+
+ if (work_done < budget) {
+ napi_complete_done(napi, work_done);
+ hisi_femac_irq_enable(priv, DEF_INT_MASK &
+ (~IRQ_INT_TX_PER_PACKET));
+ }
+
+ return work_done;
+}
+
+static irqreturn_t hisi_femac_interrupt(int irq, void *dev_id)
+{
+ int ints;
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct hisi_femac_priv *priv = netdev_priv(dev);
+
+ ints = readl(priv->glb_base + GLB_IRQ_RAW);
+
+ if (likely(ints & DEF_INT_MASK)) {
+ writel(ints & DEF_INT_MASK,
+ priv->glb_base + GLB_IRQ_RAW);
+ hisi_femac_irq_disable(priv, DEF_INT_MASK);
+ napi_schedule(&priv->napi);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int hisi_femac_init_queue(struct device *dev,
+ struct hisi_femac_queue *queue,
+ unsigned int num)
+{
+ queue->skb = devm_kcalloc(dev, num, sizeof(struct sk_buff *),
+ GFP_KERNEL);
+ if (!queue->skb)
+ return -ENOMEM;
+
+ queue->dma_phys = devm_kcalloc(dev, num, sizeof(dma_addr_t),
+ GFP_KERNEL);
+ if (!queue->dma_phys)
+ return -ENOMEM;
+
+ queue->num = num;
+ queue->head = 0;
+ queue->tail = 0;
+
+ return 0;
+}
+
+static int hisi_femac_init_tx_and_rx_queues(struct hisi_femac_priv *priv)
+{
+ int ret;
+
+ ret = hisi_femac_init_queue(priv->dev, &priv->txq, TXQ_NUM);
+ if (ret)
+ return ret;
+
+ ret = hisi_femac_init_queue(priv->dev, &priv->rxq, RXQ_NUM);
+ if (ret)
+ return ret;
+
+ priv->tx_fifo_used_cnt = 0;
+
+ return 0;
+}
+
+static void hisi_femac_free_skb_rings(struct hisi_femac_priv *priv)
+{
+ struct hisi_femac_queue *txq = &priv->txq;
+ struct hisi_femac_queue *rxq = &priv->rxq;
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+ u32 pos;
+
+ pos = rxq->tail;
+ while (pos != rxq->head) {
+ skb = rxq->skb[pos];
+ if (unlikely(!skb)) {
+ netdev_err(priv->ndev, "NULL rx skb. pos=%d, head=%d\n",
+ pos, rxq->head);
+ continue;
+ }
+
+ dma_addr = rxq->dma_phys[pos];
+ dma_unmap_single(priv->dev, dma_addr, MAX_FRAME_SIZE,
+ DMA_FROM_DEVICE);
+
+ dev_kfree_skb_any(skb);
+ rxq->skb[pos] = NULL;
+ pos = (pos + 1) % rxq->num;
+ }
+ rxq->tail = pos;
+
+ pos = txq->tail;
+ while (pos != txq->head) {
+ skb = txq->skb[pos];
+ if (unlikely(!skb)) {
+ netdev_err(priv->ndev, "NULL tx skb. pos=%d, head=%d\n",
+ pos, txq->head);
+ continue;
+ }
+ hisi_femac_tx_dma_unmap(priv, skb, pos);
+ dev_kfree_skb_any(skb);
+ txq->skb[pos] = NULL;
+ pos = (pos + 1) % txq->num;
+ }
+ txq->tail = pos;
+ priv->tx_fifo_used_cnt = 0;
+}
+
+static int hisi_femac_set_hw_mac_addr(struct hisi_femac_priv *priv,
+ const unsigned char *mac)
+{
+ u32 reg;
+
+ reg = mac[1] | (mac[0] << 8);
+ writel(reg, priv->glb_base + GLB_HOSTMAC_H16);
+
+ reg = mac[5] | (mac[4] << 8) | (mac[3] << 16) | (mac[2] << 24);
+ writel(reg, priv->glb_base + GLB_HOSTMAC_L32);
+
+ return 0;
+}
+
+static int hisi_femac_port_reset(struct hisi_femac_priv *priv)
+{
+ u32 val;
+
+ val = readl(priv->glb_base + GLB_SOFT_RESET);
+ val |= SOFT_RESET_ALL;
+ writel(val, priv->glb_base + GLB_SOFT_RESET);
+
+ usleep_range(500, 800);
+
+ val &= ~SOFT_RESET_ALL;
+ writel(val, priv->glb_base + GLB_SOFT_RESET);
+
+ return 0;
+}
+
+static int hisi_femac_net_open(struct net_device *dev)
+{
+ struct hisi_femac_priv *priv = netdev_priv(dev);
+
+ hisi_femac_port_reset(priv);
+ hisi_femac_set_hw_mac_addr(priv, dev->dev_addr);
+ hisi_femac_rx_refill(priv);
+
+ netif_carrier_off(dev);
+ netdev_reset_queue(dev);
+ netif_start_queue(dev);
+ napi_enable(&priv->napi);
+
+ priv->link_status = 0;
+ if (dev->phydev)
+ phy_start(dev->phydev);
+
+ writel(IRQ_ENA_PORT0_MASK, priv->glb_base + GLB_IRQ_RAW);
+ hisi_femac_irq_enable(priv, IRQ_ENA_ALL | IRQ_ENA_PORT0 | DEF_INT_MASK);
+
+ return 0;
+}
+
+static int hisi_femac_net_close(struct net_device *dev)
+{
+ struct hisi_femac_priv *priv = netdev_priv(dev);
+
+ hisi_femac_irq_disable(priv, IRQ_ENA_PORT0);
+
+ if (dev->phydev)
+ phy_stop(dev->phydev);
+
+ netif_stop_queue(dev);
+ napi_disable(&priv->napi);
+
+ hisi_femac_free_skb_rings(priv);
+
+ return 0;
+}
+
+static netdev_tx_t hisi_femac_net_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct hisi_femac_priv *priv = netdev_priv(dev);
+ struct hisi_femac_queue *txq = &priv->txq;
+ dma_addr_t addr;
+ u32 val;
+
+ val = readl(priv->port_base + ADDRQ_STAT);
+ val &= BIT_TX_READY;
+ if (!val) {
+ hisi_femac_irq_enable(priv, IRQ_INT_TX_PER_PACKET);
+ dev->stats.tx_dropped++;
+ dev->stats.tx_fifo_errors++;
+ netif_stop_queue(dev);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (unlikely(!CIRC_SPACE(txq->head, txq->tail,
+ txq->num))) {
+ hisi_femac_irq_enable(priv, IRQ_INT_TX_PER_PACKET);
+ dev->stats.tx_dropped++;
+ dev->stats.tx_fifo_errors++;
+ netif_stop_queue(dev);
+ return NETDEV_TX_BUSY;
+ }
+
+ addr = dma_map_single(priv->dev, skb->data,
+ skb->len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(priv->dev, addr))) {
+ dev_kfree_skb_any(skb);
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+ txq->dma_phys[txq->head] = addr;
+
+ txq->skb[txq->head] = skb;
+ txq->head = (txq->head + 1) % txq->num;
+
+ writel(addr, priv->port_base + EQ_ADDR);
+ writel(skb->len + ETH_FCS_LEN, priv->port_base + EQFRM_LEN);
+
+ priv->tx_fifo_used_cnt++;
+
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
+ netdev_sent_queue(dev, skb->len);
+
+ return NETDEV_TX_OK;
+}
+
+static int hisi_femac_set_mac_address(struct net_device *dev, void *p)
+{
+ struct hisi_femac_priv *priv = netdev_priv(dev);
+ struct sockaddr *skaddr = p;
+
+ if (!is_valid_ether_addr(skaddr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ eth_hw_addr_set(dev, skaddr->sa_data);
+ dev->addr_assign_type &= ~NET_ADDR_RANDOM;
+
+ hisi_femac_set_hw_mac_addr(priv, dev->dev_addr);
+
+ return 0;
+}
+
+static void hisi_femac_enable_hw_addr_filter(struct hisi_femac_priv *priv,
+ unsigned int reg_n, bool enable)
+{
+ u32 val;
+
+ val = readl(priv->glb_base + GLB_MAC_H16(reg_n));
+ if (enable)
+ val |= BIT_MACFLT_ENA;
+ else
+ val &= ~BIT_MACFLT_ENA;
+ writel(val, priv->glb_base + GLB_MAC_H16(reg_n));
+}
+
+static void hisi_femac_set_hw_addr_filter(struct hisi_femac_priv *priv,
+ unsigned char *addr,
+ unsigned int reg_n)
+{
+ unsigned int high, low;
+ u32 val;
+
+ high = GLB_MAC_H16(reg_n);
+ low = GLB_MAC_L32(reg_n);
+
+ val = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) | addr[5];
+ writel(val, priv->glb_base + low);
+
+ val = readl(priv->glb_base + high);
+ val &= ~MACFLT_HI16_MASK;
+ val |= ((addr[0] << 8) | addr[1]);
+ val |= (BIT_MACFLT_ENA | BIT_MACFLT_FW2CPU);
+ writel(val, priv->glb_base + high);
+}
+
+static void hisi_femac_set_promisc_mode(struct hisi_femac_priv *priv,
+ bool promisc_mode)
+{
+ u32 val;
+
+ val = readl(priv->glb_base + GLB_FWCTRL);
+ if (promisc_mode)
+ val |= FWCTRL_FWALL2CPU;
+ else
+ val &= ~FWCTRL_FWALL2CPU;
+ writel(val, priv->glb_base + GLB_FWCTRL);
+}
+
+/* Handle multiple multicast addresses (perfect filtering) */
+static void hisi_femac_set_mc_addr_filter(struct hisi_femac_priv *priv)
+{
+ struct net_device *dev = priv->ndev;
+ u32 val;
+
+ val = readl(priv->glb_base + GLB_MACTCTRL);
+ if ((netdev_mc_count(dev) > MAX_MULTICAST_ADDRESSES) ||
+ (dev->flags & IFF_ALLMULTI)) {
+ val |= MACTCTRL_MULTI2CPU;
+ } else {
+ int reg = MAX_UNICAST_ADDRESSES;
+ int i;
+ struct netdev_hw_addr *ha;
+
+ for (i = reg; i < MAX_MAC_FILTER_NUM; i++)
+ hisi_femac_enable_hw_addr_filter(priv, i, false);
+
+ netdev_for_each_mc_addr(ha, dev) {
+ hisi_femac_set_hw_addr_filter(priv, ha->addr, reg);
+ reg++;
+ }
+ val &= ~MACTCTRL_MULTI2CPU;
+ }
+ writel(val, priv->glb_base + GLB_MACTCTRL);
+}
+
+/* Handle multiple unicast addresses (perfect filtering) */
+static void hisi_femac_set_uc_addr_filter(struct hisi_femac_priv *priv)
+{
+ struct net_device *dev = priv->ndev;
+ u32 val;
+
+ val = readl(priv->glb_base + GLB_MACTCTRL);
+ if (netdev_uc_count(dev) > MAX_UNICAST_ADDRESSES) {
+ val |= MACTCTRL_UNI2CPU;
+ } else {
+ int reg = 0;
+ int i;
+ struct netdev_hw_addr *ha;
+
+ for (i = reg; i < MAX_UNICAST_ADDRESSES; i++)
+ hisi_femac_enable_hw_addr_filter(priv, i, false);
+
+ netdev_for_each_uc_addr(ha, dev) {
+ hisi_femac_set_hw_addr_filter(priv, ha->addr, reg);
+ reg++;
+ }
+ val &= ~MACTCTRL_UNI2CPU;
+ }
+ writel(val, priv->glb_base + GLB_MACTCTRL);
+}
+
+static void hisi_femac_net_set_rx_mode(struct net_device *dev)
+{
+ struct hisi_femac_priv *priv = netdev_priv(dev);
+
+ if (dev->flags & IFF_PROMISC) {
+ hisi_femac_set_promisc_mode(priv, true);
+ } else {
+ hisi_femac_set_promisc_mode(priv, false);
+ hisi_femac_set_mc_addr_filter(priv);
+ hisi_femac_set_uc_addr_filter(priv);
+ }
+}
+
+static const struct ethtool_ops hisi_femac_ethtools_ops = {
+ .get_link = ethtool_op_get_link,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+};
+
+static const struct net_device_ops hisi_femac_netdev_ops = {
+ .ndo_open = hisi_femac_net_open,
+ .ndo_stop = hisi_femac_net_close,
+ .ndo_start_xmit = hisi_femac_net_xmit,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
+ .ndo_set_mac_address = hisi_femac_set_mac_address,
+ .ndo_set_rx_mode = hisi_femac_net_set_rx_mode,
+};
+
+static void hisi_femac_core_reset(struct hisi_femac_priv *priv)
+{
+ reset_control_assert(priv->mac_rst);
+ reset_control_deassert(priv->mac_rst);
+}
+
+static void hisi_femac_sleep_us(u32 time_us)
+{
+ u32 time_ms;
+
+ if (!time_us)
+ return;
+
+ time_ms = DIV_ROUND_UP(time_us, 1000);
+ if (time_ms < 20)
+ usleep_range(time_us, time_us + 500);
+ else
+ msleep(time_ms);
+}
+
+static void hisi_femac_phy_reset(struct hisi_femac_priv *priv)
+{
+ /* To make sure the PHY hardware reset succeeds,
+ * we must keep the PHY in the deasserted state first and
+ * then complete the hardware reset operation.
+ */
+ reset_control_deassert(priv->phy_rst);
+ hisi_femac_sleep_us(priv->phy_reset_delays[PRE_DELAY]);
+
+ reset_control_assert(priv->phy_rst);
+ /* delay for a while to let the reset complete;
+ * the required time depends on the PHY hardware
+ */
+ hisi_femac_sleep_us(priv->phy_reset_delays[PULSE]);
+ reset_control_deassert(priv->phy_rst);
+ /* delay for a while so that later MDIO accesses succeed */
+ hisi_femac_sleep_us(priv->phy_reset_delays[POST_DELAY]);
+}
+
+static void hisi_femac_port_init(struct hisi_femac_priv *priv)
+{
+ u32 val;
+
+ /* MAC gets link status info and phy mode by software config */
+ val = MAC_PORTSEL_STAT_CPU;
+ if (priv->ndev->phydev->interface == PHY_INTERFACE_MODE_RMII)
+ val |= MAC_PORTSEL_RMII;
+ writel(val, priv->port_base + MAC_PORTSEL);
+
+ /* clear all interrupt status */
+ writel(IRQ_ENA_PORT0_MASK, priv->glb_base + GLB_IRQ_RAW);
+ hisi_femac_irq_disable(priv, IRQ_ENA_PORT0_MASK | IRQ_ENA_PORT0);
+
+ val = readl(priv->glb_base + GLB_FWCTRL);
+ val &= ~(FWCTRL_VLAN_ENABLE | FWCTRL_FWALL2CPU);
+ val |= FWCTRL_FW2CPU_ENA;
+ writel(val, priv->glb_base + GLB_FWCTRL);
+
+ val = readl(priv->glb_base + GLB_MACTCTRL);
+ val |= (MACTCTRL_BROAD2CPU | MACTCTRL_MACT_ENA);
+ writel(val, priv->glb_base + GLB_MACTCTRL);
+
+ val = readl(priv->port_base + MAC_SET);
+ val &= ~MAX_FRAME_SIZE_MASK;
+ val |= MAX_FRAME_SIZE;
+ writel(val, priv->port_base + MAC_SET);
+
+ val = RX_COALESCED_TIMER |
+ (RX_COALESCED_FRAMES << RX_COALESCED_FRAME_OFFSET);
+ writel(val, priv->port_base + RX_COALESCE_SET);
+
+ val = (HW_RX_FIFO_DEPTH << RX_DEPTH_OFFSET) | HW_TX_FIFO_DEPTH;
+ writel(val, priv->port_base + QLEN_SET);
+}
+
+static int hisi_femac_drv_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct net_device *ndev;
+ struct hisi_femac_priv *priv;
+ struct phy_device *phy;
+ int ret;
+
+ ndev = alloc_etherdev(sizeof(*priv));
+ if (!ndev)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ndev);
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ priv = netdev_priv(ndev);
+ priv->dev = dev;
+ priv->ndev = ndev;
+
+ priv->port_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->port_base)) {
+ ret = PTR_ERR(priv->port_base);
+ goto out_free_netdev;
+ }
+
+ priv->glb_base = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(priv->glb_base)) {
+ ret = PTR_ERR(priv->glb_base);
+ goto out_free_netdev;
+ }
+
+ priv->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ dev_err(dev, "failed to get clk\n");
+ ret = -ENODEV;
+ goto out_free_netdev;
+ }
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret) {
+ dev_err(dev, "failed to enable clk %d\n", ret);
+ goto out_free_netdev;
+ }
+
+ priv->mac_rst = devm_reset_control_get(dev, "mac");
+ if (IS_ERR(priv->mac_rst)) {
+ ret = PTR_ERR(priv->mac_rst);
+ goto out_disable_clk;
+ }
+ hisi_femac_core_reset(priv);
+
+ priv->phy_rst = devm_reset_control_get(dev, "phy");
+ if (IS_ERR(priv->phy_rst)) {
+ priv->phy_rst = NULL;
+ } else {
+ ret = of_property_read_u32_array(node,
+ PHY_RESET_DELAYS_PROPERTY,
+ priv->phy_reset_delays,
+ DELAYS_NUM);
+ if (ret)
+ goto out_disable_clk;
+ hisi_femac_phy_reset(priv);
+ }
+
+ phy = of_phy_get_and_connect(ndev, node, hisi_femac_adjust_link);
+ if (!phy) {
+ dev_err(dev, "connect to PHY failed!\n");
+ ret = -ENODEV;
+ goto out_disable_clk;
+ }
+
+ phy_attached_print(phy, "phy_id=0x%.8lx, phy_mode=%s\n",
+ (unsigned long)phy->phy_id,
+ phy_modes(phy->interface));
+
+ ret = of_get_ethdev_address(node, ndev);
+ if (ret) {
+ eth_hw_addr_random(ndev);
+ dev_warn(dev, "using random MAC address %pM\n",
+ ndev->dev_addr);
+ }
+
+ ndev->watchdog_timeo = 6 * HZ;
+ ndev->priv_flags |= IFF_UNICAST_FLT;
+ ndev->netdev_ops = &hisi_femac_netdev_ops;
+ ndev->ethtool_ops = &hisi_femac_ethtools_ops;
+ netif_napi_add_weight(ndev, &priv->napi, hisi_femac_poll,
+ FEMAC_POLL_WEIGHT);
+
+ hisi_femac_port_init(priv);
+
+ ret = hisi_femac_init_tx_and_rx_queues(priv);
+ if (ret)
+ goto out_disconnect_phy;
+
+ ndev->irq = platform_get_irq(pdev, 0);
+ if (ndev->irq < 0) {
+ ret = ndev->irq;
+ goto out_disconnect_phy;
+ }
+
+ ret = devm_request_irq(dev, ndev->irq, hisi_femac_interrupt,
+ IRQF_SHARED, pdev->name, ndev);
+ if (ret) {
+ dev_err(dev, "devm_request_irq %d failed!\n", ndev->irq);
+ goto out_disconnect_phy;
+ }
+
+ ret = register_netdev(ndev);
+ if (ret) {
+ dev_err(dev, "register_netdev failed!\n");
+ goto out_disconnect_phy;
+ }
+
+ return ret;
+
+out_disconnect_phy:
+ netif_napi_del(&priv->napi);
+ phy_disconnect(phy);
+out_disable_clk:
+ clk_disable_unprepare(priv->clk);
+out_free_netdev:
+ free_netdev(ndev);
+
+ return ret;
+}
+
+static int hisi_femac_drv_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct hisi_femac_priv *priv = netdev_priv(ndev);
+
+ netif_napi_del(&priv->napi);
+ unregister_netdev(ndev);
+
+ phy_disconnect(ndev->phydev);
+ clk_disable_unprepare(priv->clk);
+ free_netdev(ndev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int hisi_femac_drv_suspend(struct platform_device *pdev,
+ pm_message_t state)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct hisi_femac_priv *priv = netdev_priv(ndev);
+
+ disable_irq(ndev->irq);
+ if (netif_running(ndev)) {
+ hisi_femac_net_close(ndev);
+ netif_device_detach(ndev);
+ }
+
+ clk_disable_unprepare(priv->clk);
+
+ return 0;
+}
+
+static int hisi_femac_drv_resume(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct hisi_femac_priv *priv = netdev_priv(ndev);
+
+ clk_prepare_enable(priv->clk);
+ if (priv->phy_rst)
+ hisi_femac_phy_reset(priv);
+
+ if (netif_running(ndev)) {
+ hisi_femac_port_init(priv);
+ hisi_femac_net_open(ndev);
+ netif_device_attach(ndev);
+ }
+ enable_irq(ndev->irq);
+
+ return 0;
+}
+#endif
+
+static const struct of_device_id hisi_femac_match[] = {
+ {.compatible = "hisilicon,hisi-femac-v1",},
+ {.compatible = "hisilicon,hisi-femac-v2",},
+ {.compatible = "hisilicon,hi3516cv300-femac",},
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, hisi_femac_match);
+
+static struct platform_driver hisi_femac_driver = {
+ .driver = {
+ .name = "hisi-femac",
+ .of_match_table = hisi_femac_match,
+ },
+ .probe = hisi_femac_drv_probe,
+ .remove = hisi_femac_drv_remove,
+#ifdef CONFIG_PM
+ .suspend = hisi_femac_drv_suspend,
+ .resume = hisi_femac_drv_resume,
+#endif
+};
+
+module_platform_driver(hisi_femac_driver);
+
+MODULE_DESCRIPTION("Hisilicon Fast Ethernet MAC driver");
+MODULE_AUTHOR("Dongpo Li <lidongpo@hisilicon.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:hisi-femac");
diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
new file mode 100644
index 0000000000..26d22bb04b
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
@@ -0,0 +1,1329 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Copyright (c) 2014 Linaro Ltd.
+ * Copyright (c) 2014 Hisilicon Limited.
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/of_device.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
+#include <linux/reset.h>
+#include <linux/clk.h>
+#include <linux/circ_buf.h>
+
+#define STATION_ADDR_LOW 0x0000
+#define STATION_ADDR_HIGH 0x0004
+#define MAC_DUPLEX_HALF_CTRL 0x0008
+#define MAX_FRM_SIZE 0x003c
+#define PORT_MODE 0x0040
+#define PORT_EN 0x0044
+#define BITS_TX_EN BIT(2)
+#define BITS_RX_EN BIT(1)
+#define REC_FILT_CONTROL 0x0064
+#define BIT_CRC_ERR_PASS BIT(5)
+#define BIT_PAUSE_FRM_PASS BIT(4)
+#define BIT_VLAN_DROP_EN BIT(3)
+#define BIT_BC_DROP_EN BIT(2)
+#define BIT_MC_MATCH_EN BIT(1)
+#define BIT_UC_MATCH_EN BIT(0)
+#define PORT_MC_ADDR_LOW 0x0068
+#define PORT_MC_ADDR_HIGH 0x006C
+#define CF_CRC_STRIP 0x01b0
+#define MODE_CHANGE_EN 0x01b4
+#define BIT_MODE_CHANGE_EN BIT(0)
+#define COL_SLOT_TIME 0x01c0
+#define RECV_CONTROL 0x01e0
+#define BIT_STRIP_PAD_EN BIT(3)
+#define BIT_RUNT_PKT_EN BIT(4)
+#define CONTROL_WORD 0x0214
+#define MDIO_SINGLE_CMD 0x03c0
+#define MDIO_SINGLE_DATA 0x03c4
+#define MDIO_CTRL 0x03cc
+#define MDIO_RDATA_STATUS 0x03d0
+
+#define MDIO_START BIT(20)
+#define MDIO_R_VALID BIT(0)
+#define MDIO_READ (BIT(17) | MDIO_START)
+#define MDIO_WRITE (BIT(16) | MDIO_START)
+
+#define RX_FQ_START_ADDR 0x0500
+#define RX_FQ_DEPTH 0x0504
+#define RX_FQ_WR_ADDR 0x0508
+#define RX_FQ_RD_ADDR 0x050c
+#define RX_FQ_VLDDESC_CNT 0x0510
+#define RX_FQ_ALEMPTY_TH 0x0514
+#define RX_FQ_REG_EN 0x0518
+#define BITS_RX_FQ_START_ADDR_EN BIT(2)
+#define BITS_RX_FQ_DEPTH_EN BIT(1)
+#define BITS_RX_FQ_RD_ADDR_EN BIT(0)
+#define RX_FQ_ALFULL_TH 0x051c
+#define RX_BQ_START_ADDR 0x0520
+#define RX_BQ_DEPTH 0x0524
+#define RX_BQ_WR_ADDR 0x0528
+#define RX_BQ_RD_ADDR 0x052c
+#define RX_BQ_FREE_DESC_CNT 0x0530
+#define RX_BQ_ALEMPTY_TH 0x0534
+#define RX_BQ_REG_EN 0x0538
+#define BITS_RX_BQ_START_ADDR_EN BIT(2)
+#define BITS_RX_BQ_DEPTH_EN BIT(1)
+#define BITS_RX_BQ_WR_ADDR_EN BIT(0)
+#define RX_BQ_ALFULL_TH 0x053c
+#define TX_BQ_START_ADDR 0x0580
+#define TX_BQ_DEPTH 0x0584
+#define TX_BQ_WR_ADDR 0x0588
+#define TX_BQ_RD_ADDR 0x058c
+#define TX_BQ_VLDDESC_CNT 0x0590
+#define TX_BQ_ALEMPTY_TH 0x0594
+#define TX_BQ_REG_EN 0x0598
+#define BITS_TX_BQ_START_ADDR_EN BIT(2)
+#define BITS_TX_BQ_DEPTH_EN BIT(1)
+#define BITS_TX_BQ_RD_ADDR_EN BIT(0)
+#define TX_BQ_ALFULL_TH 0x059c
+#define TX_RQ_START_ADDR 0x05a0
+#define TX_RQ_DEPTH 0x05a4
+#define TX_RQ_WR_ADDR 0x05a8
+#define TX_RQ_RD_ADDR 0x05ac
+#define TX_RQ_FREE_DESC_CNT 0x05b0
+#define TX_RQ_ALEMPTY_TH 0x05b4
+#define TX_RQ_REG_EN 0x05b8
+#define BITS_TX_RQ_START_ADDR_EN BIT(2)
+#define BITS_TX_RQ_DEPTH_EN BIT(1)
+#define BITS_TX_RQ_WR_ADDR_EN BIT(0)
+#define TX_RQ_ALFULL_TH 0x05bc
+#define RAW_PMU_INT 0x05c0
+#define ENA_PMU_INT 0x05c4
+#define STATUS_PMU_INT 0x05c8
+#define MAC_FIFO_ERR_IN BIT(30)
+#define TX_RQ_IN_TIMEOUT_INT BIT(29)
+#define RX_BQ_IN_TIMEOUT_INT BIT(28)
+#define TXOUTCFF_FULL_INT BIT(27)
+#define TXOUTCFF_EMPTY_INT BIT(26)
+#define TXCFF_FULL_INT BIT(25)
+#define TXCFF_EMPTY_INT BIT(24)
+#define RXOUTCFF_FULL_INT BIT(23)
+#define RXOUTCFF_EMPTY_INT BIT(22)
+#define RXCFF_FULL_INT BIT(21)
+#define RXCFF_EMPTY_INT BIT(20)
+#define TX_RQ_IN_INT BIT(19)
+#define TX_BQ_OUT_INT BIT(18)
+#define RX_BQ_IN_INT BIT(17)
+#define RX_FQ_OUT_INT BIT(16)
+#define TX_RQ_EMPTY_INT BIT(15)
+#define TX_RQ_FULL_INT BIT(14)
+#define TX_RQ_ALEMPTY_INT BIT(13)
+#define TX_RQ_ALFULL_INT BIT(12)
+#define TX_BQ_EMPTY_INT BIT(11)
+#define TX_BQ_FULL_INT BIT(10)
+#define TX_BQ_ALEMPTY_INT BIT(9)
+#define TX_BQ_ALFULL_INT BIT(8)
+#define RX_BQ_EMPTY_INT BIT(7)
+#define RX_BQ_FULL_INT BIT(6)
+#define RX_BQ_ALEMPTY_INT BIT(5)
+#define RX_BQ_ALFULL_INT BIT(4)
+#define RX_FQ_EMPTY_INT BIT(3)
+#define RX_FQ_FULL_INT BIT(2)
+#define RX_FQ_ALEMPTY_INT BIT(1)
+#define RX_FQ_ALFULL_INT BIT(0)
+
+#define DEF_INT_MASK (RX_BQ_IN_INT | RX_BQ_IN_TIMEOUT_INT | \
+ TX_RQ_IN_INT | TX_RQ_IN_TIMEOUT_INT)
+
+#define DESC_WR_RD_ENA 0x05cc
+#define IN_QUEUE_TH 0x05d8
+#define OUT_QUEUE_TH 0x05dc
+#define QUEUE_TX_BQ_SHIFT 16
+#define RX_BQ_IN_TIMEOUT_TH 0x05e0
+#define TX_RQ_IN_TIMEOUT_TH 0x05e4
+#define STOP_CMD 0x05e8
+#define BITS_TX_STOP BIT(1)
+#define BITS_RX_STOP BIT(0)
+#define FLUSH_CMD 0x05ec
+#define BITS_TX_FLUSH_CMD BIT(5)
+#define BITS_RX_FLUSH_CMD BIT(4)
+#define BITS_TX_FLUSH_FLAG_DOWN BIT(3)
+#define BITS_TX_FLUSH_FLAG_UP BIT(2)
+#define BITS_RX_FLUSH_FLAG_DOWN BIT(1)
+#define BITS_RX_FLUSH_FLAG_UP BIT(0)
+#define RX_CFF_NUM_REG 0x05f0
+#define PMU_FSM_REG 0x05f8
+#define RX_FIFO_PKT_IN_NUM 0x05fc
+#define RX_FIFO_PKT_OUT_NUM 0x0600
+
+#define RGMII_SPEED_1000 0x2c
+#define RGMII_SPEED_100 0x2f
+#define RGMII_SPEED_10 0x2d
+#define MII_SPEED_100 0x0f
+#define MII_SPEED_10 0x0d
+#define GMAC_SPEED_1000 0x05
+#define GMAC_SPEED_100 0x01
+#define GMAC_SPEED_10 0x00
+#define GMAC_FULL_DUPLEX BIT(4)
+
+#define RX_BQ_INT_THRESHOLD 0x01
+#define TX_RQ_INT_THRESHOLD 0x01
+#define RX_BQ_IN_TIMEOUT 0x10000
+#define TX_RQ_IN_TIMEOUT 0x50000
+
+#define MAC_MAX_FRAME_SIZE 1600
+#define DESC_SIZE 32
+#define RX_DESC_NUM 1024
+#define TX_DESC_NUM 1024
+
+#define DESC_VLD_FREE 0
+#define DESC_VLD_BUSY 0x80000000
+#define DESC_FL_MID 0
+#define DESC_FL_LAST 0x20000000
+#define DESC_FL_FIRST 0x40000000
+#define DESC_FL_FULL 0x60000000
+#define DESC_DATA_LEN_OFF 16
+#define DESC_BUFF_LEN_OFF 0
+#define DESC_DATA_MASK 0x7ff
+#define DESC_SG BIT(30)
+#define DESC_FRAGS_NUM_OFF 11
+
+/* DMA descriptor ring helpers */
+#define dma_ring_incr(n, s) (((n) + 1) & ((s) - 1))
+#define dma_cnt(n) ((n) >> 5)
+#define dma_byte(n) ((n) << 5)
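+
+/* Worked example (values illustrative): the queue pointer registers hold
+ * byte offsets into rings of 32-byte descriptors with a power-of-two
+ * depth, so dma_cnt(0x40) == 2 turns a register value into an index,
+ * dma_byte(2) == 0x40 turns it back, and dma_ring_incr(1023, 1024) == 0
+ * wraps at the end of a 1024-entry ring.
+ */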
+
+#define HW_CAP_TSO BIT(0)
+#define GEMAC_V1 0
+#define GEMAC_V2 (GEMAC_V1 | HW_CAP_TSO)
+#define HAS_CAP_TSO(hw_cap) ((hw_cap) & HW_CAP_TSO)
+
+#define PHY_RESET_DELAYS_PROPERTY "hisilicon,phy-reset-delays-us"
+
+enum phy_reset_delays {
+ PRE_DELAY,
+ PULSE,
+ POST_DELAY,
+ DELAYS_NUM,
+};
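+
+/* Example of the devicetree property that fills this enum; the delay
+ * values below are illustrative only and in practice come from the board
+ * dts (pre-delay, pulse width, post-delay, all in microseconds):
+ *
+ * hisilicon,phy-reset-delays-us = <10000 20000 150000>;
+ */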
+
+struct hix5hd2_desc {
+ __le32 buff_addr;
+ __le32 cmd;
+} __aligned(32);
+
+struct hix5hd2_desc_sw {
+ struct hix5hd2_desc *desc;
+ dma_addr_t phys_addr;
+ unsigned int count;
+ unsigned int size;
+};
+
+struct hix5hd2_sg_desc_ring {
+ struct sg_desc *desc;
+ dma_addr_t phys_addr;
+};
+
+struct frags_info {
+ __le32 addr;
+ __le32 size;
+};
+
+/* maximum number of skb frags supported by the hardware */
+#define SG_MAX_SKB_FRAGS 17
+struct sg_desc {
+ __le32 total_len;
+ __le32 resvd0;
+ __le32 linear_addr;
+ __le32 linear_len;
+ /* reserve one more frag for memory alignment */
+ struct frags_info frags[SG_MAX_SKB_FRAGS + 1];
+};
+
+#define QUEUE_NUMS 4
+struct hix5hd2_priv {
+ struct hix5hd2_desc_sw pool[QUEUE_NUMS];
+#define rx_fq pool[0]
+#define rx_bq pool[1]
+#define tx_bq pool[2]
+#define tx_rq pool[3]
+ struct hix5hd2_sg_desc_ring tx_ring;
+
+ void __iomem *base;
+ void __iomem *ctrl_base;
+
+ struct sk_buff *tx_skb[TX_DESC_NUM];
+ struct sk_buff *rx_skb[RX_DESC_NUM];
+
+ struct device *dev;
+ struct net_device *netdev;
+
+ struct device_node *phy_node;
+ phy_interface_t phy_mode;
+
+ unsigned long hw_cap;
+ unsigned int speed;
+ unsigned int duplex;
+
+ struct clk *mac_core_clk;
+ struct clk *mac_ifc_clk;
+ struct reset_control *mac_core_rst;
+ struct reset_control *mac_ifc_rst;
+ struct reset_control *phy_rst;
+ u32 phy_reset_delays[DELAYS_NUM];
+ struct mii_bus *bus;
+ struct napi_struct napi;
+ struct work_struct tx_timeout_task;
+};
+
+static inline void hix5hd2_mac_interface_reset(struct hix5hd2_priv *priv)
+{
+ if (!priv->mac_ifc_rst)
+ return;
+
+ reset_control_assert(priv->mac_ifc_rst);
+ reset_control_deassert(priv->mac_ifc_rst);
+}
+
+static void hix5hd2_config_port(struct net_device *dev, u32 speed, u32 duplex)
+{
+ struct hix5hd2_priv *priv = netdev_priv(dev);
+ u32 val;
+
+ priv->speed = speed;
+ priv->duplex = duplex;
+
+ switch (priv->phy_mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ if (speed == SPEED_1000)
+ val = RGMII_SPEED_1000;
+ else if (speed == SPEED_100)
+ val = RGMII_SPEED_100;
+ else
+ val = RGMII_SPEED_10;
+ break;
+ case PHY_INTERFACE_MODE_MII:
+ if (speed == SPEED_100)
+ val = MII_SPEED_100;
+ else
+ val = MII_SPEED_10;
+ break;
+ default:
+ netdev_warn(dev, "unsupported interface mode\n");
+ val = MII_SPEED_10;
+ break;
+ }
+
+ if (duplex)
+ val |= GMAC_FULL_DUPLEX;
+ writel_relaxed(val, priv->ctrl_base);
+ hix5hd2_mac_interface_reset(priv);
+
+ writel_relaxed(BIT_MODE_CHANGE_EN, priv->base + MODE_CHANGE_EN);
+ if (speed == SPEED_1000)
+ val = GMAC_SPEED_1000;
+ else if (speed == SPEED_100)
+ val = GMAC_SPEED_100;
+ else
+ val = GMAC_SPEED_10;
+ writel_relaxed(val, priv->base + PORT_MODE);
+ writel_relaxed(0, priv->base + MODE_CHANGE_EN);
+ writel_relaxed(duplex, priv->base + MAC_DUPLEX_HALF_CTRL);
+}
+
+static void hix5hd2_set_desc_depth(struct hix5hd2_priv *priv, int rx, int tx)
+{
+ writel_relaxed(BITS_RX_FQ_DEPTH_EN, priv->base + RX_FQ_REG_EN);
+ writel_relaxed(rx << 3, priv->base + RX_FQ_DEPTH);
+ writel_relaxed(0, priv->base + RX_FQ_REG_EN);
+
+ writel_relaxed(BITS_RX_BQ_DEPTH_EN, priv->base + RX_BQ_REG_EN);
+ writel_relaxed(rx << 3, priv->base + RX_BQ_DEPTH);
+ writel_relaxed(0, priv->base + RX_BQ_REG_EN);
+
+ writel_relaxed(BITS_TX_BQ_DEPTH_EN, priv->base + TX_BQ_REG_EN);
+ writel_relaxed(tx << 3, priv->base + TX_BQ_DEPTH);
+ writel_relaxed(0, priv->base + TX_BQ_REG_EN);
+
+ writel_relaxed(BITS_TX_RQ_DEPTH_EN, priv->base + TX_RQ_REG_EN);
+ writel_relaxed(tx << 3, priv->base + TX_RQ_DEPTH);
+ writel_relaxed(0, priv->base + TX_RQ_REG_EN);
+}
+
+static void hix5hd2_set_rx_fq(struct hix5hd2_priv *priv, dma_addr_t phy_addr)
+{
+ writel_relaxed(BITS_RX_FQ_START_ADDR_EN, priv->base + RX_FQ_REG_EN);
+ writel_relaxed(phy_addr, priv->base + RX_FQ_START_ADDR);
+ writel_relaxed(0, priv->base + RX_FQ_REG_EN);
+}
+
+static void hix5hd2_set_rx_bq(struct hix5hd2_priv *priv, dma_addr_t phy_addr)
+{
+ writel_relaxed(BITS_RX_BQ_START_ADDR_EN, priv->base + RX_BQ_REG_EN);
+ writel_relaxed(phy_addr, priv->base + RX_BQ_START_ADDR);
+ writel_relaxed(0, priv->base + RX_BQ_REG_EN);
+}
+
+static void hix5hd2_set_tx_bq(struct hix5hd2_priv *priv, dma_addr_t phy_addr)
+{
+ writel_relaxed(BITS_TX_BQ_START_ADDR_EN, priv->base + TX_BQ_REG_EN);
+ writel_relaxed(phy_addr, priv->base + TX_BQ_START_ADDR);
+ writel_relaxed(0, priv->base + TX_BQ_REG_EN);
+}
+
+static void hix5hd2_set_tx_rq(struct hix5hd2_priv *priv, dma_addr_t phy_addr)
+{
+ writel_relaxed(BITS_TX_RQ_START_ADDR_EN, priv->base + TX_RQ_REG_EN);
+ writel_relaxed(phy_addr, priv->base + TX_RQ_START_ADDR);
+ writel_relaxed(0, priv->base + TX_RQ_REG_EN);
+}
+
+static void hix5hd2_set_desc_addr(struct hix5hd2_priv *priv)
+{
+ hix5hd2_set_rx_fq(priv, priv->rx_fq.phys_addr);
+ hix5hd2_set_rx_bq(priv, priv->rx_bq.phys_addr);
+ hix5hd2_set_tx_rq(priv, priv->tx_rq.phys_addr);
+ hix5hd2_set_tx_bq(priv, priv->tx_bq.phys_addr);
+}
+
+static void hix5hd2_hw_init(struct hix5hd2_priv *priv)
+{
+ u32 val;
+
+ /* disable and clear all interrupts */
+ writel_relaxed(0, priv->base + ENA_PMU_INT);
+ writel_relaxed(~0, priv->base + RAW_PMU_INT);
+
+ writel_relaxed(BIT_CRC_ERR_PASS, priv->base + REC_FILT_CONTROL);
+ writel_relaxed(MAC_MAX_FRAME_SIZE, priv->base + CONTROL_WORD);
+ writel_relaxed(0, priv->base + COL_SLOT_TIME);
+
+ val = RX_BQ_INT_THRESHOLD | TX_RQ_INT_THRESHOLD << QUEUE_TX_BQ_SHIFT;
+ writel_relaxed(val, priv->base + IN_QUEUE_TH);
+
+ writel_relaxed(RX_BQ_IN_TIMEOUT, priv->base + RX_BQ_IN_TIMEOUT_TH);
+ writel_relaxed(TX_RQ_IN_TIMEOUT, priv->base + TX_RQ_IN_TIMEOUT_TH);
+
+ hix5hd2_set_desc_depth(priv, RX_DESC_NUM, TX_DESC_NUM);
+ hix5hd2_set_desc_addr(priv);
+}
+
+static void hix5hd2_irq_enable(struct hix5hd2_priv *priv)
+{
+ writel_relaxed(DEF_INT_MASK, priv->base + ENA_PMU_INT);
+}
+
+static void hix5hd2_irq_disable(struct hix5hd2_priv *priv)
+{
+ writel_relaxed(0, priv->base + ENA_PMU_INT);
+}
+
+static void hix5hd2_port_enable(struct hix5hd2_priv *priv)
+{
+ writel_relaxed(0xf, priv->base + DESC_WR_RD_ENA);
+ writel_relaxed(BITS_RX_EN | BITS_TX_EN, priv->base + PORT_EN);
+}
+
+static void hix5hd2_port_disable(struct hix5hd2_priv *priv)
+{
+ writel_relaxed(~(u32)(BITS_RX_EN | BITS_TX_EN), priv->base + PORT_EN);
+ writel_relaxed(0, priv->base + DESC_WR_RD_ENA);
+}
+
+static void hix5hd2_hw_set_mac_addr(struct net_device *dev)
+{
+ struct hix5hd2_priv *priv = netdev_priv(dev);
+ const unsigned char *mac = dev->dev_addr;
+ u32 val;
+
+ val = mac[1] | (mac[0] << 8);
+ writel_relaxed(val, priv->base + STATION_ADDR_HIGH);
+
+ val = mac[5] | (mac[4] << 8) | (mac[3] << 16) | (mac[2] << 24);
+ writel_relaxed(val, priv->base + STATION_ADDR_LOW);
+}
+
+static int hix5hd2_net_set_mac_address(struct net_device *dev, void *p)
+{
+ int ret;
+
+ ret = eth_mac_addr(dev, p);
+ if (!ret)
+ hix5hd2_hw_set_mac_addr(dev);
+
+ return ret;
+}
+
+static void hix5hd2_adjust_link(struct net_device *dev)
+{
+ struct hix5hd2_priv *priv = netdev_priv(dev);
+ struct phy_device *phy = dev->phydev;
+
+ if ((priv->speed != phy->speed) || (priv->duplex != phy->duplex)) {
+ hix5hd2_config_port(dev, phy->speed, phy->duplex);
+ phy_print_status(phy);
+ }
+}
+
+static void hix5hd2_rx_refill(struct hix5hd2_priv *priv)
+{
+ struct hix5hd2_desc *desc;
+ struct sk_buff *skb;
+ u32 start, end, num, pos, i;
+ u32 len = MAC_MAX_FRAME_SIZE;
+ dma_addr_t addr;
+
+ /* software write pointer */
+ start = dma_cnt(readl_relaxed(priv->base + RX_FQ_WR_ADDR));
+ /* logic read pointer */
+ end = dma_cnt(readl_relaxed(priv->base + RX_FQ_RD_ADDR));
+ num = CIRC_SPACE(start, end, RX_DESC_NUM);
+
+ for (i = 0, pos = start; i < num; i++) {
+ if (priv->rx_skb[pos]) {
+ break;
+ } else {
+ skb = netdev_alloc_skb_ip_align(priv->netdev, len);
+ if (unlikely(skb == NULL))
+ break;
+ }
+
+ addr = dma_map_single(priv->dev, skb->data, len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(priv->dev, addr)) {
+ dev_kfree_skb_any(skb);
+ break;
+ }
+
+ desc = priv->rx_fq.desc + pos;
+ desc->buff_addr = cpu_to_le32(addr);
+ priv->rx_skb[pos] = skb;
+ desc->cmd = cpu_to_le32(DESC_VLD_FREE |
+ (len - 1) << DESC_BUFF_LEN_OFF);
+ pos = dma_ring_incr(pos, RX_DESC_NUM);
+ }
+
+ /* ensure desc updated */
+ wmb();
+
+ if (pos != start)
+ writel_relaxed(dma_byte(pos), priv->base + RX_FQ_WR_ADDR);
+}
+
+static int hix5hd2_rx(struct net_device *dev, int limit)
+{
+ struct hix5hd2_priv *priv = netdev_priv(dev);
+ struct sk_buff *skb;
+ struct hix5hd2_desc *desc;
+ dma_addr_t addr;
+ u32 start, end, num, pos, i, len;
+
+ /* software read pointer */
+ start = dma_cnt(readl_relaxed(priv->base + RX_BQ_RD_ADDR));
+ /* logic write pointer */
+ end = dma_cnt(readl_relaxed(priv->base + RX_BQ_WR_ADDR));
+ num = CIRC_CNT(end, start, RX_DESC_NUM);
+ if (num > limit)
+ num = limit;
+
+ /* ensure get updated desc */
+ rmb();
+ for (i = 0, pos = start; i < num; i++) {
+ skb = priv->rx_skb[pos];
+ if (unlikely(!skb)) {
+ netdev_err(dev, "inconsistent rx_skb\n");
+ break;
+ }
+ priv->rx_skb[pos] = NULL;
+
+ desc = priv->rx_bq.desc + pos;
+ len = (le32_to_cpu(desc->cmd) >> DESC_DATA_LEN_OFF) &
+ DESC_DATA_MASK;
+ addr = le32_to_cpu(desc->buff_addr);
+ dma_unmap_single(priv->dev, addr, MAC_MAX_FRAME_SIZE,
+ DMA_FROM_DEVICE);
+
+ skb_put(skb, len);
+ if (skb->len > MAC_MAX_FRAME_SIZE) {
+ netdev_err(dev, "rcv len err, len = %d\n", skb->len);
+ dev->stats.rx_errors++;
+ dev->stats.rx_length_errors++;
+ dev_kfree_skb_any(skb);
+ goto next;
+ }
+
+ skb->protocol = eth_type_trans(skb, dev);
+ napi_gro_receive(&priv->napi, skb);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += len;
+next:
+ pos = dma_ring_incr(pos, RX_DESC_NUM);
+ }
+
+ if (pos != start)
+ writel_relaxed(dma_byte(pos), priv->base + RX_BQ_RD_ADDR);
+
+ hix5hd2_rx_refill(priv);
+
+ return num;
+}
+
+static void hix5hd2_clean_sg_desc(struct hix5hd2_priv *priv,
+ struct sk_buff *skb, u32 pos)
+{
+ struct sg_desc *desc;
+ dma_addr_t addr;
+ u32 len;
+ int i;
+
+ desc = priv->tx_ring.desc + pos;
+
+ addr = le32_to_cpu(desc->linear_addr);
+ len = le32_to_cpu(desc->linear_len);
+ dma_unmap_single(priv->dev, addr, len, DMA_TO_DEVICE);
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ addr = le32_to_cpu(desc->frags[i].addr);
+ len = le32_to_cpu(desc->frags[i].size);
+ dma_unmap_page(priv->dev, addr, len, DMA_TO_DEVICE);
+ }
+}
+
+static void hix5hd2_xmit_reclaim(struct net_device *dev)
+{
+ struct sk_buff *skb;
+ struct hix5hd2_desc *desc;
+ struct hix5hd2_priv *priv = netdev_priv(dev);
+ unsigned int bytes_compl = 0, pkts_compl = 0;
+ u32 start, end, num, pos, i;
+ dma_addr_t addr;
+
+ netif_tx_lock(dev);
+
+ /* software read */
+ start = dma_cnt(readl_relaxed(priv->base + TX_RQ_RD_ADDR));
+ /* logic write */
+ end = dma_cnt(readl_relaxed(priv->base + TX_RQ_WR_ADDR));
+ num = CIRC_CNT(end, start, TX_DESC_NUM);
+
+ for (i = 0, pos = start; i < num; i++) {
+ skb = priv->tx_skb[pos];
+ if (unlikely(!skb)) {
+ netdev_err(dev, "inconsistent tx_skb\n");
+ break;
+ }
+
+ pkts_compl++;
+ bytes_compl += skb->len;
+ desc = priv->tx_rq.desc + pos;
+
+ if (skb_shinfo(skb)->nr_frags) {
+ hix5hd2_clean_sg_desc(priv, skb, pos);
+ } else {
+ addr = le32_to_cpu(desc->buff_addr);
+ dma_unmap_single(priv->dev, addr, skb->len,
+ DMA_TO_DEVICE);
+ }
+
+ priv->tx_skb[pos] = NULL;
+ dev_consume_skb_any(skb);
+ pos = dma_ring_incr(pos, TX_DESC_NUM);
+ }
+
+ if (pos != start)
+ writel_relaxed(dma_byte(pos), priv->base + TX_RQ_RD_ADDR);
+
+ netif_tx_unlock(dev);
+
+ if (pkts_compl || bytes_compl)
+ netdev_completed_queue(dev, pkts_compl, bytes_compl);
+
+ if (unlikely(netif_queue_stopped(priv->netdev)) && pkts_compl)
+ netif_wake_queue(priv->netdev);
+}
+
+static int hix5hd2_poll(struct napi_struct *napi, int budget)
+{
+ struct hix5hd2_priv *priv = container_of(napi,
+ struct hix5hd2_priv, napi);
+ struct net_device *dev = priv->netdev;
+ int work_done = 0, task = budget;
+ int ints, num;
+
+ do {
+ hix5hd2_xmit_reclaim(dev);
+ num = hix5hd2_rx(dev, task);
+ work_done += num;
+ task -= num;
+ if ((work_done >= budget) || (num == 0))
+ break;
+
+ ints = readl_relaxed(priv->base + RAW_PMU_INT);
+ writel_relaxed(ints, priv->base + RAW_PMU_INT);
+ } while (ints & DEF_INT_MASK);
+
+ if (work_done < budget) {
+ napi_complete_done(napi, work_done);
+ hix5hd2_irq_enable(priv);
+ }
+
+ return work_done;
+}
+
+static irqreturn_t hix5hd2_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct hix5hd2_priv *priv = netdev_priv(dev);
+ int ints = readl_relaxed(priv->base + RAW_PMU_INT);
+
+ writel_relaxed(ints, priv->base + RAW_PMU_INT);
+ if (likely(ints & DEF_INT_MASK)) {
+ hix5hd2_irq_disable(priv);
+ napi_schedule(&priv->napi);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static u32 hix5hd2_get_desc_cmd(struct sk_buff *skb, unsigned long hw_cap)
+{
+ u32 cmd = 0;
+
+ if (HAS_CAP_TSO(hw_cap)) {
+ if (skb_shinfo(skb)->nr_frags)
+ cmd |= DESC_SG;
+ cmd |= skb_shinfo(skb)->nr_frags << DESC_FRAGS_NUM_OFF;
+ } else {
+ cmd |= DESC_FL_FULL |
+ ((skb->len & DESC_DATA_MASK) << DESC_BUFF_LEN_OFF);
+ }
+
+ cmd |= (skb->len & DESC_DATA_MASK) << DESC_DATA_LEN_OFF;
+ cmd |= DESC_VLD_BUSY;
+
+ return cmd;
+}
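+
+/* Worked example (length chosen for illustration): on a MAC without
+ * HW_CAP_TSO, a 64-byte linear skb yields
+ * cmd = DESC_VLD_BUSY | DESC_FL_FULL |
+ * (64 << DESC_DATA_LEN_OFF) | (64 << DESC_BUFF_LEN_OFF)
+ * = 0x80000000 | 0x60000000 | 0x00400000 | 0x40 = 0xe0400040,
+ * i.e. the same length lands in both length fields and the descriptor is
+ * handed to the hardware marked busy.
+ */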
+
+static int hix5hd2_fill_sg_desc(struct hix5hd2_priv *priv,
+ struct sk_buff *skb, u32 pos)
+{
+ struct sg_desc *desc;
+ dma_addr_t addr;
+ int ret;
+ int i;
+
+ desc = priv->tx_ring.desc + pos;
+
+ desc->total_len = cpu_to_le32(skb->len);
+ addr = dma_map_single(priv->dev, skb->data, skb_headlen(skb),
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(priv->dev, addr)))
+ return -EINVAL;
+ desc->linear_addr = cpu_to_le32(addr);
+ desc->linear_len = cpu_to_le32(skb_headlen(skb));
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ int len = skb_frag_size(frag);
+
+ addr = skb_frag_dma_map(priv->dev, frag, 0, len, DMA_TO_DEVICE);
+ ret = dma_mapping_error(priv->dev, addr);
+ if (unlikely(ret))
+ return -EINVAL;
+ desc->frags[i].addr = cpu_to_le32(addr);
+ desc->frags[i].size = cpu_to_le32(len);
+ }
+
+ return 0;
+}
+
+static netdev_tx_t hix5hd2_net_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct hix5hd2_priv *priv = netdev_priv(dev);
+ struct hix5hd2_desc *desc;
+ dma_addr_t addr;
+ u32 pos;
+ u32 cmd;
+ int ret;
+
+ /* software write pointer */
+ pos = dma_cnt(readl_relaxed(priv->base + TX_BQ_WR_ADDR));
+ if (unlikely(priv->tx_skb[pos])) {
+ dev->stats.tx_dropped++;
+ dev->stats.tx_fifo_errors++;
+ netif_stop_queue(dev);
+ return NETDEV_TX_BUSY;
+ }
+
+ desc = priv->tx_bq.desc + pos;
+
+ cmd = hix5hd2_get_desc_cmd(skb, priv->hw_cap);
+ desc->cmd = cpu_to_le32(cmd);
+
+ if (skb_shinfo(skb)->nr_frags) {
+ ret = hix5hd2_fill_sg_desc(priv, skb, pos);
+ if (unlikely(ret)) {
+ dev_kfree_skb_any(skb);
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+ addr = priv->tx_ring.phys_addr + pos * sizeof(struct sg_desc);
+ } else {
+ addr = dma_map_single(priv->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(priv->dev, addr))) {
+ dev_kfree_skb_any(skb);
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+ }
+ desc->buff_addr = cpu_to_le32(addr);
+
+ priv->tx_skb[pos] = skb;
+
+ /* ensure desc updated */
+ wmb();
+
+ pos = dma_ring_incr(pos, TX_DESC_NUM);
+ writel_relaxed(dma_byte(pos), priv->base + TX_BQ_WR_ADDR);
+
+ netif_trans_update(dev);
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
+ netdev_sent_queue(dev, skb->len);
+
+ return NETDEV_TX_OK;
+}
+
+static void hix5hd2_free_dma_desc_rings(struct hix5hd2_priv *priv)
+{
+ struct hix5hd2_desc *desc;
+ dma_addr_t addr;
+ int i;
+
+ for (i = 0; i < RX_DESC_NUM; i++) {
+ struct sk_buff *skb = priv->rx_skb[i];
+
+ if (skb == NULL)
+ continue;
+
+ desc = priv->rx_fq.desc + i;
+ addr = le32_to_cpu(desc->buff_addr);
+ dma_unmap_single(priv->dev, addr,
+ MAC_MAX_FRAME_SIZE, DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ priv->rx_skb[i] = NULL;
+ }
+
+ for (i = 0; i < TX_DESC_NUM; i++) {
+ struct sk_buff *skb = priv->tx_skb[i];
+
+ if (skb == NULL)
+ continue;
+
+ desc = priv->tx_rq.desc + i;
+ addr = le32_to_cpu(desc->buff_addr);
+ dma_unmap_single(priv->dev, addr, skb->len, DMA_TO_DEVICE);
+ dev_kfree_skb_any(skb);
+ priv->tx_skb[i] = NULL;
+ }
+}
+
+static int hix5hd2_net_open(struct net_device *dev)
+{
+ struct hix5hd2_priv *priv = netdev_priv(dev);
+ struct phy_device *phy;
+ int ret;
+
+ ret = clk_prepare_enable(priv->mac_core_clk);
+ if (ret < 0) {
+ netdev_err(dev, "failed to enable mac core clk %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(priv->mac_ifc_clk);
+ if (ret < 0) {
+ clk_disable_unprepare(priv->mac_core_clk);
+ netdev_err(dev, "failed to enable mac ifc clk %d\n", ret);
+ return ret;
+ }
+
+ phy = of_phy_connect(dev, priv->phy_node,
+ &hix5hd2_adjust_link, 0, priv->phy_mode);
+ if (!phy) {
+ clk_disable_unprepare(priv->mac_ifc_clk);
+ clk_disable_unprepare(priv->mac_core_clk);
+ return -ENODEV;
+ }
+
+ phy_start(phy);
+ hix5hd2_hw_init(priv);
+ hix5hd2_rx_refill(priv);
+
+ netdev_reset_queue(dev);
+ netif_start_queue(dev);
+ napi_enable(&priv->napi);
+
+ hix5hd2_port_enable(priv);
+ hix5hd2_irq_enable(priv);
+
+ return 0;
+}
+
+static int hix5hd2_net_close(struct net_device *dev)
+{
+ struct hix5hd2_priv *priv = netdev_priv(dev);
+
+ hix5hd2_port_disable(priv);
+ hix5hd2_irq_disable(priv);
+ napi_disable(&priv->napi);
+ netif_stop_queue(dev);
+ hix5hd2_free_dma_desc_rings(priv);
+
+ if (dev->phydev) {
+ phy_stop(dev->phydev);
+ phy_disconnect(dev->phydev);
+ }
+
+ clk_disable_unprepare(priv->mac_ifc_clk);
+ clk_disable_unprepare(priv->mac_core_clk);
+
+ return 0;
+}
+
+static void hix5hd2_tx_timeout_task(struct work_struct *work)
+{
+ struct hix5hd2_priv *priv;
+
+ priv = container_of(work, struct hix5hd2_priv, tx_timeout_task);
+ hix5hd2_net_close(priv->netdev);
+ hix5hd2_net_open(priv->netdev);
+}
+
+static void hix5hd2_net_timeout(struct net_device *dev, unsigned int txqueue)
+{
+ struct hix5hd2_priv *priv = netdev_priv(dev);
+
+ schedule_work(&priv->tx_timeout_task);
+}
+
+static const struct net_device_ops hix5hd2_netdev_ops = {
+ .ndo_open = hix5hd2_net_open,
+ .ndo_stop = hix5hd2_net_close,
+ .ndo_start_xmit = hix5hd2_net_xmit,
+ .ndo_tx_timeout = hix5hd2_net_timeout,
+ .ndo_set_mac_address = hix5hd2_net_set_mac_address,
+};
+
+static const struct ethtool_ops hix5hd2_ethtools_ops = {
+ .get_link = ethtool_op_get_link,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+};
+
+static int hix5hd2_mdio_wait_ready(struct mii_bus *bus)
+{
+ struct hix5hd2_priv *priv = bus->priv;
+ void __iomem *base = priv->base;
+ int i, timeout = 10000;
+
+ for (i = 0; readl_relaxed(base + MDIO_SINGLE_CMD) & MDIO_START; i++) {
+ if (i == timeout)
+ return -ETIMEDOUT;
+ usleep_range(10, 20);
+ }
+
+ return 0;
+}
+
+static int hix5hd2_mdio_read(struct mii_bus *bus, int phy, int reg)
+{
+ struct hix5hd2_priv *priv = bus->priv;
+ void __iomem *base = priv->base;
+ int val, ret;
+
+ ret = hix5hd2_mdio_wait_ready(bus);
+ if (ret < 0)
+ goto out;
+
+ writel_relaxed(MDIO_READ | phy << 8 | reg, base + MDIO_SINGLE_CMD);
+ ret = hix5hd2_mdio_wait_ready(bus);
+ if (ret < 0)
+ goto out;
+
+ val = readl_relaxed(base + MDIO_RDATA_STATUS);
+ if (val & MDIO_R_VALID) {
+ dev_err(bus->parent, "SMI bus read not valid\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ val = readl_relaxed(priv->base + MDIO_SINGLE_DATA);
+ ret = (val >> 16) & 0xFFFF;
+out:
+ return ret;
+}
+
+static int hix5hd2_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
+{
+ struct hix5hd2_priv *priv = bus->priv;
+ void __iomem *base = priv->base;
+ int ret;
+
+ ret = hix5hd2_mdio_wait_ready(bus);
+ if (ret < 0)
+ goto out;
+
+ writel_relaxed(val, base + MDIO_SINGLE_DATA);
+ writel_relaxed(MDIO_WRITE | phy << 8 | reg, base + MDIO_SINGLE_CMD);
+ ret = hix5hd2_mdio_wait_ready(bus);
+out:
+ return ret;
+}
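+
+/* Command word example (PHY address and register chosen for illustration):
+ * reading register 2 of the PHY at address 1 writes
+ * MDIO_READ | (1 << 8) | 2 = BIT(20) | BIT(17) | 0x102 = 0x120102
+ * to MDIO_SINGLE_CMD; the hardware clears MDIO_START (BIT(20)) when the
+ * transaction finishes, which is what hix5hd2_mdio_wait_ready() polls for.
+ */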
+
+static void hix5hd2_destroy_hw_desc_queue(struct hix5hd2_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < QUEUE_NUMS; i++) {
+ if (priv->pool[i].desc) {
+ dma_free_coherent(priv->dev, priv->pool[i].size,
+ priv->pool[i].desc,
+ priv->pool[i].phys_addr);
+ priv->pool[i].desc = NULL;
+ }
+ }
+}
+
+static int hix5hd2_init_hw_desc_queue(struct hix5hd2_priv *priv)
+{
+ struct device *dev = priv->dev;
+ struct hix5hd2_desc *virt_addr;
+ dma_addr_t phys_addr;
+ int size, i;
+
+ priv->rx_fq.count = RX_DESC_NUM;
+ priv->rx_bq.count = RX_DESC_NUM;
+ priv->tx_bq.count = TX_DESC_NUM;
+ priv->tx_rq.count = TX_DESC_NUM;
+
+ for (i = 0; i < QUEUE_NUMS; i++) {
+ size = priv->pool[i].count * sizeof(struct hix5hd2_desc);
+ virt_addr = dma_alloc_coherent(dev, size, &phys_addr,
+ GFP_KERNEL);
+ if (virt_addr == NULL)
+ goto error_free_pool;
+
+ priv->pool[i].size = size;
+ priv->pool[i].desc = virt_addr;
+ priv->pool[i].phys_addr = phys_addr;
+ }
+ return 0;
+
+error_free_pool:
+ hix5hd2_destroy_hw_desc_queue(priv);
+
+ return -ENOMEM;
+}
+
+static int hix5hd2_init_sg_desc_queue(struct hix5hd2_priv *priv)
+{
+ struct sg_desc *desc;
+ dma_addr_t phys_addr;
+
+ desc = dma_alloc_coherent(priv->dev,
+ TX_DESC_NUM * sizeof(struct sg_desc),
+ &phys_addr, GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ priv->tx_ring.desc = desc;
+ priv->tx_ring.phys_addr = phys_addr;
+
+ return 0;
+}
+
+static void hix5hd2_destroy_sg_desc_queue(struct hix5hd2_priv *priv)
+{
+ if (priv->tx_ring.desc) {
+ dma_free_coherent(priv->dev,
+ TX_DESC_NUM * sizeof(struct sg_desc),
+ priv->tx_ring.desc, priv->tx_ring.phys_addr);
+ priv->tx_ring.desc = NULL;
+ }
+}
+
+static inline void hix5hd2_mac_core_reset(struct hix5hd2_priv *priv)
+{
+ if (!priv->mac_core_rst)
+ return;
+
+ reset_control_assert(priv->mac_core_rst);
+ reset_control_deassert(priv->mac_core_rst);
+}
+
+static void hix5hd2_sleep_us(u32 time_us)
+{
+ u32 time_ms;
+
+ if (!time_us)
+ return;
+
+ time_ms = DIV_ROUND_UP(time_us, 1000);
+ if (time_ms < 20)
+ usleep_range(time_us, time_us + 500);
+ else
+ msleep(time_ms);
+}
+
+static void hix5hd2_phy_reset(struct hix5hd2_priv *priv)
+{
+ /* To make sure the PHY hardware reset succeeds, we must first keep
+ * the PHY in the deasserted state and then perform the complete
+ * hardware reset sequence.
+ */
+ reset_control_deassert(priv->phy_rst);
+ hix5hd2_sleep_us(priv->phy_reset_delays[PRE_DELAY]);
+
+ reset_control_assert(priv->phy_rst);
+ /* delay for a while to let the reset complete;
+ * the required time depends on the PHY hardware
+ */
+ hix5hd2_sleep_us(priv->phy_reset_delays[PULSE]);
+ reset_control_deassert(priv->phy_rst);
+ /* delay for a while so that later MDIO accesses succeed */
+ hix5hd2_sleep_us(priv->phy_reset_delays[POST_DELAY]);
+}
+
+static const struct of_device_id hix5hd2_of_match[];
+
+static int hix5hd2_dev_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ const struct of_device_id *of_id = NULL;
+ struct net_device *ndev;
+ struct hix5hd2_priv *priv;
+ struct mii_bus *bus;
+ int ret;
+
+ ndev = alloc_etherdev(sizeof(struct hix5hd2_priv));
+ if (!ndev)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ndev);
+
+ priv = netdev_priv(ndev);
+ priv->dev = dev;
+ priv->netdev = ndev;
+
+ of_id = of_match_device(hix5hd2_of_match, dev);
+ if (!of_id) {
+ ret = -EINVAL;
+ goto out_free_netdev;
+ }
+ priv->hw_cap = (unsigned long)of_id->data;
+
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base)) {
+ ret = PTR_ERR(priv->base);
+ goto out_free_netdev;
+ }
+
+ priv->ctrl_base = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(priv->ctrl_base)) {
+ ret = PTR_ERR(priv->ctrl_base);
+ goto out_free_netdev;
+ }
+
+ priv->mac_core_clk = devm_clk_get(&pdev->dev, "mac_core");
+ if (IS_ERR(priv->mac_core_clk)) {
+ netdev_err(ndev, "failed to get mac core clk\n");
+ ret = -ENODEV;
+ goto out_free_netdev;
+ }
+
+ ret = clk_prepare_enable(priv->mac_core_clk);
+ if (ret < 0) {
+ netdev_err(ndev, "failed to enable mac core clk %d\n", ret);
+ goto out_free_netdev;
+ }
+
+ priv->mac_ifc_clk = devm_clk_get(&pdev->dev, "mac_ifc");
+ if (IS_ERR(priv->mac_ifc_clk))
+ priv->mac_ifc_clk = NULL;
+
+ ret = clk_prepare_enable(priv->mac_ifc_clk);
+ if (ret < 0) {
+ netdev_err(ndev, "failed to enable mac ifc clk %d\n", ret);
+ goto out_disable_mac_core_clk;
+ }
+
+ priv->mac_core_rst = devm_reset_control_get(dev, "mac_core");
+ if (IS_ERR(priv->mac_core_rst))
+ priv->mac_core_rst = NULL;
+ hix5hd2_mac_core_reset(priv);
+
+ priv->mac_ifc_rst = devm_reset_control_get(dev, "mac_ifc");
+ if (IS_ERR(priv->mac_ifc_rst))
+ priv->mac_ifc_rst = NULL;
+
+ priv->phy_rst = devm_reset_control_get(dev, "phy");
+ if (IS_ERR(priv->phy_rst)) {
+ priv->phy_rst = NULL;
+ } else {
+ ret = of_property_read_u32_array(node,
+ PHY_RESET_DELAYS_PROPERTY,
+ priv->phy_reset_delays,
+ DELAYS_NUM);
+ if (ret)
+ goto out_disable_clk;
+ hix5hd2_phy_reset(priv);
+ }
+
+ bus = mdiobus_alloc();
+ if (bus == NULL) {
+ ret = -ENOMEM;
+ goto out_disable_clk;
+ }
+
+ bus->priv = priv;
+ bus->name = "hix5hd2_mii_bus";
+ bus->read = hix5hd2_mdio_read;
+ bus->write = hix5hd2_mdio_write;
+ bus->parent = &pdev->dev;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(&pdev->dev));
+ priv->bus = bus;
+
+ ret = of_mdiobus_register(bus, node);
+ if (ret)
+ goto err_free_mdio;
+
+ ret = of_get_phy_mode(node, &priv->phy_mode);
+ if (ret) {
+ netdev_err(ndev, "not find phy-mode\n");
+ goto err_mdiobus;
+ }
+
+ priv->phy_node = of_parse_phandle(node, "phy-handle", 0);
+ if (!priv->phy_node) {
+ netdev_err(ndev, "not find phy-handle\n");
+ ret = -EINVAL;
+ goto err_mdiobus;
+ }
+
+ ndev->irq = platform_get_irq(pdev, 0);
+ if (ndev->irq < 0) {
+ ret = ndev->irq;
+ goto out_phy_node;
+ }
+
+ ret = devm_request_irq(dev, ndev->irq, hix5hd2_interrupt,
+ 0, pdev->name, ndev);
+ if (ret) {
+ netdev_err(ndev, "devm_request_irq failed\n");
+ goto out_phy_node;
+ }
+
+ ret = of_get_ethdev_address(node, ndev);
+ if (ret) {
+ eth_hw_addr_random(ndev);
+ netdev_warn(ndev, "using random MAC address %pM\n",
+ ndev->dev_addr);
+ }
+
+ INIT_WORK(&priv->tx_timeout_task, hix5hd2_tx_timeout_task);
+ ndev->watchdog_timeo = 6 * HZ;
+ ndev->priv_flags |= IFF_UNICAST_FLT;
+ ndev->netdev_ops = &hix5hd2_netdev_ops;
+ ndev->ethtool_ops = &hix5hd2_ethtools_ops;
+ SET_NETDEV_DEV(ndev, dev);
+
+ if (HAS_CAP_TSO(priv->hw_cap))
+ ndev->hw_features |= NETIF_F_SG;
+
+ ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
+ ndev->vlan_features |= ndev->features;
+
+ ret = hix5hd2_init_hw_desc_queue(priv);
+ if (ret)
+ goto out_phy_node;
+
+ netif_napi_add(ndev, &priv->napi, hix5hd2_poll);
+
+ if (HAS_CAP_TSO(priv->hw_cap)) {
+ ret = hix5hd2_init_sg_desc_queue(priv);
+ if (ret)
+ goto out_destroy_queue;
+ }
+
+ ret = register_netdev(priv->netdev);
+ if (ret) {
+ netdev_err(ndev, "register_netdev failed!");
+ goto out_destroy_queue;
+ }
+
+ clk_disable_unprepare(priv->mac_ifc_clk);
+ clk_disable_unprepare(priv->mac_core_clk);
+
+ return ret;
+
+out_destroy_queue:
+ if (HAS_CAP_TSO(priv->hw_cap))
+ hix5hd2_destroy_sg_desc_queue(priv);
+ netif_napi_del(&priv->napi);
+ hix5hd2_destroy_hw_desc_queue(priv);
+out_phy_node:
+ of_node_put(priv->phy_node);
+err_mdiobus:
+ mdiobus_unregister(bus);
+err_free_mdio:
+ mdiobus_free(bus);
+out_disable_clk:
+ clk_disable_unprepare(priv->mac_ifc_clk);
+out_disable_mac_core_clk:
+ clk_disable_unprepare(priv->mac_core_clk);
+out_free_netdev:
+ free_netdev(ndev);
+
+ return ret;
+}
+
+static int hix5hd2_dev_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct hix5hd2_priv *priv = netdev_priv(ndev);
+
+ netif_napi_del(&priv->napi);
+ unregister_netdev(ndev);
+ mdiobus_unregister(priv->bus);
+ mdiobus_free(priv->bus);
+
+ if (HAS_CAP_TSO(priv->hw_cap))
+ hix5hd2_destroy_sg_desc_queue(priv);
+ hix5hd2_destroy_hw_desc_queue(priv);
+ of_node_put(priv->phy_node);
+ cancel_work_sync(&priv->tx_timeout_task);
+ free_netdev(ndev);
+
+ return 0;
+}
+
+static const struct of_device_id hix5hd2_of_match[] = {
+ { .compatible = "hisilicon,hisi-gmac-v1", .data = (void *)GEMAC_V1 },
+ { .compatible = "hisilicon,hisi-gmac-v2", .data = (void *)GEMAC_V2 },
+ { .compatible = "hisilicon,hix5hd2-gmac", .data = (void *)GEMAC_V1 },
+ { .compatible = "hisilicon,hi3798cv200-gmac", .data = (void *)GEMAC_V2 },
+ { .compatible = "hisilicon,hi3516a-gmac", .data = (void *)GEMAC_V2 },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, hix5hd2_of_match);
+
+static struct platform_driver hix5hd2_dev_driver = {
+ .driver = {
+ .name = "hisi-gmac",
+ .of_match_table = hix5hd2_of_match,
+ },
+ .probe = hix5hd2_dev_probe,
+ .remove = hix5hd2_dev_remove,
+};
+
+module_platform_driver(hix5hd2_dev_driver);
+
+MODULE_DESCRIPTION("HISILICON Gigabit Ethernet MAC driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:hisi-gmac");
diff --git a/drivers/net/ethernet/hisilicon/hns/Makefile b/drivers/net/ethernet/hisilicon/hns/Makefile
new file mode 100644
index 0000000000..7aa623b9c8
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/Makefile
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the HISILICON network device drivers.
+#
+
+obj-$(CONFIG_HNS) += hnae.o
+
+obj-$(CONFIG_HNS_DSAF) += hns_dsaf.o
+hns_dsaf-objs = hns_ae_adapt.o hns_dsaf_gmac.o hns_dsaf_mac.o hns_dsaf_misc.o \
+ hns_dsaf_main.o hns_dsaf_ppe.o hns_dsaf_rcb.o hns_dsaf_xgmac.o
+
+obj-$(CONFIG_HNS_ENET) += hns_enet_drv.o
+hns_enet_drv-objs = hns_enet.o hns_ethtool.o
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c
new file mode 100644
index 0000000000..8a1027ad34
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.c
@@ -0,0 +1,467 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include "hnae.h"
+
+#define cls_to_ae_dev(dev) container_of(dev, struct hnae_ae_dev, cls_dev)
+
+static struct class *hnae_class;
+
+static void
+hnae_list_add(spinlock_t *lock, struct list_head *node, struct list_head *head)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(lock, flags);
+ list_add_tail_rcu(node, head);
+ spin_unlock_irqrestore(lock, flags);
+}
+
+static void hnae_list_del(spinlock_t *lock, struct list_head *node)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(lock, flags);
+ list_del_rcu(node);
+ spin_unlock_irqrestore(lock, flags);
+}
+
+static int hnae_alloc_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
+{
+ unsigned int order = hnae_page_order(ring);
+ struct page *p = dev_alloc_pages(order);
+
+ if (!p)
+ return -ENOMEM;
+
+ cb->priv = p;
+ cb->page_offset = 0;
+ cb->reuse_flag = 0;
+ cb->buf = page_address(p);
+ cb->length = hnae_page_size(ring);
+ cb->type = DESC_TYPE_PAGE;
+
+ return 0;
+}
+
+static void hnae_free_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
+{
+ if (unlikely(!cb->priv))
+ return;
+
+ if (cb->type == DESC_TYPE_SKB)
+ dev_kfree_skb_any((struct sk_buff *)cb->priv);
+ else if (unlikely(is_rx_ring(ring)))
+ put_page((struct page *)cb->priv);
+
+ cb->priv = NULL;
+}
+
+static int hnae_map_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
+{
+ cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0,
+ cb->length, ring_to_dma_dir(ring));
+
+ if (dma_mapping_error(ring_to_dev(ring), cb->dma))
+ return -EIO;
+
+ return 0;
+}
+
+static void hnae_unmap_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
+{
+ if (cb->type == DESC_TYPE_SKB)
+ dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
+ ring_to_dma_dir(ring));
+ else if (cb->length)
+ dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
+ ring_to_dma_dir(ring));
+}
+
+static struct hnae_buf_ops hnae_bops = {
+ .alloc_buffer = hnae_alloc_buffer,
+ .free_buffer = hnae_free_buffer,
+ .map_buffer = hnae_map_buffer,
+ .unmap_buffer = hnae_unmap_buffer,
+};
+
+static int __ae_match(struct device *dev, const void *data)
+{
+ struct hnae_ae_dev *hdev = cls_to_ae_dev(dev);
+
+ if (dev_of_node(hdev->dev))
+ return (data == &hdev->dev->of_node->fwnode);
+ else if (is_acpi_node(hdev->dev->fwnode))
+ return (data == hdev->dev->fwnode);
+
+ dev_err(dev, "__ae_match cannot read cfg data from OF or acpi\n");
+ return 0;
+}
+
+static struct hnae_ae_dev *find_ae(const struct fwnode_handle *fwnode)
+{
+ struct device *dev;
+
+ WARN_ON(!fwnode);
+
+ dev = class_find_device(hnae_class, NULL, fwnode, __ae_match);
+
+ return dev ? cls_to_ae_dev(dev) : NULL;
+}
+
+static void hnae_free_buffers(struct hnae_ring *ring)
+{
+ int i;
+
+ for (i = 0; i < ring->desc_num; i++)
+ hnae_free_buffer_detach(ring, i);
+}
+
+/* Allocate memory for the raw packet buffers and map them for DMA */
+static int hnae_alloc_buffers(struct hnae_ring *ring)
+{
+ int i, j, ret;
+
+ for (i = 0; i < ring->desc_num; i++) {
+ ret = hnae_alloc_buffer_attach(ring, i);
+ if (ret)
+ goto out_buffer_fail;
+ }
+
+ return 0;
+
+out_buffer_fail:
+ for (j = i - 1; j >= 0; j--)
+ hnae_free_buffer_detach(ring, j);
+ return ret;
+}
+
+/* unmap and free the descriptor array (buffers are freed separately) */
+static void hnae_free_desc(struct hnae_ring *ring)
+{
+ dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr,
+ ring->desc_num * sizeof(ring->desc[0]),
+ ring_to_dma_dir(ring));
+ ring->desc_dma_addr = 0;
+ kfree(ring->desc);
+ ring->desc = NULL;
+}
+
+/* alloc desc, without buffer attached */
+static int hnae_alloc_desc(struct hnae_ring *ring)
+{
+ int size = ring->desc_num * sizeof(ring->desc[0]);
+
+ ring->desc = kzalloc(size, GFP_KERNEL);
+ if (!ring->desc)
+ return -ENOMEM;
+
+ ring->desc_dma_addr = dma_map_single(ring_to_dev(ring),
+ ring->desc, size, ring_to_dma_dir(ring));
+ if (dma_mapping_error(ring_to_dev(ring), ring->desc_dma_addr)) {
+ ring->desc_dma_addr = 0;
+ kfree(ring->desc);
+ ring->desc = NULL;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/* fini ring, also free the buffer for the ring */
+static void hnae_fini_ring(struct hnae_ring *ring)
+{
+ if (is_rx_ring(ring))
+ hnae_free_buffers(ring);
+
+ hnae_free_desc(ring);
+ kfree(ring->desc_cb);
+ ring->desc_cb = NULL;
+ ring->next_to_clean = 0;
+ ring->next_to_use = 0;
+}
+
+/* init the ring, and attach buffers if it is an rx ring */
+static int
+hnae_init_ring(struct hnae_queue *q, struct hnae_ring *ring, int flags)
+{
+ int ret;
+
+ if (ring->desc_num <= 0 || ring->buf_size <= 0)
+ return -EINVAL;
+
+ ring->q = q;
+ ring->flags = flags;
+ ring->coal_param = q->handle->coal_param;
+ assert(!ring->desc && !ring->desc_cb && !ring->desc_dma_addr);
+
+ /* no matter whether it is a tx or rx ring, ntc and ntu start from 0 */
+ assert(ring->next_to_use == 0);
+ assert(ring->next_to_clean == 0);
+
+ ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]),
+ GFP_KERNEL);
+ if (!ring->desc_cb) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = hnae_alloc_desc(ring);
+ if (ret)
+ goto out_with_desc_cb;
+
+ if (is_rx_ring(ring)) {
+ ret = hnae_alloc_buffers(ring);
+ if (ret)
+ goto out_with_desc;
+ }
+
+ return 0;
+
+out_with_desc:
+ hnae_free_desc(ring);
+out_with_desc_cb:
+ kfree(ring->desc_cb);
+ ring->desc_cb = NULL;
+out:
+ return ret;
+}
+
+static int hnae_init_queue(struct hnae_handle *h, struct hnae_queue *q,
+ struct hnae_ae_dev *dev)
+{
+ int ret;
+
+ q->dev = dev;
+ q->handle = h;
+
+ ret = hnae_init_ring(q, &q->tx_ring, q->tx_ring.flags | RINGF_DIR);
+ if (ret)
+ goto out;
+
+ ret = hnae_init_ring(q, &q->rx_ring, q->rx_ring.flags & ~RINGF_DIR);
+ if (ret)
+ goto out_with_tx_ring;
+
+ if (dev->ops->init_queue)
+ dev->ops->init_queue(q);
+
+ return 0;
+
+out_with_tx_ring:
+ hnae_fini_ring(&q->tx_ring);
+out:
+ return ret;
+}
+
+static void hnae_fini_queue(struct hnae_queue *q)
+{
+ if (q->dev->ops->fini_queue)
+ q->dev->ops->fini_queue(q);
+
+ hnae_fini_ring(&q->tx_ring);
+ hnae_fini_ring(&q->rx_ring);
+}
+
+/*
+ * ae_chain - define ae chain head
+ */
+static RAW_NOTIFIER_HEAD(ae_chain);
+
+int hnae_register_notifier(struct notifier_block *nb)
+{
+ return raw_notifier_chain_register(&ae_chain, nb);
+}
+EXPORT_SYMBOL(hnae_register_notifier);
+
+void hnae_unregister_notifier(struct notifier_block *nb)
+{
+ if (raw_notifier_chain_unregister(&ae_chain, nb))
+ dev_err(NULL, "notifier chain unregister fail\n");
+}
+EXPORT_SYMBOL(hnae_unregister_notifier);
+
+int hnae_reinit_handle(struct hnae_handle *handle)
+{
+ int i, j;
+ int ret;
+
+ for (i = 0; i < handle->q_num; i++) /* free ring */
+ hnae_fini_queue(handle->qs[i]);
+
+ if (handle->dev->ops->reset)
+ handle->dev->ops->reset(handle);
+
+ for (i = 0; i < handle->q_num; i++) { /* reinit ring */
+ ret = hnae_init_queue(handle, handle->qs[i], handle->dev);
+ if (ret)
+ goto out_when_init_queue;
+ }
+ return 0;
+out_when_init_queue:
+ for (j = i - 1; j >= 0; j--)
+ hnae_fini_queue(handle->qs[j]);
+ return ret;
+}
+EXPORT_SYMBOL(hnae_reinit_handle);
+
+/* hnae_get_handle - get a handle from the AE
+ * @owner_dev: the device that will use this handle
+ * @fwnode: fwnode of the hnae AE device that provides the handle
+ * @port_id: the id of the port the handle is bound to
+ * @bops: the callbacks for buffer management, or NULL for the defaults
+ *
+ * return handle ptr or ERR_PTR
+ */
+struct hnae_handle *hnae_get_handle(struct device *owner_dev,
+ const struct fwnode_handle *fwnode,
+ u32 port_id,
+ struct hnae_buf_ops *bops)
+{
+ struct hnae_ae_dev *dev;
+ struct hnae_handle *handle;
+ int i, j;
+ int ret;
+
+ dev = find_ae(fwnode);
+ if (!dev)
+ return ERR_PTR(-ENODEV);
+
+ handle = dev->ops->get_handle(dev, port_id);
+ if (IS_ERR(handle)) {
+ put_device(&dev->cls_dev);
+ return handle;
+ }
+
+ handle->dev = dev;
+ handle->owner_dev = owner_dev;
+ handle->bops = bops ? bops : &hnae_bops;
+ handle->eport_id = port_id;
+
+ for (i = 0; i < handle->q_num; i++) {
+ ret = hnae_init_queue(handle, handle->qs[i], dev);
+ if (ret)
+ goto out_when_init_queue;
+ }
+
+ __module_get(dev->owner);
+
+ hnae_list_add(&dev->lock, &handle->node, &dev->handle_list);
+
+ return handle;
+
+out_when_init_queue:
+ for (j = i - 1; j >= 0; j--)
+ hnae_fini_queue(handle->qs[j]);
+
+ put_device(&dev->cls_dev);
+
+ return ERR_PTR(-ENOMEM);
+}
+EXPORT_SYMBOL(hnae_get_handle);
+
+void hnae_put_handle(struct hnae_handle *h)
+{
+ struct hnae_ae_dev *dev = h->dev;
+ int i;
+
+ for (i = 0; i < h->q_num; i++)
+ hnae_fini_queue(h->qs[i]);
+
+ if (h->dev->ops->reset)
+ h->dev->ops->reset(h);
+
+ hnae_list_del(&dev->lock, &h->node);
+
+ if (dev->ops->put_handle)
+ dev->ops->put_handle(h);
+
+ module_put(dev->owner);
+
+ put_device(&dev->cls_dev);
+}
+EXPORT_SYMBOL(hnae_put_handle);
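+
+/* Illustrative usage sketch, not part of the original sources: roughly how
+ * a consumer such as the hns_enet driver might obtain and release a handle.
+ * The function name, the port id value and the dev_dbg() message are
+ * placeholders chosen for this example only.
+ */
+static int __maybe_unused hnae_example_attach(struct device *dev,
+ const struct fwnode_handle *fwnode)
+{
+ struct hnae_handle *h;
+ int i;
+
+ /* a NULL bops selects the default page-based hnae_bops */
+ h = hnae_get_handle(dev, fwnode, 0, NULL);
+ if (IS_ERR(h))
+ return PTR_ERR(h);
+
+ /* every queue now has initialised tx and rx rings */
+ for (i = 0; i < h->q_num; i++)
+ dev_dbg(dev, "queue %d: %d tx descs\n", i,
+ h->qs[i]->tx_ring.desc_num);
+
+ hnae_put_handle(h);
+
+ return 0;
+}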
+
+static void hnae_release(struct device *dev)
+{
+}
+
+/**
+ * hnae_ae_register - register an AE engine with the hnae framework
+ * @hdev: the hnae ae engine device
+ * @owner: the module that provides this device
+ * NOTE: duplicate names are not checked
+ */
+int hnae_ae_register(struct hnae_ae_dev *hdev, struct module *owner)
+{
+ static atomic_t id = ATOMIC_INIT(-1);
+ int ret;
+
+ if (!hdev->dev)
+ return -ENODEV;
+
+ if (!hdev->ops || !hdev->ops->get_handle ||
+ !hdev->ops->toggle_ring_irq ||
+ !hdev->ops->get_status || !hdev->ops->adjust_link)
+ return -EINVAL;
+
+ hdev->owner = owner;
+ hdev->id = (int)atomic_inc_return(&id);
+ hdev->cls_dev.parent = hdev->dev;
+ hdev->cls_dev.class = hnae_class;
+ hdev->cls_dev.release = hnae_release;
+ (void)dev_set_name(&hdev->cls_dev, "hnae%d", hdev->id);
+ ret = device_register(&hdev->cls_dev);
+ if (ret) {
+ put_device(&hdev->cls_dev);
+ return ret;
+ }
+
+ INIT_LIST_HEAD(&hdev->handle_list);
+ spin_lock_init(&hdev->lock);
+
+ ret = raw_notifier_call_chain(&ae_chain, HNAE_AE_REGISTER, NULL);
+ if (ret)
+ dev_dbg(hdev->dev,
+ "has not notifier for AE: %s\n", hdev->name);
+
+ return 0;
+}
+EXPORT_SYMBOL(hnae_ae_register);
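+
+/* Illustrative registration sketch, not part of the original sources: a
+ * backend (such as the DSAF driver) fills in an hnae_ae_dev and calls
+ * hnae_ae_register(). The helper name and the way the ops table is passed
+ * in are placeholders; the real provider lives in hns_ae_adapt.c.
+ */
+static int __maybe_unused hnae_example_ae_probe(struct device *dev,
+ struct hnae_ae_ops *ops)
+{
+ struct hnae_ae_dev *hdev;
+
+ hdev = devm_kzalloc(dev, sizeof(*hdev), GFP_KERNEL);
+ if (!hdev)
+ return -ENOMEM;
+
+ hdev->dev = dev; /* parent of the "hnae%d" class device */
+ /* ops must provide get_handle, toggle_ring_irq, get_status and
+ * adjust_link, or registration fails with -EINVAL
+ */
+ hdev->ops = ops;
+
+ return hnae_ae_register(hdev, THIS_MODULE);
+}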
+
+/**
+ * hnae_ae_unregister - unregisters a HNAE AE engine
+ * @hdev: the device to unregister
+ */
+void hnae_ae_unregister(struct hnae_ae_dev *hdev)
+{
+ device_unregister(&hdev->cls_dev);
+}
+EXPORT_SYMBOL(hnae_ae_unregister);
+
+static int __init hnae_init(void)
+{
+ hnae_class = class_create("hnae");
+ return PTR_ERR_OR_ZERO(hnae_class);
+}
+
+static void __exit hnae_exit(void)
+{
+ class_destroy(hnae_class);
+}
+
+subsys_initcall(hnae_init);
+module_exit(hnae_exit);
+
+MODULE_AUTHOR("Hisilicon, Inc.");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Hisilicon Network Acceleration Engine Framework");
+
+/* vi: set tw=78 noet: */
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h
new file mode 100644
index 0000000000..d72657444e
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.h
@@ -0,0 +1,707 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ */
+
+#ifndef __HNAE_H
+#define __HNAE_H
+
+/* Names used in this framework:
+ * ae handle (handle):
+ * a set of queues provided by AE
+ * ring buffer queue (rbq):
+ * the channel between upper layer and the AE, can do tx and rx
+ * ring:
+ * a tx or rx channel within a rbq
+ * ring description (desc):
+ * an element in the ring with packet information
+ * buffer:
+ * a memory region referred to by a desc, holding the full packet payload
+ *
+ * "num" means a static number set as a parameter, "count" means a dynamic
+ * number maintained while running
+ * "cb" means control block
+ */
+
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/notifier.h>
+#include <linux/phy.h>
+#include <linux/types.h>
+
+#define HNAE_DRIVER_VERSION "2.0"
+#define HNAE_DRIVER_NAME "hns"
+#define HNAE_COPYRIGHT "Copyright(c) 2015 Huawei Corporation."
+#define HNAE_DRIVER_STRING "Hisilicon Network Subsystem Driver"
+#define HNAE_DEFAULT_DEVICE_DESCR "Hisilicon Network Subsystem"
+
+#ifdef DEBUG
+
+#ifndef assert
+#define assert(expr) \
+do { \
+ if (!(expr)) { \
+ pr_err("Assertion failed! %s, %s, %s, line %d\n", \
+ #expr, __FILE__, __func__, __LINE__); \
+ } \
+} while (0)
+#endif
+
+#else
+
+#ifndef assert
+#define assert(expr)
+#endif
+
+#endif
+
+#define AE_VERSION_1 ('6' << 16 | '6' << 8 | '0')
+#define AE_VERSION_2 ('1' << 24 | '6' << 16 | '1' << 8 | '0')
+#define AE_IS_VER1(ver) ((ver) == AE_VERSION_1)
+#define AE_NAME_SIZE 16
+
+#define BD_SIZE_2048_MAX_MTU 6000
+
+/* The RX and TX RCB register layouts may diverge in the future, but they
+ * are identical for now.
+ */
+#define RCB_REG_BASEADDR_L 0x00 /* P660 support only 32bit accessing */
+#define RCB_REG_BASEADDR_H 0x04
+#define RCB_REG_BD_NUM 0x08
+#define RCB_REG_BD_LEN 0x0C
+#define RCB_REG_PKTLINE 0x10
+#define RCB_REG_TAIL 0x18
+#define RCB_REG_HEAD 0x1C
+#define RCB_REG_FBDNUM 0x20
+#define RCB_REG_OFFSET 0x24 /* pkt num to be handled */
+#define RCB_REG_PKTNUM_RECORD 0x2C /* total pkt received */
+
+#define HNS_RX_HEAD_SIZE 256
+
+#define HNAE_AE_REGISTER 0x1
+
+#define RCB_RING_NAME_LEN (IFNAMSIZ + 4)
+
+#define HNAE_LOWEST_LATENCY_COAL_PARAM 30
+#define HNAE_LOW_LATENCY_COAL_PARAM 80
+#define HNAE_BULK_LATENCY_COAL_PARAM 150
+
+enum hnae_led_state {
+ HNAE_LED_INACTIVE,
+ HNAE_LED_ACTIVE,
+ HNAE_LED_ON,
+ HNAE_LED_OFF
+};
+
+#define HNS_RX_FLAG_VLAN_PRESENT 0x1
+#define HNS_RX_FLAG_L3ID_IPV4 0x0
+#define HNS_RX_FLAG_L3ID_IPV6 0x1
+#define HNS_RX_FLAG_L4ID_UDP 0x0
+#define HNS_RX_FLAG_L4ID_TCP 0x1
+#define HNS_RX_FLAG_L4ID_SCTP 0x3
+
+#define HNS_TXD_ASID_S 0
+#define HNS_TXD_ASID_M (0xff << HNS_TXD_ASID_S)
+#define HNS_TXD_BUFNUM_S 8
+#define HNS_TXD_BUFNUM_M (0x3 << HNS_TXD_BUFNUM_S)
+#define HNS_TXD_PORTID_S 10
+#define HNS_TXD_PORTID_M (0x7 << HNS_TXD_PORTID_S)
+
+#define HNS_TXD_RA_B 8
+#define HNS_TXD_RI_B 9
+#define HNS_TXD_L4CS_B 10
+#define HNS_TXD_L3CS_B 11
+#define HNS_TXD_FE_B 12
+#define HNS_TXD_VLD_B 13
+#define HNS_TXD_IPOFFSET_S 14
+#define HNS_TXD_IPOFFSET_M (0xff << HNS_TXD_IPOFFSET_S)
+
+#define HNS_RXD_IPOFFSET_S 0
+#define HNS_RXD_IPOFFSET_M (0xff << HNS_RXD_IPOFFSET_S)
+#define HNS_RXD_BUFNUM_S 8
+#define HNS_RXD_BUFNUM_M (0x3 << HNS_RXD_BUFNUM_S)
+#define HNS_RXD_PORTID_S 10
+#define HNS_RXD_PORTID_M (0x7 << HNS_RXD_PORTID_S)
+#define HNS_RXD_DMAC_S 13
+#define HNS_RXD_DMAC_M (0x3 << HNS_RXD_DMAC_S)
+#define HNS_RXD_VLAN_S 15
+#define HNS_RXD_VLAN_M (0x3 << HNS_RXD_VLAN_S)
+#define HNS_RXD_L3ID_S 17
+#define HNS_RXD_L3ID_M (0xf << HNS_RXD_L3ID_S)
+#define HNS_RXD_L4ID_S 21
+#define HNS_RXD_L4ID_M (0xf << HNS_RXD_L4ID_S)
+#define HNS_RXD_FE_B 25
+#define HNS_RXD_FRAG_B 26
+#define HNS_RXD_VLD_B 27
+#define HNS_RXD_L2E_B 28
+#define HNS_RXD_L3E_B 29
+#define HNS_RXD_L4E_B 30
+#define HNS_RXD_DROP_B 31
+
+#define HNS_RXD_VLANID_S 8
+#define HNS_RXD_VLANID_M (0xfff << HNS_RXD_VLANID_S)
+#define HNS_RXD_CFI_B 20
+#define HNS_RXD_PRI_S 21
+#define HNS_RXD_PRI_M (0x7 << HNS_RXD_PRI_S)
+#define HNS_RXD_ASID_S 24
+#define HNS_RXD_ASID_M (0xff << HNS_RXD_ASID_S)
+
+#define HNSV2_TXD_BUFNUM_S 0
+#define HNSV2_TXD_BUFNUM_M (0x7 << HNSV2_TXD_BUFNUM_S)
+#define HNSV2_TXD_PORTID_S 4
+#define HNSV2_TXD_PORTID_M (0x7 << HNSV2_TXD_PORTID_S)
+#define HNSV2_TXD_RI_B 1
+#define HNSV2_TXD_L4CS_B 2
+#define HNSV2_TXD_L3CS_B 3
+#define HNSV2_TXD_FE_B 4
+#define HNSV2_TXD_VLD_B 5
+
+#define HNSV2_TXD_TSE_B 0
+#define HNSV2_TXD_VLAN_EN_B 1
+#define HNSV2_TXD_SNAP_B 2
+#define HNSV2_TXD_IPV6_B 3
+#define HNSV2_TXD_SCTP_B 4
+
+/* hardware spec ring buffer format */
+struct __packed hnae_desc {
+ __le64 addr;
+ union {
+ struct {
+ union {
+ __le16 asid_bufnum_pid;
+ __le16 asid;
+ };
+ __le16 send_size;
+ union {
+ __le32 flag_ipoffset;
+ struct {
+ __u8 bn_pid;
+ __u8 ra_ri_cs_fe_vld;
+ __u8 ip_offset;
+ __u8 tse_vlan_snap_v6_sctp_nth;
+ };
+ };
+ __le16 mss;
+ __u8 l4_len;
+ __u8 reserved1;
+ __le16 paylen;
+ __u8 vmid;
+ __u8 qid;
+ __le32 reserved2[2];
+ } tx;
+
+ struct {
+ __le32 ipoff_bnum_pid_flag;
+ __le16 pkt_len;
+ __le16 size;
+ union {
+ __le32 vlan_pri_asid;
+ struct {
+ __le16 asid;
+ __le16 vlan_cfi_pri;
+ };
+ };
+ __le32 rss_hash;
+ __le32 reserved_1[2];
+ } rx;
+ };
+};
+
+struct hnae_desc_cb {
+ dma_addr_t dma; /* dma address of this desc */
+ void *buf; /* cpu addr for a desc */
+
+ /* priv data for the desc, e.g. the skb when used with the IP stack */
+ void *priv;
+ u32 page_offset;
+ u32 length; /* length of the buffer */
+
+ u16 reuse_flag;
+
+ /* desc type, used by the ring user to mark the type of the priv data */
+ u16 type;
+};
+
+#define setflags(flags, bits) ((flags) |= (bits))
+#define unsetflags(flags, bits) ((flags) &= ~(bits))
+
+/* hnae_ring->flags fields */
+#define RINGF_DIR 0x1 /* TX or RX ring, set if TX */
+#define is_tx_ring(ring) ((ring)->flags & RINGF_DIR)
+#define is_rx_ring(ring) (!is_tx_ring(ring))
+#define ring_to_dma_dir(ring) (is_tx_ring(ring) ? \
+ DMA_TO_DEVICE : DMA_FROM_DEVICE)
+
+struct ring_stats {
+ u64 io_err_cnt;
+ u64 sw_err_cnt;
+ u64 seg_pkt_cnt;
+ union {
+ struct {
+ u64 tx_pkts;
+ u64 tx_bytes;
+ u64 tx_err_cnt;
+ u64 restart_queue;
+ u64 tx_busy;
+ };
+ struct {
+ u64 rx_pkts;
+ u64 rx_bytes;
+ u64 rx_err_cnt;
+ u64 reuse_pg_cnt;
+ u64 err_pkt_len;
+ u64 non_vld_descs;
+ u64 err_bd_num;
+ u64 l2_err;
+ u64 l3l4_csum_err;
+ };
+ };
+};
+
+struct hnae_queue;
+
+struct hnae_ring {
+ u8 __iomem *io_base; /* base io address for the ring */
+ struct hnae_desc *desc; /* dma map address space */
+ struct hnae_desc_cb *desc_cb;
+ struct hnae_queue *q;
+ int irq;
+ char ring_name[RCB_RING_NAME_LEN];
+
+ /* statistic */
+ struct ring_stats stats;
+
+ dma_addr_t desc_dma_addr;
+ u32 buf_size; /* size for hnae_desc->addr, preset by AE */
+ u16 desc_num; /* total number of desc */
+ u16 max_desc_num_per_pkt;
+ u16 max_raw_data_sz_per_desc;
+ u16 max_pkt_size;
+ int next_to_use; /* idx of next spare desc */
+
+ /* idx of the last sent desc; the ring is empty when this equals
+ * next_to_use
+ */
+ int next_to_clean;
+
+ int flags; /* ring attribute */
+ int irq_init_flag;
+
+ /* total rx bytes since the rx rate was last calculated */
+ u64 coal_last_rx_bytes;
+ unsigned long coal_last_jiffies;
+ u32 coal_param;
+ u32 coal_rx_rate; /* rx rate in MB */
+};
+
+#define ring_ptr_move_fw(ring, p) \
+ ((ring)->p = ((ring)->p + 1) % (ring)->desc_num)
+#define ring_ptr_move_bw(ring, p) \
+ ((ring)->p = ((ring)->p - 1 + (ring)->desc_num) % (ring)->desc_num)
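+
+/* Illustrative use of the pointer-move helpers above (a sketch, not driver
+ * code): after filling ring->desc[ring->next_to_use], the producer index is
+ * advanced with wrap-around by
+ *
+ *	ring_ptr_move_fw(ring, next_to_use);
+ *
+ * and ring_ptr_move_bw() steps it back the same way.
+ */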
+
+enum hns_desc_type {
+ DESC_TYPE_SKB,
+ DESC_TYPE_PAGE,
+};
+
+#define assert_is_ring_idx(ring, idx) \
+ assert((idx) >= 0 && (idx) < (ring)->desc_num)
+
+/* the distance between [begin, end) in a ring buffer
+ * note: there is an unused slot between begin and end
+ */
+static inline int ring_dist(struct hnae_ring *ring, int begin, int end)
+{
+ assert_is_ring_idx(ring, begin);
+ assert_is_ring_idx(ring, end);
+
+ return (end - begin + ring->desc_num) % ring->desc_num;
+}
+
+static inline int ring_space(struct hnae_ring *ring)
+{
+ return ring->desc_num -
+ ring_dist(ring, ring->next_to_clean, ring->next_to_use) - 1;
+}
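+
+/* Worked example (values assumed for illustration): with desc_num = 8,
+ * next_to_clean = 2 and next_to_use = 6, ring_dist() is (6 - 2 + 8) % 8 = 4
+ * descs in flight, so ring_space() is 8 - 4 - 1 = 3; one slot is always left
+ * unused so a full ring can be told apart from an empty one.
+ */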
+
+static inline int is_ring_empty(struct hnae_ring *ring)
+{
+ assert_is_ring_idx(ring, ring->next_to_use);
+ assert_is_ring_idx(ring, ring->next_to_clean);
+
+ return ring->next_to_use == ring->next_to_clean;
+}
+
+#define hnae_buf_size(_ring) ((_ring)->buf_size)
+#define hnae_page_order(_ring) (get_order(hnae_buf_size(_ring)))
+#define hnae_page_size(_ring) (PAGE_SIZE << hnae_page_order(_ring))
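+
+/* For example (assuming 4 KiB pages): buf_size = 2048 gives page order 0 and
+ * hnae_page_size() = 4096, so two buffers fit in one page and are told apart
+ * by desc_cb->page_offset; buf_size = 4096 also gives order 0, i.e. one
+ * buffer per page.
+ */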
+
+struct hnae_handle;
+
+/* allocate and dma map space for hnae desc */
+struct hnae_buf_ops {
+ int (*alloc_buffer)(struct hnae_ring *ring, struct hnae_desc_cb *cb);
+ void (*free_buffer)(struct hnae_ring *ring, struct hnae_desc_cb *cb);
+ int (*map_buffer)(struct hnae_ring *ring, struct hnae_desc_cb *cb);
+ void (*unmap_buffer)(struct hnae_ring *ring, struct hnae_desc_cb *cb);
+};
+
+struct hnae_queue {
+ u8 __iomem *io_base;
+ phys_addr_t phy_base;
+ struct hnae_ae_dev *dev; /* the device that uses this queue */
+ struct hnae_ring rx_ring ____cacheline_internodealigned_in_smp;
+ struct hnae_ring tx_ring ____cacheline_internodealigned_in_smp;
+ struct hnae_handle *handle;
+};
+
+/* hnae loop mode */
+enum hnae_loop {
+ MAC_INTERNALLOOP_MAC = 0,
+ MAC_INTERNALLOOP_SERDES,
+ MAC_INTERNALLOOP_PHY,
+ MAC_LOOP_PHY_NONE,
+ MAC_LOOP_NONE,
+};
+
+/* hnae port type */
+enum hnae_port_type {
+ HNAE_PORT_SERVICE = 0,
+ HNAE_PORT_DEBUG
+};
+
+/* mac media type */
+enum hnae_media_type {
+ HNAE_MEDIA_TYPE_UNKNOWN = 0,
+ HNAE_MEDIA_TYPE_FIBER,
+ HNAE_MEDIA_TYPE_COPPER,
+ HNAE_MEDIA_TYPE_BACKPLANE,
+};
+
+/* This struct defines the operation on the handle.
+ *
+ * get_handle(): (mandatory)
+ * Get a handle from AE according to its name and options.
+ * the AE driver should manage the space used by handle and its queues while
+ * the HNAE framework will allocate desc and desc_cb for all rings in the
+ * queues.
+ * put_handle():
+ * Release the handle.
+ * start():
+ * Enable the hardware, include all queues
+ * stop():
+ * Disable the hardware
+ * set_opts(): (mandatory)
+ * Set options to the AE
+ * get_opts(): (mandatory)
+ * Get options from the AE
+ * get_status():
+ * Get the carrier state of the back channel of the handle, 1 for link up,
+ * 0 for link down
+ * toggle_ring_irq(): (mandatory)
+ * Set the ring irq to enabled (0) or disabled (1)
+ * toggle_queue_status(): (mandatory)
+ * Set the queue to enabled (1) or disabled (0); this does not change the
+ * ring irq state
+ * adjust_link()
+ * adjust link status
+ * set_loopback()
+ * set loopback
+ * get_ring_bdnum_limit()
+ * get ring bd number limit
+ * get_pauseparam()
+ * get whether tx and rx pause frames are enabled
+ * set_pauseparam()
+ * set whether tx and rx pause frames are enabled
+ * get_coalesce_usecs()
+ * get usecs to delay a TX interrupt after a packet is sent
+ * get_rx_max_coalesced_frames()
+ * get the maximum number of packets to be sent before a TX interrupt.
+ * set_coalesce_usecs()
+ * set usecs to delay a TX interrupt after a packet is sent
+ * set_coalesce_frames()
+ * set the maximum number of packets to be sent before a TX interrupt.
+ * get_ringnum()
+ * get RX/TX ring number
+ * get_max_ringnum()
+ * get RX/TX ring maximum number
+ * get_mac_addr()
+ * get mac address
+ * set_mac_addr()
+ * set mac address
+ * clr_mc_addr()
+ * clear mcast tcam table
+ * set_mc_addr()
+ * set multicast mode
+ * add_uc_addr()
+ * add ucast address
+ * rm_uc_addr()
+ * remove ucast address
+ * set_mtu()
+ * set mtu
+ * update_stats()
+ * update the net_device statistics
+ * get_ethtool_stats()
+ * get ethtool network device statistics
+ * get_strings()
+ * get a set of strings that describe the requested objects
+ * get_sset_count()
+ * get number of strings that @get_strings will write
+ * update_led_status()
+ * update the led status
+ * set_led_id()
+ * set led id
+ * get_regs()
+ * get regs dump
+ * get_regs_len()
+ * get the len of the regs dump
+ */
+struct hnae_ae_ops {
+ struct hnae_handle *(*get_handle)(struct hnae_ae_dev *dev,
+ u32 port_id);
+ void (*put_handle)(struct hnae_handle *handle);
+ void (*init_queue)(struct hnae_queue *q);
+ void (*fini_queue)(struct hnae_queue *q);
+ int (*start)(struct hnae_handle *handle);
+ void (*stop)(struct hnae_handle *handle);
+ void (*reset)(struct hnae_handle *handle);
+ int (*set_opts)(struct hnae_handle *handle, int type, void *opts);
+ int (*get_opts)(struct hnae_handle *handle, int type, void **opts);
+ int (*get_status)(struct hnae_handle *handle);
+ int (*get_info)(struct hnae_handle *handle,
+ u8 *auto_neg, u16 *speed, u8 *duplex);
+ void (*toggle_ring_irq)(struct hnae_ring *ring, u32 val);
+ void (*adjust_link)(struct hnae_handle *handle, int speed, int duplex);
+ bool (*need_adjust_link)(struct hnae_handle *handle,
+ int speed, int duplex);
+ int (*set_loopback)(struct hnae_handle *handle,
+ enum hnae_loop loop_mode, int en);
+ void (*get_ring_bdnum_limit)(struct hnae_queue *queue,
+ u32 *uplimit);
+ void (*get_pauseparam)(struct hnae_handle *handle,
+ u32 *auto_neg, u32 *rx_en, u32 *tx_en);
+ int (*set_pauseparam)(struct hnae_handle *handle,
+ u32 auto_neg, u32 rx_en, u32 tx_en);
+ void (*get_coalesce_usecs)(struct hnae_handle *handle,
+ u32 *tx_usecs, u32 *rx_usecs);
+ void (*get_max_coalesced_frames)(struct hnae_handle *handle,
+ u32 *tx_frames, u32 *rx_frames);
+ int (*set_coalesce_usecs)(struct hnae_handle *handle, u32 timeout);
+ int (*set_coalesce_frames)(struct hnae_handle *handle,
+ u32 tx_frames, u32 rx_frames);
+ void (*get_coalesce_range)(struct hnae_handle *handle,
+ u32 *tx_frames_low, u32 *rx_frames_low,
+ u32 *tx_frames_high, u32 *rx_frames_high,
+ u32 *tx_usecs_low, u32 *rx_usecs_low,
+ u32 *tx_usecs_high, u32 *rx_usecs_high);
+ void (*set_promisc_mode)(struct hnae_handle *handle, u32 en);
+ int (*get_mac_addr)(struct hnae_handle *handle, void **p);
+ int (*set_mac_addr)(struct hnae_handle *handle, const void *p);
+ int (*add_uc_addr)(struct hnae_handle *handle,
+ const unsigned char *addr);
+ int (*rm_uc_addr)(struct hnae_handle *handle,
+ const unsigned char *addr);
+ int (*clr_mc_addr)(struct hnae_handle *handle);
+ int (*set_mc_addr)(struct hnae_handle *handle, void *addr);
+ int (*set_mtu)(struct hnae_handle *handle, int new_mtu);
+ void (*set_tso_stats)(struct hnae_handle *handle, int enable);
+ void (*update_stats)(struct hnae_handle *handle,
+ struct net_device_stats *net_stats);
+ void (*get_stats)(struct hnae_handle *handle, u64 *data);
+ void (*get_strings)(struct hnae_handle *handle,
+ u32 stringset, u8 *data);
+ int (*get_sset_count)(struct hnae_handle *handle, int stringset);
+ void (*update_led_status)(struct hnae_handle *handle);
+ int (*set_led_id)(struct hnae_handle *handle,
+ enum hnae_led_state status);
+ void (*get_regs)(struct hnae_handle *handle, void *data);
+ int (*get_regs_len)(struct hnae_handle *handle);
+ u32 (*get_rss_key_size)(struct hnae_handle *handle);
+ u32 (*get_rss_indir_size)(struct hnae_handle *handle);
+ int (*get_rss)(struct hnae_handle *handle, u32 *indir, u8 *key,
+ u8 *hfunc);
+ int (*set_rss)(struct hnae_handle *handle, const u32 *indir,
+ const u8 *key, const u8 hfunc);
+};
+
+struct hnae_ae_dev {
+ struct device cls_dev; /* the class dev */
+ struct device *dev; /* the presented dev */
+ struct hnae_ae_ops *ops;
+ struct list_head node;
+ struct module *owner; /* the module that provides this dev */
+ int id;
+ char name[AE_NAME_SIZE];
+ struct list_head handle_list;
+ spinlock_t lock; /* lock to protect the handle_list */
+};
+
+struct hnae_handle {
+ struct device *owner_dev; /* the device which makes use of this handle */
+ struct hnae_ae_dev *dev; /* the device that provides this handle */
+ struct phy_device *phy_dev;
+ phy_interface_t phy_if;
+ u32 if_support;
+ int q_num;
+ int vf_id;
+ unsigned long coal_last_jiffies;
+ u32 coal_param; /* self adapt coalesce param */
+ /* the ring index of last ring that set coal param */
+ u32 coal_ring_idx;
+ u32 eport_id;
+ u32 dport_id; /* v2 tx bd should fill the dport_id */
+ bool coal_adapt_en;
+ enum hnae_port_type port_type;
+ enum hnae_media_type media_type;
+ struct list_head node; /* list to hnae_ae_dev->handle_list */
+ struct hnae_buf_ops *bops; /* operation for the buffer */
+ struct hnae_queue *qs[]; /* flexible array of all queues */
+};
+
+#define ring_to_dev(ring) ((ring)->q->dev->dev)
+
+struct hnae_handle *hnae_get_handle(struct device *owner_dev,
+ const struct fwnode_handle *fwnode,
+ u32 port_id,
+ struct hnae_buf_ops *bops);
+
+void hnae_put_handle(struct hnae_handle *handle);
+int hnae_ae_register(struct hnae_ae_dev *dev, struct module *owner);
+void hnae_ae_unregister(struct hnae_ae_dev *dev);
+
+int hnae_register_notifier(struct notifier_block *nb);
+void hnae_unregister_notifier(struct notifier_block *nb);
+int hnae_reinit_handle(struct hnae_handle *handle);
+
+#define hnae_queue_xmit(q, buf_num) writel_relaxed(buf_num, \
+ (q)->tx_ring.io_base + RCB_REG_TAIL)
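+
+/* Illustrative doorbell usage (a sketch only): once buf_num descriptors have
+ * been filled starting at tx_ring.next_to_use, the hardware is kicked with
+ *
+ *	hnae_queue_xmit(ring->q, buf_num);
+ */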
+
+#ifndef assert
+#define assert(cond)
+#endif
+
+static inline int hnae_reserve_buffer_map(struct hnae_ring *ring,
+ struct hnae_desc_cb *cb)
+{
+ struct hnae_buf_ops *bops = ring->q->handle->bops;
+ int ret;
+
+ ret = bops->alloc_buffer(ring, cb);
+ if (ret)
+ goto out;
+
+ ret = bops->map_buffer(ring, cb);
+ if (ret)
+ goto out_with_buf;
+
+ return 0;
+
+out_with_buf:
+ bops->free_buffer(ring, cb);
+out:
+ return ret;
+}
+
+static inline int hnae_alloc_buffer_attach(struct hnae_ring *ring, int i)
+{
+ int ret = hnae_reserve_buffer_map(ring, &ring->desc_cb[i]);
+
+ if (ret)
+ return ret;
+
+ ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
+
+ return 0;
+}
+
+static inline void hnae_buffer_detach(struct hnae_ring *ring, int i)
+{
+ ring->q->handle->bops->unmap_buffer(ring, &ring->desc_cb[i]);
+ ring->desc[i].addr = 0;
+}
+
+static inline void hnae_free_buffer_detach(struct hnae_ring *ring, int i)
+{
+ struct hnae_buf_ops *bops = ring->q->handle->bops;
+ struct hnae_desc_cb *cb = &ring->desc_cb[i];
+
+ if (!ring->desc_cb[i].dma)
+ return;
+
+ hnae_buffer_detach(ring, i);
+ bops->free_buffer(ring, cb);
+}
+
+/* detach an in-use buffer and replace it with a reserved one */
+static inline void hnae_replace_buffer(struct hnae_ring *ring, int i,
+ struct hnae_desc_cb *res_cb)
+{
+ struct hnae_buf_ops *bops = ring->q->handle->bops;
+
+ bops->unmap_buffer(ring, &ring->desc_cb[i]);
+ ring->desc_cb[i] = *res_cb;
+ ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
+ ring->desc[i].rx.ipoff_bnum_pid_flag = 0;
+}
+
+static inline void hnae_reuse_buffer(struct hnae_ring *ring, int i)
+{
+ ring->desc_cb[i].reuse_flag = 0;
+ ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma
+ + ring->desc_cb[i].page_offset);
+ ring->desc[i].rx.ipoff_bnum_pid_flag = 0;
+}
+
+/* when the buffer size is changed, reinit the buffer descriptors as well */
+static inline void hnae_reinit_all_ring_desc(struct hnae_handle *h)
+{
+ int i, j;
+ struct hnae_ring *ring;
+
+ for (i = 0; i < h->q_num; i++) {
+ ring = &h->qs[i]->rx_ring;
+ for (j = 0; j < ring->desc_num; j++)
+ ring->desc[j].addr = cpu_to_le64(ring->desc_cb[j].dma);
+ }
+
+ wmb(); /* commit all data before submit */
+}
+
+/* when the buffer size is changed, reinit the page offsets as well */
+static inline void hnae_reinit_all_ring_page_off(struct hnae_handle *h)
+{
+ int i, j;
+ struct hnae_ring *ring;
+
+ for (i = 0; i < h->q_num; i++) {
+ ring = &h->qs[i]->rx_ring;
+ for (j = 0; j < ring->desc_num; j++) {
+ ring->desc_cb[j].page_offset = 0;
+ if (ring->desc[j].addr !=
+ cpu_to_le64(ring->desc_cb[j].dma))
+ ring->desc[j].addr =
+ cpu_to_le64(ring->desc_cb[j].dma);
+ }
+ }
+
+ wmb(); /* commit all data before submit */
+}
+
+#define hnae_set_field(origin, mask, shift, val) \
+ do { \
+ (origin) &= (~(mask)); \
+ (origin) |= ((val) << (shift)) & (mask); \
+ } while (0)
+
+#define hnae_set_bit(origin, shift, val) \
+ hnae_set_field((origin), (0x1 << (shift)), (shift), (val))
+
+#define hnae_get_field(origin, mask, shift) (((origin) & (mask)) >> (shift))
+
+#define hnae_get_bit(origin, shift) \
+ hnae_get_field((origin), (0x1 << (shift)), (shift))
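+
+/* Example use of the field helpers above (illustrative only; the values are
+ * made up):
+ *
+ *	u32 flag = 0;
+ *
+ *	hnae_set_field(flag, HNS_TXD_BUFNUM_M, HNS_TXD_BUFNUM_S, 2);
+ *	hnae_set_bit(flag, HNS_TXD_VLD_B, 1);
+ *	buf_num = hnae_get_field(flag, HNS_TXD_BUFNUM_M, HNS_TXD_BUFNUM_S);
+ */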
+
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
new file mode 100644
index 0000000000..bc3e406f01
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
@@ -0,0 +1,1002 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/netdevice.h>
+#include <linux/spinlock.h>
+
+#include "hnae.h"
+#include "hns_dsaf_mac.h"
+#include "hns_dsaf_main.h"
+#include "hns_dsaf_ppe.h"
+#include "hns_dsaf_rcb.h"
+
+static struct hns_mac_cb *hns_get_mac_cb(struct hnae_handle *handle)
+{
+ struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
+
+ return vf_cb->mac_cb;
+}
+
+static struct dsaf_device *hns_ae_get_dsaf_dev(struct hnae_ae_dev *dev)
+{
+ return container_of(dev, struct dsaf_device, ae_dev);
+}
+
+static struct hns_ppe_cb *hns_get_ppe_cb(struct hnae_handle *handle)
+{
+ int ppe_index;
+ struct ppe_common_cb *ppe_comm;
+ struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
+
+ ppe_comm = vf_cb->dsaf_dev->ppe_common[0];
+ ppe_index = vf_cb->port_index;
+
+ return &ppe_comm->ppe_cb[ppe_index];
+}
+
+static int hns_ae_get_q_num_per_vf(
+ struct dsaf_device *dsaf_dev, int port)
+{
+ return dsaf_dev->rcb_common[0]->max_q_per_vf;
+}
+
+static int hns_ae_get_vf_num_per_port(
+ struct dsaf_device *dsaf_dev, int port)
+{
+ return dsaf_dev->rcb_common[0]->max_vfn;
+}
+
+static struct ring_pair_cb *hns_ae_get_base_ring_pair(
+ struct dsaf_device *dsaf_dev, int port)
+{
+ struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[0];
+ int q_num = rcb_comm->max_q_per_vf;
+ int vf_num = rcb_comm->max_vfn;
+
+ return &rcb_comm->ring_pair_cb[port * q_num * vf_num];
+}
+
+static struct ring_pair_cb *hns_ae_get_ring_pair(struct hnae_queue *q)
+{
+ return container_of(q, struct ring_pair_cb, q);
+}
+
+static struct hnae_handle *hns_ae_get_handle(struct hnae_ae_dev *dev,
+ u32 port_id)
+{
+ int vfnum_per_port;
+ int qnum_per_vf;
+ int i;
+ struct dsaf_device *dsaf_dev;
+ struct hnae_handle *ae_handle;
+ struct ring_pair_cb *ring_pair_cb;
+ struct hnae_vf_cb *vf_cb;
+
+ dsaf_dev = hns_ae_get_dsaf_dev(dev);
+
+ ring_pair_cb = hns_ae_get_base_ring_pair(dsaf_dev, port_id);
+ vfnum_per_port = hns_ae_get_vf_num_per_port(dsaf_dev, port_id);
+ qnum_per_vf = hns_ae_get_q_num_per_vf(dsaf_dev, port_id);
+
+ vf_cb = kzalloc(struct_size(vf_cb, ae_handle.qs, qnum_per_vf),
+ GFP_KERNEL);
+ if (unlikely(!vf_cb)) {
+ dev_err(dsaf_dev->dev, "malloc vf_cb fail!\n");
+ ae_handle = ERR_PTR(-ENOMEM);
+ goto handle_err;
+ }
+ ae_handle = &vf_cb->ae_handle;
+ /* ae_handle Init */
+ ae_handle->owner_dev = dsaf_dev->dev;
+ ae_handle->dev = dev;
+ ae_handle->q_num = qnum_per_vf;
+ ae_handle->coal_param = HNAE_LOWEST_LATENCY_COAL_PARAM;
+
+ /* find a free ring pair and set the vf id */
+ for (ae_handle->vf_id = 0;
+ ae_handle->vf_id < vfnum_per_port; ae_handle->vf_id++) {
+ if (!ring_pair_cb->used_by_vf)
+ break;
+ ring_pair_cb += qnum_per_vf;
+ }
+ if (ae_handle->vf_id >= vfnum_per_port) {
+ dev_err(dsaf_dev->dev, "malloc queue fail!\n");
+ ae_handle = ERR_PTR(-EINVAL);
+ goto vf_id_err;
+ }
+
+ for (i = 0; i < qnum_per_vf; i++) {
+ ae_handle->qs[i] = &ring_pair_cb->q;
+ ae_handle->qs[i]->rx_ring.q = ae_handle->qs[i];
+ ae_handle->qs[i]->tx_ring.q = ae_handle->qs[i];
+
+ ring_pair_cb->used_by_vf = 1;
+ ring_pair_cb++;
+ }
+
+ vf_cb->dsaf_dev = dsaf_dev;
+ vf_cb->port_index = port_id;
+ vf_cb->mac_cb = dsaf_dev->mac_cb[port_id];
+
+ ae_handle->phy_if = vf_cb->mac_cb->phy_if;
+ ae_handle->phy_dev = vf_cb->mac_cb->phy_dev;
+ ae_handle->if_support = vf_cb->mac_cb->if_support;
+ ae_handle->port_type = vf_cb->mac_cb->mac_type;
+ ae_handle->media_type = vf_cb->mac_cb->media_type;
+ ae_handle->dport_id = port_id;
+
+ return ae_handle;
+vf_id_err:
+ kfree(vf_cb);
+handle_err:
+ return ae_handle;
+}
+
+static void hns_ae_put_handle(struct hnae_handle *handle)
+{
+ struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
+ int i;
+
+ for (i = 0; i < handle->q_num; i++)
+ hns_ae_get_ring_pair(handle->qs[i])->used_by_vf = 0;
+
+ kfree(vf_cb);
+}
+
+static int hns_ae_wait_flow_down(struct hnae_handle *handle)
+{
+ struct dsaf_device *dsaf_dev;
+ struct hns_ppe_cb *ppe_cb;
+ struct hnae_vf_cb *vf_cb;
+ int ret;
+ int i;
+
+ for (i = 0; i < handle->q_num; i++) {
+ ret = hns_rcb_wait_tx_ring_clean(handle->qs[i]);
+ if (ret)
+ return ret;
+ }
+
+ ppe_cb = hns_get_ppe_cb(handle);
+ ret = hns_ppe_wait_tx_fifo_clean(ppe_cb);
+ if (ret)
+ return ret;
+
+ dsaf_dev = hns_ae_get_dsaf_dev(handle->dev);
+ if (!dsaf_dev)
+ return -EINVAL;
+ ret = hns_dsaf_wait_pkt_clean(dsaf_dev, handle->dport_id);
+ if (ret)
+ return ret;
+
+ vf_cb = hns_ae_get_vf_cb(handle);
+ ret = hns_mac_wait_fifo_clean(vf_cb->mac_cb);
+ if (ret)
+ return ret;
+
+ mdelay(10);
+ return 0;
+}
+
+static void hns_ae_ring_enable_all(struct hnae_handle *handle, int val)
+{
+ int q_num = handle->q_num;
+ int i;
+
+ for (i = 0; i < q_num; i++)
+ hns_rcb_ring_enable_hw(handle->qs[i], val);
+}
+
+static void hns_ae_init_queue(struct hnae_queue *q)
+{
+ struct ring_pair_cb *ring =
+ container_of(q, struct ring_pair_cb, q);
+
+ hns_rcb_init_hw(ring);
+}
+
+static void hns_ae_fini_queue(struct hnae_queue *q)
+{
+ struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(q->handle);
+
+ if (vf_cb->mac_cb->mac_type == HNAE_PORT_SERVICE)
+ hns_rcb_reset_ring_hw(q);
+}
+
+static int hns_ae_set_mac_address(struct hnae_handle *handle, const void *p)
+{
+ int ret;
+ struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+
+ if (!p || !is_valid_ether_addr((const u8 *)p)) {
+ dev_err(handle->owner_dev, "is not valid ether addr !\n");
+ return -EADDRNOTAVAIL;
+ }
+
+ ret = hns_mac_change_vf_addr(mac_cb, handle->vf_id, p);
+ if (ret != 0) {
+ dev_err(handle->owner_dev,
+ "set_mac_address fail, ret=%d!\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hns_ae_add_uc_address(struct hnae_handle *handle,
+ const unsigned char *addr)
+{
+ struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+
+ if (mac_cb->mac_type != HNAE_PORT_SERVICE)
+ return -ENOSPC;
+
+ return hns_mac_add_uc_addr(mac_cb, handle->vf_id, addr);
+}
+
+static int hns_ae_rm_uc_address(struct hnae_handle *handle,
+ const unsigned char *addr)
+{
+ struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+
+ if (mac_cb->mac_type != HNAE_PORT_SERVICE)
+ return -ENOSPC;
+
+ return hns_mac_rm_uc_addr(mac_cb, handle->vf_id, addr);
+}
+
+static int hns_ae_set_multicast_one(struct hnae_handle *handle, void *addr)
+{
+ int ret;
+ char *mac_addr = (char *)addr;
+ struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+ u8 port_num;
+
+ assert(mac_cb);
+
+ if (mac_cb->mac_type != HNAE_PORT_SERVICE)
+ return 0;
+
+ ret = hns_mac_set_multi(mac_cb, mac_cb->mac_id, mac_addr, true);
+ if (ret) {
+ dev_err(handle->owner_dev,
+ "mac add mul_mac:%pM port%d fail, ret = %#x!\n",
+ mac_addr, mac_cb->mac_id, ret);
+ return ret;
+ }
+
+ ret = hns_mac_get_inner_port_num(mac_cb, handle->vf_id, &port_num);
+ if (ret)
+ return ret;
+
+ ret = hns_mac_set_multi(mac_cb, port_num, mac_addr, true);
+ if (ret)
+ dev_err(handle->owner_dev,
+ "mac add mul_mac:%pM port%d fail, ret = %#x!\n",
+ mac_addr, DSAF_BASE_INNER_PORT_NUM, ret);
+
+ return ret;
+}
+
+static int hns_ae_clr_multicast(struct hnae_handle *handle)
+{
+ struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+
+ if (mac_cb->mac_type != HNAE_PORT_SERVICE)
+ return 0;
+
+ return hns_mac_clr_multicast(mac_cb, handle->vf_id);
+}
+
+static int hns_ae_set_mtu(struct hnae_handle *handle, int new_mtu)
+{
+ struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+ struct hnae_queue *q;
+ u32 rx_buf_size;
+ int i, ret;
+
+ /* when buf_size is 2048, the max mtu is 6K because the rx ring's max bd num is 3. */
+ if (!AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver)) {
+ if (new_mtu <= BD_SIZE_2048_MAX_MTU)
+ rx_buf_size = 2048;
+ else
+ rx_buf_size = 4096;
+ } else {
+ rx_buf_size = mac_cb->dsaf_dev->buf_size;
+ }
+
+ ret = hns_mac_set_mtu(mac_cb, new_mtu, rx_buf_size);
+
+ if (!ret) {
+ /* reinit ring buf_size */
+ for (i = 0; i < handle->q_num; i++) {
+ q = handle->qs[i];
+ q->rx_ring.buf_size = rx_buf_size;
+ hns_rcb_set_rx_ring_bs(q, rx_buf_size);
+ }
+ }
+
+ return ret;
+}
+
+static void hns_ae_set_tso_stats(struct hnae_handle *handle, int enable)
+{
+ struct hns_ppe_cb *ppe_cb = hns_get_ppe_cb(handle);
+
+ hns_ppe_set_tso_enable(ppe_cb, enable);
+}
+
+static int hns_ae_start(struct hnae_handle *handle)
+{
+ int ret;
+ int k;
+ struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+
+ ret = hns_mac_vm_config_bc_en(mac_cb, 0, true);
+ if (ret)
+ return ret;
+
+ for (k = 0; k < handle->q_num; k++) {
+ if (AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver))
+ hns_rcb_int_clr_hw(handle->qs[k],
+ RCB_INT_FLAG_TX | RCB_INT_FLAG_RX);
+ else
+ hns_rcbv2_int_clr_hw(handle->qs[k],
+ RCB_INT_FLAG_TX | RCB_INT_FLAG_RX);
+ }
+ hns_ae_ring_enable_all(handle, 1);
+ msleep(100);
+
+ hns_mac_start(mac_cb);
+
+ return 0;
+}
+
+static void hns_ae_stop(struct hnae_handle *handle)
+{
+ struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+
+ /* only clean the tx fbd here; the rx fbd need not be cleaned yet */
+ hns_rcb_wait_fbd_clean(handle->qs, handle->q_num, RCB_INT_FLAG_TX);
+
+ msleep(20);
+
+ hns_mac_stop(mac_cb);
+
+ usleep_range(10000, 20000);
+
+ hns_ae_ring_enable_all(handle, 0);
+
+ /* clean rx fbd. */
+ hns_rcb_wait_fbd_clean(handle->qs, handle->q_num, RCB_INT_FLAG_RX);
+
+ (void)hns_mac_vm_config_bc_en(mac_cb, 0, false);
+}
+
+static void hns_ae_reset(struct hnae_handle *handle)
+{
+ struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
+
+ if (vf_cb->mac_cb->mac_type == HNAE_PORT_DEBUG) {
+ hns_mac_reset(vf_cb->mac_cb);
+ hns_ppe_reset_common(vf_cb->dsaf_dev, 0);
+ }
+}
+
+static void hns_ae_toggle_ring_irq(struct hnae_ring *ring, u32 mask)
+{
+ u32 flag;
+
+ if (is_tx_ring(ring))
+ flag = RCB_INT_FLAG_TX;
+ else
+ flag = RCB_INT_FLAG_RX;
+
+ hns_rcb_int_ctrl_hw(ring->q, flag, mask);
+}
+
+static void hns_aev2_toggle_ring_irq(struct hnae_ring *ring, u32 mask)
+{
+ u32 flag;
+
+ if (is_tx_ring(ring))
+ flag = RCB_INT_FLAG_TX;
+ else
+ flag = RCB_INT_FLAG_RX;
+
+ hns_rcbv2_int_ctrl_hw(ring->q, flag, mask);
+}
+
+static int hns_ae_get_link_status(struct hnae_handle *handle)
+{
+ u32 link_status;
+ struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+
+ hns_mac_get_link_status(mac_cb, &link_status);
+
+ return !!link_status;
+}
+
+static int hns_ae_get_mac_info(struct hnae_handle *handle,
+ u8 *auto_neg, u16 *speed, u8 *duplex)
+{
+ struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+
+ return hns_mac_get_port_info(mac_cb, auto_neg, speed, duplex);
+}
+
+static bool hns_ae_need_adjust_link(struct hnae_handle *handle, int speed,
+ int duplex)
+{
+ struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+
+ return hns_mac_need_adjust_link(mac_cb, speed, duplex);
+}
+
+static void hns_ae_adjust_link(struct hnae_handle *handle, int speed,
+ int duplex)
+{
+ struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+
+ switch (mac_cb->dsaf_dev->dsaf_ver) {
+ case AE_VERSION_1:
+ hns_mac_adjust_link(mac_cb, speed, duplex);
+ break;
+
+ case AE_VERSION_2:
+ /* the chip needs to drain all packets inside first */
+ hns_mac_disable(mac_cb, MAC_COMM_MODE_RX);
+ if (hns_ae_wait_flow_down(handle)) {
+ hns_mac_enable(mac_cb, MAC_COMM_MODE_RX);
+ break;
+ }
+
+ hns_mac_adjust_link(mac_cb, speed, duplex);
+ hns_mac_enable(mac_cb, MAC_COMM_MODE_RX);
+ break;
+
+ default:
+ break;
+ }
+}
+
+static void hns_ae_get_ring_bdnum_limit(struct hnae_queue *queue,
+ u32 *uplimit)
+{
+ *uplimit = HNS_RCB_RING_MAX_PENDING_BD;
+}
+
+static void hns_ae_get_pauseparam(struct hnae_handle *handle,
+ u32 *auto_neg, u32 *rx_en, u32 *tx_en)
+{
+ struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+ struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev;
+
+ hns_mac_get_autoneg(mac_cb, auto_neg);
+
+ hns_mac_get_pauseparam(mac_cb, rx_en, tx_en);
+
+ /* Service port's pause feature is provided by DSAF, not mac */
+ if (handle->port_type == HNAE_PORT_SERVICE)
+ hns_dsaf_get_rx_mac_pause_en(dsaf_dev, mac_cb->mac_id, rx_en);
+}
+
+static void hns_ae_set_promisc_mode(struct hnae_handle *handle, u32 en)
+{
+ struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+
+ hns_dsaf_set_promisc_mode(hns_ae_get_dsaf_dev(handle->dev), en);
+ hns_mac_set_promisc(mac_cb, (u8)!!en);
+}
+
+static int hns_ae_set_pauseparam(struct hnae_handle *handle,
+ u32 autoneg, u32 rx_en, u32 tx_en)
+{
+ struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+ struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev;
+ int ret;
+
+ ret = hns_mac_set_autoneg(mac_cb, autoneg);
+ if (ret)
+ return ret;
+
+ /* Service port's pause feature is provided by DSAF, not mac */
+ if (handle->port_type == HNAE_PORT_SERVICE) {
+ ret = hns_dsaf_set_rx_mac_pause_en(dsaf_dev,
+ mac_cb->mac_id, rx_en);
+ if (ret)
+ return ret;
+ rx_en = 0;
+ }
+ return hns_mac_set_pauseparam(mac_cb, rx_en, tx_en);
+}
+
+static void hns_ae_get_coalesce_usecs(struct hnae_handle *handle,
+ u32 *tx_usecs, u32 *rx_usecs)
+{
+ struct ring_pair_cb *ring_pair =
+ container_of(handle->qs[0], struct ring_pair_cb, q);
+
+ *tx_usecs = hns_rcb_get_coalesce_usecs(ring_pair->rcb_common,
+ ring_pair->port_id_in_comm);
+ *rx_usecs = hns_rcb_get_coalesce_usecs(ring_pair->rcb_common,
+ ring_pair->port_id_in_comm);
+}
+
+static void hns_ae_get_max_coalesced_frames(struct hnae_handle *handle,
+ u32 *tx_frames, u32 *rx_frames)
+{
+ struct ring_pair_cb *ring_pair =
+ container_of(handle->qs[0], struct ring_pair_cb, q);
+ struct dsaf_device *dsaf_dev = hns_ae_get_dsaf_dev(handle->dev);
+
+ if (AE_IS_VER1(dsaf_dev->dsaf_ver) ||
+ handle->port_type == HNAE_PORT_DEBUG)
+ *tx_frames = hns_rcb_get_rx_coalesced_frames(
+ ring_pair->rcb_common, ring_pair->port_id_in_comm);
+ else
+ *tx_frames = hns_rcb_get_tx_coalesced_frames(
+ ring_pair->rcb_common, ring_pair->port_id_in_comm);
+ *rx_frames = hns_rcb_get_rx_coalesced_frames(ring_pair->rcb_common,
+ ring_pair->port_id_in_comm);
+}
+
+static int hns_ae_set_coalesce_usecs(struct hnae_handle *handle,
+ u32 timeout)
+{
+ struct ring_pair_cb *ring_pair =
+ container_of(handle->qs[0], struct ring_pair_cb, q);
+
+ return hns_rcb_set_coalesce_usecs(
+ ring_pair->rcb_common, ring_pair->port_id_in_comm, timeout);
+}
+
+static int hns_ae_set_coalesce_frames(struct hnae_handle *handle,
+ u32 tx_frames, u32 rx_frames)
+{
+ int ret;
+ struct ring_pair_cb *ring_pair =
+ container_of(handle->qs[0], struct ring_pair_cb, q);
+ struct dsaf_device *dsaf_dev = hns_ae_get_dsaf_dev(handle->dev);
+
+ if (AE_IS_VER1(dsaf_dev->dsaf_ver) ||
+ handle->port_type == HNAE_PORT_DEBUG) {
+ if (tx_frames != rx_frames)
+ return -EINVAL;
+ return hns_rcb_set_rx_coalesced_frames(
+ ring_pair->rcb_common,
+ ring_pair->port_id_in_comm, rx_frames);
+ } else {
+ if (tx_frames != 1)
+ return -EINVAL;
+ ret = hns_rcb_set_tx_coalesced_frames(
+ ring_pair->rcb_common,
+ ring_pair->port_id_in_comm, tx_frames);
+ if (ret)
+ return ret;
+
+ return hns_rcb_set_rx_coalesced_frames(
+ ring_pair->rcb_common,
+ ring_pair->port_id_in_comm, rx_frames);
+ }
+}
+
+static void hns_ae_get_coalesce_range(struct hnae_handle *handle,
+ u32 *tx_frames_low, u32 *rx_frames_low,
+ u32 *tx_frames_high, u32 *rx_frames_high,
+ u32 *tx_usecs_low, u32 *rx_usecs_low,
+ u32 *tx_usecs_high, u32 *rx_usecs_high)
+{
+ struct dsaf_device *dsaf_dev;
+
+ assert(handle);
+
+ dsaf_dev = hns_ae_get_dsaf_dev(handle->dev);
+
+ *tx_frames_low = HNS_RCB_TX_FRAMES_LOW;
+ *rx_frames_low = HNS_RCB_RX_FRAMES_LOW;
+
+ if (AE_IS_VER1(dsaf_dev->dsaf_ver) ||
+ handle->port_type == HNAE_PORT_DEBUG)
+ *tx_frames_high =
+ (dsaf_dev->desc_num - 1 > HNS_RCB_TX_FRAMES_HIGH) ?
+ HNS_RCB_TX_FRAMES_HIGH : dsaf_dev->desc_num - 1;
+ else
+ *tx_frames_high = 1;
+
+ *rx_frames_high = (dsaf_dev->desc_num - 1 > HNS_RCB_RX_FRAMES_HIGH) ?
+ HNS_RCB_RX_FRAMES_HIGH : dsaf_dev->desc_num - 1;
+ *tx_usecs_low = HNS_RCB_TX_USECS_LOW;
+ *rx_usecs_low = HNS_RCB_RX_USECS_LOW;
+ *tx_usecs_high = HNS_RCB_TX_USECS_HIGH;
+ *rx_usecs_high = HNS_RCB_RX_USECS_HIGH;
+}
+
+static void hns_ae_update_stats(struct hnae_handle *handle,
+ struct net_device_stats *net_stats)
+{
+ int port;
+ int idx;
+ struct dsaf_device *dsaf_dev;
+ struct hns_mac_cb *mac_cb;
+ struct hns_ppe_cb *ppe_cb;
+ struct hnae_queue *queue;
+ struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
+ u64 tx_bytes = 0, rx_bytes = 0, tx_packets = 0, rx_packets = 0;
+ u64 rx_errors = 0, tx_errors = 0, tx_dropped = 0;
+ u64 rx_missed_errors;
+
+ dsaf_dev = hns_ae_get_dsaf_dev(handle->dev);
+ if (!dsaf_dev)
+ return;
+ port = vf_cb->port_index;
+ ppe_cb = hns_get_ppe_cb(handle);
+ mac_cb = hns_get_mac_cb(handle);
+
+ for (idx = 0; idx < handle->q_num; idx++) {
+ queue = handle->qs[idx];
+ hns_rcb_update_stats(queue);
+
+ tx_bytes += queue->tx_ring.stats.tx_bytes;
+ tx_packets += queue->tx_ring.stats.tx_pkts;
+ rx_bytes += queue->rx_ring.stats.rx_bytes;
+ rx_packets += queue->rx_ring.stats.rx_pkts;
+
+ rx_errors += queue->rx_ring.stats.err_pkt_len
+ + queue->rx_ring.stats.l2_err
+ + queue->rx_ring.stats.l3l4_csum_err;
+ }
+
+ hns_ppe_update_stats(ppe_cb);
+ rx_missed_errors = ppe_cb->hw_stats.rx_drop_no_buf;
+ tx_errors += ppe_cb->hw_stats.tx_err_checksum
+ + ppe_cb->hw_stats.tx_err_fifo_empty;
+
+ if (mac_cb->mac_type == HNAE_PORT_SERVICE) {
+ hns_dsaf_update_stats(dsaf_dev, port);
+ /* for port upline direction, i.e., rx. */
+ rx_missed_errors += dsaf_dev->hw_stats[port].bp_drop;
+ rx_missed_errors += dsaf_dev->hw_stats[port].pad_drop;
+ rx_missed_errors += dsaf_dev->hw_stats[port].crc_false;
+
+ /* for port downline direction, i.e., tx. */
+ port = port + DSAF_PPE_INODE_BASE;
+ hns_dsaf_update_stats(dsaf_dev, port);
+ tx_dropped += dsaf_dev->hw_stats[port].bp_drop;
+ tx_dropped += dsaf_dev->hw_stats[port].pad_drop;
+ tx_dropped += dsaf_dev->hw_stats[port].crc_false;
+ tx_dropped += dsaf_dev->hw_stats[port].rslt_drop;
+ tx_dropped += dsaf_dev->hw_stats[port].vlan_drop;
+ tx_dropped += dsaf_dev->hw_stats[port].stp_drop;
+ }
+
+ hns_mac_update_stats(mac_cb);
+ rx_errors += mac_cb->hw_stats.rx_fifo_overrun_err;
+
+ tx_errors += mac_cb->hw_stats.tx_bad_pkts
+ + mac_cb->hw_stats.tx_fragment_err
+ + mac_cb->hw_stats.tx_jabber_err
+ + mac_cb->hw_stats.tx_underrun_err
+ + mac_cb->hw_stats.tx_crc_err;
+
+ net_stats->tx_bytes = tx_bytes;
+ net_stats->tx_packets = tx_packets;
+ net_stats->rx_bytes = rx_bytes;
+ net_stats->rx_dropped = 0;
+ net_stats->rx_packets = rx_packets;
+ net_stats->rx_errors = rx_errors;
+ net_stats->tx_errors = tx_errors;
+ net_stats->tx_dropped = tx_dropped;
+ net_stats->rx_missed_errors = rx_missed_errors;
+ net_stats->rx_crc_errors = mac_cb->hw_stats.rx_fcs_err;
+ net_stats->rx_frame_errors = mac_cb->hw_stats.rx_align_err;
+ net_stats->rx_fifo_errors = mac_cb->hw_stats.rx_fifo_overrun_err;
+ net_stats->rx_length_errors = mac_cb->hw_stats.rx_len_err;
+ net_stats->multicast = mac_cb->hw_stats.rx_mc_pkts;
+}
+
+static void hns_ae_get_stats(struct hnae_handle *handle, u64 *data)
+{
+ int idx;
+ struct hns_mac_cb *mac_cb;
+ struct hns_ppe_cb *ppe_cb;
+ u64 *p = data;
+ struct hnae_vf_cb *vf_cb;
+
+ if (!handle || !data) {
+ pr_err("hns_ae_get_stats NULL handle or data pointer!\n");
+ return;
+ }
+
+ vf_cb = hns_ae_get_vf_cb(handle);
+ mac_cb = hns_get_mac_cb(handle);
+ ppe_cb = hns_get_ppe_cb(handle);
+
+ for (idx = 0; idx < handle->q_num; idx++) {
+ hns_rcb_get_stats(handle->qs[idx], p);
+ p += hns_rcb_get_ring_sset_count((int)ETH_SS_STATS);
+ }
+
+ hns_ppe_get_stats(ppe_cb, p);
+ p += hns_ppe_get_sset_count((int)ETH_SS_STATS);
+
+ hns_mac_get_stats(mac_cb, p);
+ p += hns_mac_get_sset_count(mac_cb, (int)ETH_SS_STATS);
+
+ if (mac_cb->mac_type == HNAE_PORT_SERVICE)
+ hns_dsaf_get_stats(vf_cb->dsaf_dev, p, vf_cb->port_index);
+}
+
+static void hns_ae_get_strings(struct hnae_handle *handle,
+ u32 stringset, u8 *data)
+{
+ int port;
+ int idx;
+ struct hns_mac_cb *mac_cb;
+ struct hns_ppe_cb *ppe_cb;
+ struct dsaf_device *dsaf_dev = hns_ae_get_dsaf_dev(handle->dev);
+ u8 *p = data;
+ struct hnae_vf_cb *vf_cb;
+
+ assert(handle);
+
+ vf_cb = hns_ae_get_vf_cb(handle);
+ port = vf_cb->port_index;
+ mac_cb = hns_get_mac_cb(handle);
+ ppe_cb = hns_get_ppe_cb(handle);
+
+ for (idx = 0; idx < handle->q_num; idx++) {
+ hns_rcb_get_strings(stringset, p, idx);
+ p += ETH_GSTRING_LEN * hns_rcb_get_ring_sset_count(stringset);
+ }
+
+ hns_ppe_get_strings(ppe_cb, stringset, p);
+ p += ETH_GSTRING_LEN * hns_ppe_get_sset_count(stringset);
+
+ hns_mac_get_strings(mac_cb, stringset, p);
+ p += ETH_GSTRING_LEN * hns_mac_get_sset_count(mac_cb, stringset);
+
+ if (mac_cb->mac_type == HNAE_PORT_SERVICE)
+ hns_dsaf_get_strings(stringset, p, port, dsaf_dev);
+}
+
+static int hns_ae_get_sset_count(struct hnae_handle *handle, int stringset)
+{
+ u32 sset_count = 0;
+ struct hns_mac_cb *mac_cb;
+ struct dsaf_device *dsaf_dev = hns_ae_get_dsaf_dev(handle->dev);
+
+ assert(handle);
+
+ mac_cb = hns_get_mac_cb(handle);
+
+ sset_count += hns_rcb_get_ring_sset_count(stringset) * handle->q_num;
+ sset_count += hns_ppe_get_sset_count(stringset);
+ sset_count += hns_mac_get_sset_count(mac_cb, stringset);
+
+ if (mac_cb->mac_type == HNAE_PORT_SERVICE)
+ sset_count += hns_dsaf_get_sset_count(dsaf_dev, stringset);
+
+ return sset_count;
+}
+
+static int hns_ae_config_loopback(struct hnae_handle *handle,
+ enum hnae_loop loop, int en)
+{
+ int ret;
+ struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
+ struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+ struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev;
+
+ switch (loop) {
+ case MAC_INTERNALLOOP_PHY:
+ ret = 0;
+ break;
+ case MAC_INTERNALLOOP_SERDES:
+ ret = dsaf_dev->misc_op->cfg_serdes_loopback(vf_cb->mac_cb,
+ !!en);
+ break;
+ case MAC_INTERNALLOOP_MAC:
+ ret = hns_mac_config_mac_loopback(vf_cb->mac_cb, loop, en);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static void hns_ae_update_led_status(struct hnae_handle *handle)
+{
+ struct hns_mac_cb *mac_cb;
+
+ assert(handle);
+ mac_cb = hns_get_mac_cb(handle);
+ if (mac_cb->media_type != HNAE_MEDIA_TYPE_FIBER)
+ return;
+
+ hns_set_led_opt(mac_cb);
+}
+
+static int hns_ae_cpld_set_led_id(struct hnae_handle *handle,
+ enum hnae_led_state status)
+{
+ struct hns_mac_cb *mac_cb;
+
+ assert(handle);
+
+ mac_cb = hns_get_mac_cb(handle);
+
+ return hns_cpld_led_set_id(mac_cb, status);
+}
+
+static void hns_ae_get_regs(struct hnae_handle *handle, void *data)
+{
+ u32 *p = data;
+ int i;
+ struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
+ struct hns_ppe_cb *ppe_cb = hns_get_ppe_cb(handle);
+
+ hns_ppe_get_regs(ppe_cb, p);
+ p += hns_ppe_get_regs_count();
+
+ hns_rcb_get_common_regs(vf_cb->dsaf_dev->rcb_common[0], p);
+ p += hns_rcb_get_common_regs_count();
+
+ for (i = 0; i < handle->q_num; i++) {
+ hns_rcb_get_ring_regs(handle->qs[i], p);
+ p += hns_rcb_get_ring_regs_count();
+ }
+
+ hns_mac_get_regs(vf_cb->mac_cb, p);
+ p += hns_mac_get_regs_count(vf_cb->mac_cb);
+
+ if (vf_cb->mac_cb->mac_type == HNAE_PORT_SERVICE)
+ hns_dsaf_get_regs(vf_cb->dsaf_dev, vf_cb->port_index, p);
+}
+
+static int hns_ae_get_regs_len(struct hnae_handle *handle)
+{
+ u32 total_num;
+ struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
+
+ total_num = hns_ppe_get_regs_count();
+ total_num += hns_rcb_get_common_regs_count();
+ total_num += hns_rcb_get_ring_regs_count() * handle->q_num;
+ total_num += hns_mac_get_regs_count(vf_cb->mac_cb);
+
+ if (vf_cb->mac_cb->mac_type == HNAE_PORT_SERVICE)
+ total_num += hns_dsaf_get_regs_count();
+
+ return total_num;
+}
+
+static u32 hns_ae_get_rss_key_size(struct hnae_handle *handle)
+{
+ return HNS_PPEV2_RSS_KEY_SIZE;
+}
+
+static u32 hns_ae_get_rss_indir_size(struct hnae_handle *handle)
+{
+ return HNS_PPEV2_RSS_IND_TBL_SIZE;
+}
+
+static int hns_ae_get_rss(struct hnae_handle *handle, u32 *indir, u8 *key,
+ u8 *hfunc)
+{
+ struct hns_ppe_cb *ppe_cb = hns_get_ppe_cb(handle);
+
+ /* currently we support only one hash function, i.e. Toeplitz hash */
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+
+ /* get the RSS Key required by the user */
+ if (key)
+ memcpy(key, ppe_cb->rss_key, HNS_PPEV2_RSS_KEY_SIZE);
+
+ /* update the current hash->queue mappings from the shadow RSS table */
+ if (indir)
+ memcpy(indir, ppe_cb->rss_indir_table,
+ HNS_PPEV2_RSS_IND_TBL_SIZE * sizeof(*indir));
+
+ return 0;
+}
+
+static int hns_ae_set_rss(struct hnae_handle *handle, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+{
+ struct hns_ppe_cb *ppe_cb = hns_get_ppe_cb(handle);
+
+ /* set the RSS hash key if specified by the user */
+ if (key) {
+ memcpy(ppe_cb->rss_key, key, HNS_PPEV2_RSS_KEY_SIZE);
+ hns_ppe_set_rss_key(ppe_cb, ppe_cb->rss_key);
+ }
+
+ if (indir) {
+ /* update the shadow RSS table with user specified qids */
+ memcpy(ppe_cb->rss_indir_table, indir,
+ HNS_PPEV2_RSS_IND_TBL_SIZE * sizeof(*indir));
+
+ /* now update the hardware */
+ hns_ppe_set_indir_table(ppe_cb, ppe_cb->rss_indir_table);
+ }
+
+ return 0;
+}
+
+static struct hnae_ae_ops hns_dsaf_ops = {
+ .get_handle = hns_ae_get_handle,
+ .put_handle = hns_ae_put_handle,
+ .init_queue = hns_ae_init_queue,
+ .fini_queue = hns_ae_fini_queue,
+ .start = hns_ae_start,
+ .stop = hns_ae_stop,
+ .reset = hns_ae_reset,
+ .toggle_ring_irq = hns_ae_toggle_ring_irq,
+ .get_status = hns_ae_get_link_status,
+ .get_info = hns_ae_get_mac_info,
+ .adjust_link = hns_ae_adjust_link,
+ .need_adjust_link = hns_ae_need_adjust_link,
+ .set_loopback = hns_ae_config_loopback,
+ .get_ring_bdnum_limit = hns_ae_get_ring_bdnum_limit,
+ .get_pauseparam = hns_ae_get_pauseparam,
+ .set_pauseparam = hns_ae_set_pauseparam,
+ .get_coalesce_usecs = hns_ae_get_coalesce_usecs,
+ .get_max_coalesced_frames = hns_ae_get_max_coalesced_frames,
+ .set_coalesce_usecs = hns_ae_set_coalesce_usecs,
+ .set_coalesce_frames = hns_ae_set_coalesce_frames,
+ .get_coalesce_range = hns_ae_get_coalesce_range,
+ .set_promisc_mode = hns_ae_set_promisc_mode,
+ .set_mac_addr = hns_ae_set_mac_address,
+ .add_uc_addr = hns_ae_add_uc_address,
+ .rm_uc_addr = hns_ae_rm_uc_address,
+ .set_mc_addr = hns_ae_set_multicast_one,
+ .clr_mc_addr = hns_ae_clr_multicast,
+ .set_mtu = hns_ae_set_mtu,
+ .update_stats = hns_ae_update_stats,
+ .set_tso_stats = hns_ae_set_tso_stats,
+ .get_stats = hns_ae_get_stats,
+ .get_strings = hns_ae_get_strings,
+ .get_sset_count = hns_ae_get_sset_count,
+ .update_led_status = hns_ae_update_led_status,
+ .set_led_id = hns_ae_cpld_set_led_id,
+ .get_regs = hns_ae_get_regs,
+ .get_regs_len = hns_ae_get_regs_len,
+ .get_rss_key_size = hns_ae_get_rss_key_size,
+ .get_rss_indir_size = hns_ae_get_rss_indir_size,
+ .get_rss = hns_ae_get_rss,
+ .set_rss = hns_ae_set_rss
+};
+
+int hns_dsaf_ae_init(struct dsaf_device *dsaf_dev)
+{
+ struct hnae_ae_dev *ae_dev = &dsaf_dev->ae_dev;
+ static atomic_t id = ATOMIC_INIT(-1);
+
+ switch (dsaf_dev->dsaf_ver) {
+ case AE_VERSION_1:
+ hns_dsaf_ops.toggle_ring_irq = hns_ae_toggle_ring_irq;
+ break;
+ case AE_VERSION_2:
+ hns_dsaf_ops.toggle_ring_irq = hns_aev2_toggle_ring_irq;
+ break;
+ default:
+ break;
+ }
+
+ snprintf(ae_dev->name, AE_NAME_SIZE, "%s%d", DSAF_DEVICE_NAME,
+ (int)atomic_inc_return(&id));
+ ae_dev->ops = &hns_dsaf_ops;
+ ae_dev->dev = dsaf_dev->dev;
+
+ return hnae_ae_register(ae_dev, THIS_MODULE);
+}
+
+void hns_dsaf_ae_uninit(struct dsaf_device *dsaf_dev)
+{
+ hnae_ae_unregister(&dsaf_dev->ae_dev);
+}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
new file mode 100644
index 0000000000..8f391e2adc
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
@@ -0,0 +1,739 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ */
+
+#include <linux/delay.h>
+#include <linux/of_mdio.h>
+#include "hns_dsaf_main.h"
+#include "hns_dsaf_mac.h"
+#include "hns_dsaf_gmac.h"
+
+static const struct mac_stats_string g_gmac_stats_string[] = {
+ {"gmac_rx_octets_total_ok", MAC_STATS_FIELD_OFF(rx_good_bytes)},
+ {"gmac_rx_octets_bad", MAC_STATS_FIELD_OFF(rx_bad_bytes)},
+ {"gmac_rx_uc_pkts", MAC_STATS_FIELD_OFF(rx_uc_pkts)},
+ {"gmac_rx_mc_pkts", MAC_STATS_FIELD_OFF(rx_mc_pkts)},
+ {"gmac_rx_bc_pkts", MAC_STATS_FIELD_OFF(rx_bc_pkts)},
+ {"gmac_rx_pkts_64octets", MAC_STATS_FIELD_OFF(rx_64bytes)},
+ {"gmac_rx_pkts_65to127", MAC_STATS_FIELD_OFF(rx_65to127)},
+ {"gmac_rx_pkts_128to255", MAC_STATS_FIELD_OFF(rx_128to255)},
+ {"gmac_rx_pkts_256to511", MAC_STATS_FIELD_OFF(rx_256to511)},
+ {"gmac_rx_pkts_512to1023", MAC_STATS_FIELD_OFF(rx_512to1023)},
+ {"gmac_rx_pkts_1024to1518", MAC_STATS_FIELD_OFF(rx_1024to1518)},
+ {"gmac_rx_pkts_1519tomax", MAC_STATS_FIELD_OFF(rx_1519tomax)},
+ {"gmac_rx_fcs_errors", MAC_STATS_FIELD_OFF(rx_fcs_err)},
+ {"gmac_rx_tagged", MAC_STATS_FIELD_OFF(rx_vlan_pkts)},
+ {"gmac_rx_data_err", MAC_STATS_FIELD_OFF(rx_data_err)},
+ {"gmac_rx_align_errors", MAC_STATS_FIELD_OFF(rx_align_err)},
+ {"gmac_rx_long_errors", MAC_STATS_FIELD_OFF(rx_oversize)},
+ {"gmac_rx_jabber_errors", MAC_STATS_FIELD_OFF(rx_jabber_err)},
+ {"gmac_rx_pause_maccontrol", MAC_STATS_FIELD_OFF(rx_pfc_tc0)},
+ {"gmac_rx_unknown_maccontrol", MAC_STATS_FIELD_OFF(rx_unknown_ctrl)},
+ {"gmac_rx_very_long_err", MAC_STATS_FIELD_OFF(rx_long_err)},
+ {"gmac_rx_runt_err", MAC_STATS_FIELD_OFF(rx_minto64)},
+ {"gmac_rx_short_err", MAC_STATS_FIELD_OFF(rx_under_min)},
+ {"gmac_rx_filt_pkt", MAC_STATS_FIELD_OFF(rx_filter_pkts)},
+ {"gmac_rx_octets_total_filt", MAC_STATS_FIELD_OFF(rx_filter_bytes)},
+ {"gmac_rx_overrun_cnt", MAC_STATS_FIELD_OFF(rx_fifo_overrun_err)},
+ {"gmac_rx_length_err", MAC_STATS_FIELD_OFF(rx_len_err)},
+ {"gmac_rx_fail_comma", MAC_STATS_FIELD_OFF(rx_comma_err)},
+
+ {"gmac_tx_octets_ok", MAC_STATS_FIELD_OFF(tx_good_bytes)},
+ {"gmac_tx_octets_bad", MAC_STATS_FIELD_OFF(tx_bad_bytes)},
+ {"gmac_tx_uc_pkts", MAC_STATS_FIELD_OFF(tx_uc_pkts)},
+ {"gmac_tx_mc_pkts", MAC_STATS_FIELD_OFF(tx_mc_pkts)},
+ {"gmac_tx_bc_pkts", MAC_STATS_FIELD_OFF(tx_bc_pkts)},
+ {"gmac_tx_pkts_64octets", MAC_STATS_FIELD_OFF(tx_64bytes)},
+ {"gmac_tx_pkts_65to127", MAC_STATS_FIELD_OFF(tx_65to127)},
+ {"gmac_tx_pkts_128to255", MAC_STATS_FIELD_OFF(tx_128to255)},
+ {"gmac_tx_pkts_256to511", MAC_STATS_FIELD_OFF(tx_256to511)},
+ {"gmac_tx_pkts_512to1023", MAC_STATS_FIELD_OFF(tx_512to1023)},
+ {"gmac_tx_pkts_1024to1518", MAC_STATS_FIELD_OFF(tx_1024to1518)},
+ {"gmac_tx_pkts_1519tomax", MAC_STATS_FIELD_OFF(tx_1519tomax)},
+ {"gmac_tx_excessive_length_drop", MAC_STATS_FIELD_OFF(tx_jabber_err)},
+ {"gmac_tx_underrun", MAC_STATS_FIELD_OFF(tx_underrun_err)},
+ {"gmac_tx_tagged", MAC_STATS_FIELD_OFF(tx_vlan)},
+ {"gmac_tx_crc_error", MAC_STATS_FIELD_OFF(tx_crc_err)},
+ {"gmac_tx_pause_frames", MAC_STATS_FIELD_OFF(tx_pfc_tc0)}
+};
+
+static void hns_gmac_enable(void *mac_drv, enum mac_commom_mode mode)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ /* enable GE RX/TX */
+ if (mode == MAC_COMM_MODE_TX || mode == MAC_COMM_MODE_RX_AND_TX)
+ dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_TX_EN_B, 1);
+
+ if (mode == MAC_COMM_MODE_RX || mode == MAC_COMM_MODE_RX_AND_TX) {
+ /* enable rx pcs */
+ dsaf_set_dev_bit(drv, GMAC_PCS_RX_EN_REG, 0, 0);
+ dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_RX_EN_B, 1);
+ }
+}
+
+static void hns_gmac_disable(void *mac_drv, enum mac_commom_mode mode)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ /* disable GE RX/TX */
+ if (mode == MAC_COMM_MODE_TX || mode == MAC_COMM_MODE_RX_AND_TX)
+ dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_TX_EN_B, 0);
+
+ if (mode == MAC_COMM_MODE_RX || mode == MAC_COMM_MODE_RX_AND_TX) {
+ /* disable rx pcs */
+ dsaf_set_dev_bit(drv, GMAC_PCS_RX_EN_REG, 0, 1);
+ dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_RX_EN_B, 0);
+ }
+}
+
+/* hns_gmac_get_en - get port enable
+ * @mac_drv: mac device
+ * @rx: rx enable
+ * @tx: tx enable
+ */
+static void hns_gmac_get_en(void *mac_drv, u32 *rx, u32 *tx)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+ u32 porten;
+
+ porten = dsaf_read_dev(drv, GMAC_PORT_EN_REG);
+ *tx = dsaf_get_bit(porten, GMAC_PORT_TX_EN_B);
+ *rx = dsaf_get_bit(porten, GMAC_PORT_RX_EN_B);
+}
+
+static void hns_gmac_free(void *mac_drv)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+ struct dsaf_device *dsaf_dev
+ = (struct dsaf_device *)dev_get_drvdata(drv->dev);
+
+ u32 mac_id = drv->mac_id;
+
+ dsaf_dev->misc_op->ge_srst(dsaf_dev, mac_id, 0);
+}
+
+static void hns_gmac_set_tx_auto_pause_frames(void *mac_drv, u16 newval)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ dsaf_set_dev_field(drv, GMAC_FC_TX_TIMER_REG, GMAC_FC_TX_TIMER_M,
+ GMAC_FC_TX_TIMER_S, newval);
+}
+
+static void hns_gmac_get_tx_auto_pause_frames(void *mac_drv, u16 *newval)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ *newval = dsaf_get_dev_field(drv, GMAC_FC_TX_TIMER_REG,
+ GMAC_FC_TX_TIMER_M, GMAC_FC_TX_TIMER_S);
+}
+
+static void hns_gmac_config_max_frame_length(void *mac_drv, u16 newval)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ dsaf_set_dev_field(drv, GMAC_MAX_FRM_SIZE_REG, GMAC_MAX_FRM_SIZE_M,
+ GMAC_MAX_FRM_SIZE_S, newval);
+
+ dsaf_set_dev_field(drv, GAMC_RX_MAX_FRAME, GMAC_MAX_FRM_SIZE_M,
+ GMAC_MAX_FRM_SIZE_S, newval);
+}
+
+static void hns_gmac_config_pad_and_crc(void *mac_drv, u8 newval)
+{
+ u32 tx_ctrl;
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ tx_ctrl = dsaf_read_dev(drv, GMAC_TRANSMIT_CONTROL_REG);
+ dsaf_set_bit(tx_ctrl, GMAC_TX_PAD_EN_B, !!newval);
+ dsaf_set_bit(tx_ctrl, GMAC_TX_CRC_ADD_B, !!newval);
+ dsaf_write_dev(drv, GMAC_TRANSMIT_CONTROL_REG, tx_ctrl);
+}
+
+static void hns_gmac_config_an_mode(void *mac_drv, u8 newval)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ dsaf_set_dev_bit(drv, GMAC_TRANSMIT_CONTROL_REG,
+ GMAC_TX_AN_EN_B, !!newval);
+}
+
+static void hns_gmac_tx_loop_pkt_dis(void *mac_drv)
+{
+ u32 tx_loop_pkt_pri;
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ tx_loop_pkt_pri = dsaf_read_dev(drv, GMAC_TX_LOOP_PKT_PRI_REG);
+ dsaf_set_bit(tx_loop_pkt_pri, GMAC_TX_LOOP_PKT_EN_B, 1);
+ dsaf_set_bit(tx_loop_pkt_pri, GMAC_TX_LOOP_PKT_HIG_PRI_B, 0);
+ dsaf_write_dev(drv, GMAC_TX_LOOP_PKT_PRI_REG, tx_loop_pkt_pri);
+}
+
+static void hns_gmac_get_duplex_type(void *mac_drv,
+ enum hns_gmac_duplex_mdoe *duplex_mode)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ *duplex_mode = (enum hns_gmac_duplex_mdoe)dsaf_get_dev_bit(
+ drv, GMAC_DUPLEX_TYPE_REG, GMAC_DUPLEX_TYPE_B);
+}
+
+static void hns_gmac_get_port_mode(void *mac_drv, enum hns_port_mode *port_mode)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ *port_mode = (enum hns_port_mode)dsaf_get_dev_field(
+ drv, GMAC_PORT_MODE_REG, GMAC_PORT_MODE_M, GMAC_PORT_MODE_S);
+}
+
+static void hns_gmac_port_mode_get(void *mac_drv,
+ struct hns_gmac_port_mode_cfg *port_mode)
+{
+ u32 tx_ctrl;
+ u32 recv_ctrl;
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ port_mode->port_mode = (enum hns_port_mode)dsaf_get_dev_field(
+ drv, GMAC_PORT_MODE_REG, GMAC_PORT_MODE_M, GMAC_PORT_MODE_S);
+
+ tx_ctrl = dsaf_read_dev(drv, GMAC_TRANSMIT_CONTROL_REG);
+ recv_ctrl = dsaf_read_dev(drv, GMAC_RECV_CONTROL_REG);
+
+ port_mode->max_frm_size =
+ dsaf_get_dev_field(drv, GMAC_MAX_FRM_SIZE_REG,
+ GMAC_MAX_FRM_SIZE_M, GMAC_MAX_FRM_SIZE_S);
+ port_mode->short_runts_thr =
+ dsaf_get_dev_field(drv, GMAC_SHORT_RUNTS_THR_REG,
+ GMAC_SHORT_RUNTS_THR_M,
+ GMAC_SHORT_RUNTS_THR_S);
+
+ port_mode->pad_enable = dsaf_get_bit(tx_ctrl, GMAC_TX_PAD_EN_B);
+ port_mode->crc_add = dsaf_get_bit(tx_ctrl, GMAC_TX_CRC_ADD_B);
+ port_mode->an_enable = dsaf_get_bit(tx_ctrl, GMAC_TX_AN_EN_B);
+
+ port_mode->runt_pkt_en =
+ dsaf_get_bit(recv_ctrl, GMAC_RECV_CTRL_RUNT_PKT_EN_B);
+ port_mode->strip_pad_en =
+ dsaf_get_bit(recv_ctrl, GMAC_RECV_CTRL_STRIP_PAD_EN_B);
+}
+
+static void hns_gmac_pause_frm_cfg(void *mac_drv, u32 rx_pause_en,
+ u32 tx_pause_en)
+{
+ u32 pause_en;
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ pause_en = dsaf_read_dev(drv, GMAC_PAUSE_EN_REG);
+ dsaf_set_bit(pause_en, GMAC_PAUSE_EN_RX_FDFC_B, !!rx_pause_en);
+ dsaf_set_bit(pause_en, GMAC_PAUSE_EN_TX_FDFC_B, !!tx_pause_en);
+ dsaf_write_dev(drv, GMAC_PAUSE_EN_REG, pause_en);
+}
+
+static void hns_gmac_get_pausefrm_cfg(void *mac_drv, u32 *rx_pause_en,
+ u32 *tx_pause_en)
+{
+ u32 pause_en;
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ pause_en = dsaf_read_dev(drv, GMAC_PAUSE_EN_REG);
+
+ *rx_pause_en = dsaf_get_bit(pause_en, GMAC_PAUSE_EN_RX_FDFC_B);
+ *tx_pause_en = dsaf_get_bit(pause_en, GMAC_PAUSE_EN_TX_FDFC_B);
+}
+
+static bool hns_gmac_need_adjust_link(void *mac_drv, enum mac_speed speed,
+ int duplex)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+ struct hns_mac_cb *mac_cb = drv->mac_cb;
+
+ return (mac_cb->speed != speed) ||
+ (mac_cb->half_duplex == duplex);
+}
+
+static int hns_gmac_adjust_link(void *mac_drv, enum mac_speed speed,
+ u32 full_duplex)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ dsaf_set_dev_bit(drv, GMAC_DUPLEX_TYPE_REG,
+ GMAC_DUPLEX_TYPE_B, !!full_duplex);
+
+ switch (speed) {
+ case MAC_SPEED_10:
+ dsaf_set_dev_field(
+ drv, GMAC_PORT_MODE_REG,
+ GMAC_PORT_MODE_M, GMAC_PORT_MODE_S, 0x6);
+ break;
+ case MAC_SPEED_100:
+ dsaf_set_dev_field(
+ drv, GMAC_PORT_MODE_REG,
+ GMAC_PORT_MODE_M, GMAC_PORT_MODE_S, 0x7);
+ break;
+ case MAC_SPEED_1000:
+ dsaf_set_dev_field(
+ drv, GMAC_PORT_MODE_REG,
+ GMAC_PORT_MODE_M, GMAC_PORT_MODE_S, 0x8);
+ break;
+ default:
+ dev_err(drv->dev,
+ "hns_gmac_adjust_link fail, speed%d mac%d\n",
+ speed, drv->mac_id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void hns_gmac_set_uc_match(void *mac_drv, u16 en)
+{
+ struct mac_driver *drv = mac_drv;
+
+ dsaf_set_dev_bit(drv, GMAC_REC_FILT_CONTROL_REG,
+ GMAC_UC_MATCH_EN_B, !en);
+ dsaf_set_dev_bit(drv, GMAC_STATION_ADDR_HIGH_2_REG,
+ GMAC_ADDR_EN_B, !en);
+}
+
+static void hns_gmac_set_promisc(void *mac_drv, u8 en)
+{
+ struct mac_driver *drv = mac_drv;
+
+ if (drv->mac_cb->mac_type == HNAE_PORT_DEBUG)
+ hns_gmac_set_uc_match(mac_drv, en);
+}
+
+static int hns_gmac_wait_fifo_clean(void *mac_drv)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+ int wait_cnt;
+ u32 val;
+
+ wait_cnt = 0;
+ while (wait_cnt++ < HNS_MAX_WAIT_CNT) {
+ val = dsaf_read_dev(drv, GMAC_FIFO_STATE_REG);
+ /* bits 5~0 count packets not yet completely sent */
+ if ((val & 0x3f) == 0)
+ break;
+ usleep_range(100, 200);
+ }
+
+ if (wait_cnt >= HNS_MAX_WAIT_CNT) {
+ dev_err(drv->dev,
+ "hns ge %d fifo was not idle.\n", drv->mac_id);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static void hns_gmac_init(void *mac_drv)
+{
+ u32 port;
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+ struct dsaf_device *dsaf_dev
+ = (struct dsaf_device *)dev_get_drvdata(drv->dev);
+
+ port = drv->mac_id;
+
+ dsaf_dev->misc_op->ge_srst(dsaf_dev, port, 0);
+ mdelay(10);
+ dsaf_dev->misc_op->ge_srst(dsaf_dev, port, 1);
+ mdelay(10);
+ hns_gmac_disable(mac_drv, MAC_COMM_MODE_RX_AND_TX);
+ hns_gmac_tx_loop_pkt_dis(mac_drv);
+ if (drv->mac_cb->mac_type == HNAE_PORT_DEBUG)
+ hns_gmac_set_uc_match(mac_drv, 0);
+
+ hns_gmac_config_pad_and_crc(mac_drv, 1);
+
+ dsaf_set_dev_bit(drv, GMAC_MODE_CHANGE_EN_REG,
+ GMAC_MODE_CHANGE_EB_B, 1);
+
+ /* reduce the gmac tx water line to avoid gmac hang-up
+ * at 100M speed and half duplex.
+ */
+ dsaf_set_dev_field(drv, GMAC_TX_WATER_LINE_REG, GMAC_TX_WATER_LINE_MASK,
+ GMAC_TX_WATER_LINE_SHIFT, 8);
+}
+
+static void hns_gmac_update_stats(void *mac_drv)
+{
+ struct mac_hw_stats *hw_stats = NULL;
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ hw_stats = &drv->mac_cb->hw_stats;
+
+ /* RX */
+ hw_stats->rx_good_bytes
+ += dsaf_read_dev(drv, GMAC_RX_OCTETS_TOTAL_OK_REG);
+ hw_stats->rx_bad_bytes
+ += dsaf_read_dev(drv, GMAC_RX_OCTETS_BAD_REG);
+ hw_stats->rx_uc_pkts += dsaf_read_dev(drv, GMAC_RX_UC_PKTS_REG);
+ hw_stats->rx_mc_pkts += dsaf_read_dev(drv, GMAC_RX_MC_PKTS_REG);
+ hw_stats->rx_bc_pkts += dsaf_read_dev(drv, GMAC_RX_BC_PKTS_REG);
+ hw_stats->rx_64bytes
+ += dsaf_read_dev(drv, GMAC_RX_PKTS_64OCTETS_REG);
+ hw_stats->rx_65to127
+ += dsaf_read_dev(drv, GMAC_RX_PKTS_65TO127OCTETS_REG);
+ hw_stats->rx_128to255
+ += dsaf_read_dev(drv, GMAC_RX_PKTS_128TO255OCTETS_REG);
+ hw_stats->rx_256to511
+ += dsaf_read_dev(drv, GMAC_RX_PKTS_255TO511OCTETS_REG);
+ hw_stats->rx_512to1023
+ += dsaf_read_dev(drv, GMAC_RX_PKTS_512TO1023OCTETS_REG);
+ hw_stats->rx_1024to1518
+ += dsaf_read_dev(drv, GMAC_RX_PKTS_1024TO1518OCTETS_REG);
+ hw_stats->rx_1519tomax
+ += dsaf_read_dev(drv, GMAC_RX_PKTS_1519TOMAXOCTETS_REG);
+ hw_stats->rx_fcs_err += dsaf_read_dev(drv, GMAC_RX_FCS_ERRORS_REG);
+ hw_stats->rx_vlan_pkts += dsaf_read_dev(drv, GMAC_RX_TAGGED_REG);
+ hw_stats->rx_data_err += dsaf_read_dev(drv, GMAC_RX_DATA_ERR_REG);
+ hw_stats->rx_align_err
+ += dsaf_read_dev(drv, GMAC_RX_ALIGN_ERRORS_REG);
+ hw_stats->rx_oversize
+ += dsaf_read_dev(drv, GMAC_RX_LONG_ERRORS_REG);
+ hw_stats->rx_jabber_err
+ += dsaf_read_dev(drv, GMAC_RX_JABBER_ERRORS_REG);
+ hw_stats->rx_pfc_tc0
+ += dsaf_read_dev(drv, GMAC_RX_PAUSE_MACCTRL_FRAM_REG);
+ hw_stats->rx_unknown_ctrl
+ += dsaf_read_dev(drv, GMAC_RX_UNKNOWN_MACCTRL_FRAM_REG);
+ hw_stats->rx_long_err
+ += dsaf_read_dev(drv, GMAC_RX_VERY_LONG_ERR_CNT_REG);
+ hw_stats->rx_minto64
+ += dsaf_read_dev(drv, GMAC_RX_RUNT_ERR_CNT_REG);
+ hw_stats->rx_under_min
+ += dsaf_read_dev(drv, GMAC_RX_SHORT_ERR_CNT_REG);
+ hw_stats->rx_filter_pkts
+ += dsaf_read_dev(drv, GMAC_RX_FILT_PKT_CNT_REG);
+ hw_stats->rx_filter_bytes
+ += dsaf_read_dev(drv, GMAC_RX_OCTETS_TOTAL_FILT_REG);
+ hw_stats->rx_fifo_overrun_err
+ += dsaf_read_dev(drv, GMAC_RX_OVERRUN_CNT_REG);
+ hw_stats->rx_len_err
+ += dsaf_read_dev(drv, GMAC_RX_LENGTHFIELD_ERR_CNT_REG);
+ hw_stats->rx_comma_err
+ += dsaf_read_dev(drv, GMAC_RX_FAIL_COMMA_CNT_REG);
+
+ /* TX */
+ hw_stats->tx_good_bytes
+ += dsaf_read_dev(drv, GMAC_OCTETS_TRANSMITTED_OK_REG);
+ hw_stats->tx_bad_bytes
+ += dsaf_read_dev(drv, GMAC_OCTETS_TRANSMITTED_BAD_REG);
+ hw_stats->tx_uc_pkts += dsaf_read_dev(drv, GMAC_TX_UC_PKTS_REG);
+ hw_stats->tx_mc_pkts += dsaf_read_dev(drv, GMAC_TX_MC_PKTS_REG);
+ hw_stats->tx_bc_pkts += dsaf_read_dev(drv, GMAC_TX_BC_PKTS_REG);
+ hw_stats->tx_64bytes
+ += dsaf_read_dev(drv, GMAC_TX_PKTS_64OCTETS_REG);
+ hw_stats->tx_65to127
+ += dsaf_read_dev(drv, GMAC_TX_PKTS_65TO127OCTETS_REG);
+ hw_stats->tx_128to255
+ += dsaf_read_dev(drv, GMAC_TX_PKTS_128TO255OCTETS_REG);
+ hw_stats->tx_256to511
+ += dsaf_read_dev(drv, GMAC_TX_PKTS_255TO511OCTETS_REG);
+ hw_stats->tx_512to1023
+ += dsaf_read_dev(drv, GMAC_TX_PKTS_512TO1023OCTETS_REG);
+ hw_stats->tx_1024to1518
+ += dsaf_read_dev(drv, GMAC_TX_PKTS_1024TO1518OCTETS_REG);
+ hw_stats->tx_1519tomax
+ += dsaf_read_dev(drv, GMAC_TX_PKTS_1519TOMAXOCTETS_REG);
+ hw_stats->tx_jabber_err
+ += dsaf_read_dev(drv, GMAC_TX_EXCESSIVE_LENGTH_DROP_REG);
+ hw_stats->tx_underrun_err
+ += dsaf_read_dev(drv, GMAC_TX_UNDERRUN_REG);
+ hw_stats->tx_vlan += dsaf_read_dev(drv, GMAC_TX_TAGGED_REG);
+ hw_stats->tx_crc_err += dsaf_read_dev(drv, GMAC_TX_CRC_ERROR_REG);
+ hw_stats->tx_pfc_tc0
+ += dsaf_read_dev(drv, GMAC_TX_PAUSE_FRAMES_REG);
+}
+
+static void hns_gmac_set_mac_addr(void *mac_drv, const char *mac_addr)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
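+ /* the station address registers hold MAC bytes 0-1 in the high
+ * register and bytes 2-5 in the low register
+ */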
+ u32 high_val = mac_addr[1] | (mac_addr[0] << 8);
+
+ u32 low_val = mac_addr[5] | (mac_addr[4] << 8)
+ | (mac_addr[3] << 16) | (mac_addr[2] << 24);
+
+ u32 val = dsaf_read_dev(drv, GMAC_STATION_ADDR_HIGH_2_REG);
+ u32 sta_addr_en = dsaf_get_bit(val, GMAC_ADDR_EN_B);
+
+ dsaf_write_dev(drv, GMAC_STATION_ADDR_LOW_2_REG, low_val);
+ dsaf_write_dev(drv, GMAC_STATION_ADDR_HIGH_2_REG,
+ high_val | (sta_addr_en << GMAC_ADDR_EN_B));
+}
+
+static int hns_gmac_config_loopback(void *mac_drv, enum hnae_loop loop_mode,
+ u8 enable)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ switch (loop_mode) {
+ case MAC_INTERNALLOOP_MAC:
+ dsaf_set_dev_bit(drv, GMAC_LOOP_REG, GMAC_LP_REG_CF2MI_LP_EN_B,
+ !!enable);
+ break;
+ default:
+ dev_err(drv->dev, "loop_mode error\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void hns_gmac_get_info(void *mac_drv, struct mac_info *mac_info)
+{
+ enum hns_gmac_duplex_mdoe duplex;
+ enum hns_port_mode speed;
+ u32 rx_pause;
+ u32 tx_pause;
+ u32 rx;
+ u32 tx;
+ u16 fc_tx_timer;
+ struct hns_gmac_port_mode_cfg port_mode = { GMAC_10M_MII, 0 };
+
+ hns_gmac_port_mode_get(mac_drv, &port_mode);
+ mac_info->pad_and_crc_en = port_mode.crc_add && port_mode.pad_enable;
+ mac_info->auto_neg = port_mode.an_enable;
+
+ hns_gmac_get_tx_auto_pause_frames(mac_drv, &fc_tx_timer);
+ mac_info->tx_pause_time = fc_tx_timer;
+
+ hns_gmac_get_en(mac_drv, &rx, &tx);
+ mac_info->port_en = rx && tx;
+
+ hns_gmac_get_duplex_type(mac_drv, &duplex);
+ mac_info->duplex = duplex;
+
+ hns_gmac_get_port_mode(mac_drv, &speed);
+ switch (speed) {
+ case GMAC_10M_SGMII:
+ mac_info->speed = MAC_SPEED_10;
+ break;
+ case GMAC_100M_SGMII:
+ mac_info->speed = MAC_SPEED_100;
+ break;
+ case GMAC_1000M_SGMII:
+ mac_info->speed = MAC_SPEED_1000;
+ break;
+ default:
+ mac_info->speed = 0;
+ break;
+ }
+
+ hns_gmac_get_pausefrm_cfg(mac_drv, &rx_pause, &tx_pause);
+ mac_info->rx_pause_en = rx_pause;
+ mac_info->tx_pause_en = tx_pause;
+}
+
+static void hns_gmac_autoneg_stat(void *mac_drv, u32 *enable)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ *enable = dsaf_get_dev_bit(drv, GMAC_TRANSMIT_CONTROL_REG,
+ GMAC_TX_AN_EN_B);
+}
+
+static void hns_gmac_get_link_status(void *mac_drv, u32 *link_stat)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ *link_stat = dsaf_get_dev_bit(drv, GMAC_AN_NEG_STATE_REG,
+ GMAC_AN_NEG_STAT_RX_SYNC_OK_B);
+}
+
+static void hns_gmac_get_regs(void *mac_drv, void *data)
+{
+ u32 *regs = data;
+ int i;
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ /* base config registers */
+ regs[0] = dsaf_read_dev(drv, GMAC_DUPLEX_TYPE_REG);
+ regs[1] = dsaf_read_dev(drv, GMAC_FD_FC_TYPE_REG);
+ regs[2] = dsaf_read_dev(drv, GMAC_FC_TX_TIMER_REG);
+ regs[3] = dsaf_read_dev(drv, GMAC_FD_FC_ADDR_LOW_REG);
+ regs[4] = dsaf_read_dev(drv, GMAC_FD_FC_ADDR_HIGH_REG);
+ regs[5] = dsaf_read_dev(drv, GMAC_IPG_TX_TIMER_REG);
+ regs[6] = dsaf_read_dev(drv, GMAC_PAUSE_THR_REG);
+ regs[7] = dsaf_read_dev(drv, GMAC_MAX_FRM_SIZE_REG);
+ regs[8] = dsaf_read_dev(drv, GMAC_PORT_MODE_REG);
+ regs[9] = dsaf_read_dev(drv, GMAC_PORT_EN_REG);
+ regs[10] = dsaf_read_dev(drv, GMAC_PAUSE_EN_REG);
+ regs[11] = dsaf_read_dev(drv, GMAC_SHORT_RUNTS_THR_REG);
+ regs[12] = dsaf_read_dev(drv, GMAC_AN_NEG_STATE_REG);
+ regs[13] = dsaf_read_dev(drv, GMAC_TX_LOCAL_PAGE_REG);
+ regs[14] = dsaf_read_dev(drv, GMAC_TRANSMIT_CONTROL_REG);
+ regs[15] = dsaf_read_dev(drv, GMAC_REC_FILT_CONTROL_REG);
+ regs[16] = dsaf_read_dev(drv, GMAC_PTP_CONFIG_REG);
+
+ /* rx static registers */
+ regs[17] = dsaf_read_dev(drv, GMAC_RX_OCTETS_TOTAL_OK_REG);
+ regs[18] = dsaf_read_dev(drv, GMAC_RX_OCTETS_BAD_REG);
+ regs[19] = dsaf_read_dev(drv, GMAC_RX_UC_PKTS_REG);
+ regs[20] = dsaf_read_dev(drv, GMAC_RX_MC_PKTS_REG);
+ regs[21] = dsaf_read_dev(drv, GMAC_RX_BC_PKTS_REG);
+ regs[22] = dsaf_read_dev(drv, GMAC_RX_PKTS_64OCTETS_REG);
+ regs[23] = dsaf_read_dev(drv, GMAC_RX_PKTS_65TO127OCTETS_REG);
+ regs[24] = dsaf_read_dev(drv, GMAC_RX_PKTS_128TO255OCTETS_REG);
+ regs[25] = dsaf_read_dev(drv, GMAC_RX_PKTS_255TO511OCTETS_REG);
+ regs[26] = dsaf_read_dev(drv, GMAC_RX_PKTS_512TO1023OCTETS_REG);
+ regs[27] = dsaf_read_dev(drv, GMAC_RX_PKTS_1024TO1518OCTETS_REG);
+ regs[28] = dsaf_read_dev(drv, GMAC_RX_PKTS_1519TOMAXOCTETS_REG);
+ regs[29] = dsaf_read_dev(drv, GMAC_RX_FCS_ERRORS_REG);
+ regs[30] = dsaf_read_dev(drv, GMAC_RX_TAGGED_REG);
+ regs[31] = dsaf_read_dev(drv, GMAC_RX_DATA_ERR_REG);
+ regs[32] = dsaf_read_dev(drv, GMAC_RX_ALIGN_ERRORS_REG);
+ regs[33] = dsaf_read_dev(drv, GMAC_RX_LONG_ERRORS_REG);
+ regs[34] = dsaf_read_dev(drv, GMAC_RX_JABBER_ERRORS_REG);
+ regs[35] = dsaf_read_dev(drv, GMAC_RX_PAUSE_MACCTRL_FRAM_REG);
+ regs[36] = dsaf_read_dev(drv, GMAC_RX_UNKNOWN_MACCTRL_FRAM_REG);
+ regs[37] = dsaf_read_dev(drv, GMAC_RX_VERY_LONG_ERR_CNT_REG);
+ regs[38] = dsaf_read_dev(drv, GMAC_RX_RUNT_ERR_CNT_REG);
+ regs[39] = dsaf_read_dev(drv, GMAC_RX_SHORT_ERR_CNT_REG);
+ regs[40] = dsaf_read_dev(drv, GMAC_RX_FILT_PKT_CNT_REG);
+ regs[41] = dsaf_read_dev(drv, GMAC_RX_OCTETS_TOTAL_FILT_REG);
+
+ /* tx static registers */
+ regs[42] = dsaf_read_dev(drv, GMAC_OCTETS_TRANSMITTED_OK_REG);
+ regs[43] = dsaf_read_dev(drv, GMAC_OCTETS_TRANSMITTED_BAD_REG);
+ regs[44] = dsaf_read_dev(drv, GMAC_TX_UC_PKTS_REG);
+ regs[45] = dsaf_read_dev(drv, GMAC_TX_MC_PKTS_REG);
+ regs[46] = dsaf_read_dev(drv, GMAC_TX_BC_PKTS_REG);
+ regs[47] = dsaf_read_dev(drv, GMAC_TX_PKTS_64OCTETS_REG);
+ regs[48] = dsaf_read_dev(drv, GMAC_TX_PKTS_65TO127OCTETS_REG);
+ regs[49] = dsaf_read_dev(drv, GMAC_TX_PKTS_128TO255OCTETS_REG);
+ regs[50] = dsaf_read_dev(drv, GMAC_TX_PKTS_255TO511OCTETS_REG);
+ regs[51] = dsaf_read_dev(drv, GMAC_TX_PKTS_512TO1023OCTETS_REG);
+ regs[52] = dsaf_read_dev(drv, GMAC_TX_PKTS_1024TO1518OCTETS_REG);
+ regs[53] = dsaf_read_dev(drv, GMAC_TX_PKTS_1519TOMAXOCTETS_REG);
+ regs[54] = dsaf_read_dev(drv, GMAC_TX_EXCESSIVE_LENGTH_DROP_REG);
+ regs[55] = dsaf_read_dev(drv, GMAC_TX_UNDERRUN_REG);
+ regs[56] = dsaf_read_dev(drv, GMAC_TX_TAGGED_REG);
+ regs[57] = dsaf_read_dev(drv, GMAC_TX_CRC_ERROR_REG);
+ regs[58] = dsaf_read_dev(drv, GMAC_TX_PAUSE_FRAMES_REG);
+
+ regs[59] = dsaf_read_dev(drv, GAMC_RX_MAX_FRAME);
+ regs[60] = dsaf_read_dev(drv, GMAC_LINE_LOOP_BACK_REG);
+ regs[61] = dsaf_read_dev(drv, GMAC_CF_CRC_STRIP_REG);
+ regs[62] = dsaf_read_dev(drv, GMAC_MODE_CHANGE_EN_REG);
+ regs[63] = dsaf_read_dev(drv, GMAC_SIXTEEN_BIT_CNTR_REG);
+ regs[64] = dsaf_read_dev(drv, GMAC_LD_LINK_COUNTER_REG);
+ regs[65] = dsaf_read_dev(drv, GMAC_LOOP_REG);
+ regs[66] = dsaf_read_dev(drv, GMAC_RECV_CONTROL_REG);
+ regs[67] = dsaf_read_dev(drv, GMAC_VLAN_CODE_REG);
+ regs[68] = dsaf_read_dev(drv, GMAC_RX_OVERRUN_CNT_REG);
+ regs[69] = dsaf_read_dev(drv, GMAC_RX_LENGTHFIELD_ERR_CNT_REG);
+ regs[70] = dsaf_read_dev(drv, GMAC_RX_FAIL_COMMA_CNT_REG);
+
+ regs[71] = dsaf_read_dev(drv, GMAC_STATION_ADDR_LOW_0_REG);
+ regs[72] = dsaf_read_dev(drv, GMAC_STATION_ADDR_HIGH_0_REG);
+ regs[73] = dsaf_read_dev(drv, GMAC_STATION_ADDR_LOW_1_REG);
+ regs[74] = dsaf_read_dev(drv, GMAC_STATION_ADDR_HIGH_1_REG);
+ regs[75] = dsaf_read_dev(drv, GMAC_STATION_ADDR_LOW_2_REG);
+ regs[76] = dsaf_read_dev(drv, GMAC_STATION_ADDR_HIGH_2_REG);
+ regs[77] = dsaf_read_dev(drv, GMAC_STATION_ADDR_LOW_3_REG);
+ regs[78] = dsaf_read_dev(drv, GMAC_STATION_ADDR_HIGH_3_REG);
+ regs[79] = dsaf_read_dev(drv, GMAC_STATION_ADDR_LOW_4_REG);
+ regs[80] = dsaf_read_dev(drv, GMAC_STATION_ADDR_HIGH_4_REG);
+ regs[81] = dsaf_read_dev(drv, GMAC_STATION_ADDR_LOW_5_REG);
+ regs[82] = dsaf_read_dev(drv, GMAC_STATION_ADDR_HIGH_5_REG);
+ regs[83] = dsaf_read_dev(drv, GMAC_STATION_ADDR_LOW_MSK_0_REG);
+ regs[84] = dsaf_read_dev(drv, GMAC_STATION_ADDR_HIGH_MSK_0_REG);
+ regs[85] = dsaf_read_dev(drv, GMAC_STATION_ADDR_LOW_MSK_1_REG);
+ regs[86] = dsaf_read_dev(drv, GMAC_STATION_ADDR_HIGH_MSK_1_REG);
+ regs[87] = dsaf_read_dev(drv, GMAC_MAC_SKIP_LEN_REG);
+ regs[88] = dsaf_read_dev(drv, GMAC_TX_LOOP_PKT_PRI_REG);
+
+ /* mark end of mac regs */
+ for (i = 89; i < 96; i++)
+ regs[i] = 0xaaaaaaaa;
+}
+
+static void hns_gmac_get_stats(void *mac_drv, u64 *data)
+{
+ u32 i;
+ u64 *buf = data;
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+ struct mac_hw_stats *hw_stats = NULL;
+
+ hw_stats = &drv->mac_cb->hw_stats;
+
+ for (i = 0; i < ARRAY_SIZE(g_gmac_stats_string); i++) {
+ buf[i] = DSAF_STATS_READ(hw_stats,
+ g_gmac_stats_string[i].offset);
+ }
+}
+
+static void hns_gmac_get_strings(u32 stringset, u8 *data)
+{
+ u8 *buff = data;
+ u32 i;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(g_gmac_stats_string); i++)
+ ethtool_sprintf(&buff, g_gmac_stats_string[i].desc);
+}
+
+static int hns_gmac_get_sset_count(int stringset)
+{
+ if (stringset == ETH_SS_STATS)
+ return ARRAY_SIZE(g_gmac_stats_string);
+
+ return 0;
+}
+
+static int hns_gmac_get_regs_count(void)
+{
+ return ETH_GMAC_DUMP_NUM;
+}
+
+void *hns_gmac_config(struct hns_mac_cb *mac_cb, struct mac_params *mac_param)
+{
+ struct mac_driver *mac_drv;
+
+ mac_drv = devm_kzalloc(mac_cb->dev, sizeof(*mac_drv), GFP_KERNEL);
+ if (!mac_drv)
+ return NULL;
+
+ mac_drv->mac_init = hns_gmac_init;
+ mac_drv->mac_enable = hns_gmac_enable;
+ mac_drv->mac_disable = hns_gmac_disable;
+ mac_drv->mac_free = hns_gmac_free;
+ mac_drv->adjust_link = hns_gmac_adjust_link;
+ mac_drv->need_adjust_link = hns_gmac_need_adjust_link;
+ mac_drv->set_tx_auto_pause_frames = hns_gmac_set_tx_auto_pause_frames;
+ mac_drv->config_max_frame_length = hns_gmac_config_max_frame_length;
+ mac_drv->mac_pausefrm_cfg = hns_gmac_pause_frm_cfg;
+
+ mac_drv->mac_id = mac_param->mac_id;
+ mac_drv->mac_mode = mac_param->mac_mode;
+ mac_drv->io_base = mac_param->vaddr;
+ mac_drv->dev = mac_param->dev;
+ mac_drv->mac_cb = mac_cb;
+
+ mac_drv->set_mac_addr = hns_gmac_set_mac_addr;
+ mac_drv->set_an_mode = hns_gmac_config_an_mode;
+ mac_drv->config_loopback = hns_gmac_config_loopback;
+ mac_drv->config_pad_and_crc = hns_gmac_config_pad_and_crc;
+ mac_drv->get_info = hns_gmac_get_info;
+ mac_drv->autoneg_stat = hns_gmac_autoneg_stat;
+ mac_drv->get_pause_enable = hns_gmac_get_pausefrm_cfg;
+ mac_drv->get_link_status = hns_gmac_get_link_status;
+ mac_drv->get_regs = hns_gmac_get_regs;
+ mac_drv->get_regs_count = hns_gmac_get_regs_count;
+ mac_drv->get_ethtool_stats = hns_gmac_get_stats;
+ mac_drv->get_sset_count = hns_gmac_get_sset_count;
+ mac_drv->get_strings = hns_gmac_get_strings;
+ mac_drv->update_stats = hns_gmac_update_stats;
+ mac_drv->set_promiscuous = hns_gmac_set_promisc;
+ mac_drv->wait_fifo_clean = hns_gmac_wait_fifo_clean;
+
+ return (void *)mac_drv;
+}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.h
new file mode 100644
index 0000000000..ec266e7fff
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ */
+
+#ifndef _HNS_GMAC_H
+#define _HNS_GMAC_H
+
+#include "hns_dsaf_mac.h"
+
+enum hns_port_mode {
+ GMAC_10M_MII = 0,
+ GMAC_100M_MII,
+ GMAC_1000M_GMII,
+ GMAC_10M_RGMII,
+ GMAC_100M_RGMII,
+ GMAC_1000M_RGMII,
+ GMAC_10M_SGMII,
+ GMAC_100M_SGMII,
+ GMAC_1000M_SGMII,
+ GMAC_10000M_SGMII /* 10GE */
+};
+
+enum hns_gmac_duplex_mdoe {
+ GMAC_HALF_DUPLEX_MODE = 0,
+ GMAC_FULL_DUPLEX_MODE
+};
+
+struct hns_gmac_port_mode_cfg {
+ enum hns_port_mode port_mode;
+ u32 max_frm_size;
+ u32 short_runts_thr;
+ u32 pad_enable;
+ u32 crc_add;
+ u32 an_enable; /* auto-negotiation enable */
+ u32 runt_pkt_en;
+ u32 strip_pad_en;
+};
+
+#define ETH_GMAC_DUMP_NUM 96
+#endif /* _HNS_GMAC_H */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
new file mode 100644
index 0000000000..f75668c479
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -0,0 +1,1259 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ */
+
+#include <linux/acpi.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_mdio.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+
+#include "hns_dsaf_main.h"
+#include "hns_dsaf_misc.h"
+#include "hns_dsaf_rcb.h"
+
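+/* sentinel kept in mac_en_flg while the MAC is enabled,
+ * see hns_mac_start()/hns_mac_stop()
+ */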
+#define MAC_EN_FLAG_V 0xada0328
+
+static const u16 mac_phy_to_speed[] = {
+ [PHY_INTERFACE_MODE_MII] = MAC_SPEED_100,
+ [PHY_INTERFACE_MODE_GMII] = MAC_SPEED_1000,
+ [PHY_INTERFACE_MODE_SGMII] = MAC_SPEED_1000,
+ [PHY_INTERFACE_MODE_TBI] = MAC_SPEED_1000,
+ [PHY_INTERFACE_MODE_RMII] = MAC_SPEED_100,
+ [PHY_INTERFACE_MODE_RGMII] = MAC_SPEED_1000,
+ [PHY_INTERFACE_MODE_RGMII_ID] = MAC_SPEED_1000,
+ [PHY_INTERFACE_MODE_RGMII_RXID] = MAC_SPEED_1000,
+ [PHY_INTERFACE_MODE_RGMII_TXID] = MAC_SPEED_1000,
+ [PHY_INTERFACE_MODE_RTBI] = MAC_SPEED_1000,
+ [PHY_INTERFACE_MODE_XGMII] = MAC_SPEED_10000
+};
+
+static const enum mac_mode g_mac_mode_100[] = {
+ [PHY_INTERFACE_MODE_MII] = MAC_MODE_MII_100,
+ [PHY_INTERFACE_MODE_RMII] = MAC_MODE_RMII_100
+};
+
+static const enum mac_mode g_mac_mode_1000[] = {
+ [PHY_INTERFACE_MODE_GMII] = MAC_MODE_GMII_1000,
+ [PHY_INTERFACE_MODE_SGMII] = MAC_MODE_SGMII_1000,
+ [PHY_INTERFACE_MODE_TBI] = MAC_MODE_TBI_1000,
+ [PHY_INTERFACE_MODE_RGMII] = MAC_MODE_RGMII_1000,
+ [PHY_INTERFACE_MODE_RGMII_ID] = MAC_MODE_RGMII_1000,
+ [PHY_INTERFACE_MODE_RGMII_RXID] = MAC_MODE_RGMII_1000,
+ [PHY_INTERFACE_MODE_RGMII_TXID] = MAC_MODE_RGMII_1000,
+ [PHY_INTERFACE_MODE_RTBI] = MAC_MODE_RTBI_1000
+};
+
+static enum mac_mode hns_get_enet_interface(const struct hns_mac_cb *mac_cb)
+{
+ switch (mac_cb->max_speed) {
+ case MAC_SPEED_100:
+ return g_mac_mode_100[mac_cb->phy_if];
+ case MAC_SPEED_1000:
+ return g_mac_mode_1000[mac_cb->phy_if];
+ case MAC_SPEED_10000:
+ return MAC_MODE_XGMII_10000;
+ default:
+ return MAC_MODE_MII_100;
+ }
+}
+
+static u32 hns_mac_link_anti_shake(struct mac_driver *mac_ctrl_drv)
+{
+#define HNS_MAC_LINK_WAIT_TIME 5
+#define HNS_MAC_LINK_WAIT_CNT 40
+
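+ /* poll the link up to HNS_MAC_LINK_WAIT_CNT times, sleeping
+ * HNS_MAC_LINK_WAIT_TIME ms between reads, and stop as soon as the
+ * link reads down
+ */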
+ u32 link_status = 0;
+ int i;
+
+ if (!mac_ctrl_drv->get_link_status)
+ return link_status;
+
+ for (i = 0; i < HNS_MAC_LINK_WAIT_CNT; i++) {
+ msleep(HNS_MAC_LINK_WAIT_TIME);
+ mac_ctrl_drv->get_link_status(mac_ctrl_drv, &link_status);
+ if (!link_status)
+ break;
+ }
+
+ return link_status;
+}
+
+void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status)
+{
+ struct mac_driver *mac_ctrl_drv;
+ int ret, sfp_prsnt;
+
+ mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+ if (mac_ctrl_drv->get_link_status)
+ mac_ctrl_drv->get_link_status(mac_ctrl_drv, link_status);
+ else
+ *link_status = 0;
+
+ if (mac_cb->media_type == HNAE_MEDIA_TYPE_FIBER) {
+ ret = mac_cb->dsaf_dev->misc_op->get_sfp_prsnt(mac_cb,
+ &sfp_prsnt);
+ if (!ret)
+ *link_status = *link_status && sfp_prsnt;
+
+ /* a FIBER port may report a spurious (fake) link up.
+ * when the link status changes from down to up, apply anti-shake
+ * filtering; the anti-shake time is based on tests.
+ * only FIBER ports need this.
+ */
+ if (*link_status && !mac_cb->link)
+ *link_status = hns_mac_link_anti_shake(mac_ctrl_drv);
+ }
+
+ mac_cb->link = *link_status;
+}
+
+int hns_mac_get_port_info(struct hns_mac_cb *mac_cb,
+ u8 *auto_neg, u16 *speed, u8 *duplex)
+{
+ struct mac_driver *mac_ctrl_drv;
+ struct mac_info info;
+
+ mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+ if (!mac_ctrl_drv->get_info)
+ return -ENODEV;
+
+ mac_ctrl_drv->get_info(mac_ctrl_drv, &info);
+ if (auto_neg)
+ *auto_neg = info.auto_neg;
+ if (speed)
+ *speed = info.speed;
+ if (duplex)
+ *duplex = info.duplex;
+
+ return 0;
+}
+
+/**
+ *hns_mac_need_adjust_link - check whether the mac speed and duplex registers need updating
+ *@mac_cb: mac device
+ *@speed: phy device speed
+ *@duplex: phy device duplex
+ *
+ */
+bool hns_mac_need_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex)
+{
+ struct mac_driver *mac_ctrl_drv;
+
+ mac_ctrl_drv = (struct mac_driver *)(mac_cb->priv.mac);
+
+ if (mac_ctrl_drv->need_adjust_link)
+ return mac_ctrl_drv->need_adjust_link(mac_ctrl_drv,
+ (enum mac_speed)speed, duplex);
+ else
+ return true;
+}
+
+void hns_mac_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex)
+{
+ int ret;
+ struct mac_driver *mac_ctrl_drv;
+
+ mac_ctrl_drv = (struct mac_driver *)(mac_cb->priv.mac);
+
+ mac_cb->speed = speed;
+ mac_cb->half_duplex = !duplex;
+
+ if (mac_ctrl_drv->adjust_link) {
+ ret = mac_ctrl_drv->adjust_link(mac_ctrl_drv,
+ (enum mac_speed)speed, duplex);
+ if (ret) {
+ dev_err(mac_cb->dev,
+ "adjust_link failed, %s mac%d ret = %#x!\n",
+ mac_cb->dsaf_dev->ae_dev.name,
+ mac_cb->mac_id, ret);
+ return;
+ }
+ }
+}
+
+/**
+ *hns_mac_get_inner_port_num - get mac table inner port number
+ *@mac_cb: mac device
+ *@vmid: vm id
+ *@port_num: port number
+ *
+ */
+int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb, u8 vmid, u8 *port_num)
+{
+ int q_num_per_vf, vf_num_per_port;
+ int vm_queue_id;
+ u8 tmp_port;
+
+ if (mac_cb->dsaf_dev->dsaf_mode <= DSAF_MODE_ENABLE) {
+ if (mac_cb->mac_id != DSAF_MAX_PORT_NUM) {
+ dev_err(mac_cb->dev,
+ "input invalid, %s mac%d vmid%d !\n",
+ mac_cb->dsaf_dev->ae_dev.name,
+ mac_cb->mac_id, vmid);
+ return -EINVAL;
+ }
+ } else if (mac_cb->dsaf_dev->dsaf_mode < DSAF_MODE_MAX) {
+ if (mac_cb->mac_id >= DSAF_MAX_PORT_NUM) {
+ dev_err(mac_cb->dev,
+ "input invalid, %s mac%d vmid%d!\n",
+ mac_cb->dsaf_dev->ae_dev.name,
+ mac_cb->mac_id, vmid);
+ return -EINVAL;
+ }
+ } else {
+ dev_err(mac_cb->dev, "dsaf mode invalid, %s mac%d!\n",
+ mac_cb->dsaf_dev->ae_dev.name, mac_cb->mac_id);
+ return -EINVAL;
+ }
+
+ if (vmid >= mac_cb->dsaf_dev->rcb_common[0]->max_vfn) {
+ dev_err(mac_cb->dev, "input invalid, %s mac%d vmid%d !\n",
+ mac_cb->dsaf_dev->ae_dev.name, mac_cb->mac_id, vmid);
+ return -EINVAL;
+ }
+
+ q_num_per_vf = mac_cb->dsaf_dev->rcb_common[0]->max_q_per_vf;
+ vf_num_per_port = mac_cb->dsaf_dev->rcb_common[0]->max_vfn;
+
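+ /* the inner queue id is the port's queue base
+ * (mac_id * vf_num_per_port * q_num_per_vf) plus the vf's first queue
+ */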
+ vm_queue_id = vmid * q_num_per_vf +
+ vf_num_per_port * q_num_per_vf * mac_cb->mac_id;
+
+ switch (mac_cb->dsaf_dev->dsaf_mode) {
+ case DSAF_MODE_ENABLE_FIX:
+ tmp_port = 0;
+ break;
+ case DSAF_MODE_DISABLE_FIX:
+ tmp_port = 0;
+ break;
+ case DSAF_MODE_ENABLE_0VM:
+ case DSAF_MODE_ENABLE_8VM:
+ case DSAF_MODE_ENABLE_16VM:
+ case DSAF_MODE_ENABLE_32VM:
+ case DSAF_MODE_ENABLE_128VM:
+ case DSAF_MODE_DISABLE_2PORT_8VM:
+ case DSAF_MODE_DISABLE_2PORT_16VM:
+ case DSAF_MODE_DISABLE_2PORT_64VM:
+ case DSAF_MODE_DISABLE_6PORT_0VM:
+ case DSAF_MODE_DISABLE_6PORT_2VM:
+ case DSAF_MODE_DISABLE_6PORT_4VM:
+ case DSAF_MODE_DISABLE_6PORT_16VM:
+ tmp_port = vm_queue_id;
+ break;
+ default:
+ dev_err(mac_cb->dev, "dsaf mode invalid, %s mac%d!\n",
+ mac_cb->dsaf_dev->ae_dev.name, mac_cb->mac_id);
+ return -EINVAL;
+ }
+ tmp_port += DSAF_BASE_INNER_PORT_NUM;
+
+ *port_num = tmp_port;
+
+ return 0;
+}
+
+/**
+ *hns_mac_change_vf_addr - change vf mac address
+ *@mac_cb: mac device
+ *@vmid: vmid
+ *@addr: mac address
+ */
+int hns_mac_change_vf_addr(struct hns_mac_cb *mac_cb,
+ u32 vmid, const char *addr)
+{
+ int ret;
+ struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+ struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev;
+ struct dsaf_drv_mac_single_dest_entry mac_entry;
+ struct mac_entry_idx *old_entry;
+
+ old_entry = &mac_cb->addr_entry_idx[vmid];
+ if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) {
+ memcpy(mac_entry.addr, addr, sizeof(mac_entry.addr));
+ mac_entry.in_vlan_id = old_entry->vlan_id;
+ mac_entry.in_port_num = mac_cb->mac_id;
+ ret = hns_mac_get_inner_port_num(mac_cb, (u8)vmid,
+ &mac_entry.port_num);
+ if (ret)
+ return ret;
+
+ if ((old_entry->valid != 0) &&
+ (memcmp(old_entry->addr,
+ addr, sizeof(mac_entry.addr)) != 0)) {
+ ret = hns_dsaf_del_mac_entry(dsaf_dev,
+ old_entry->vlan_id,
+ mac_cb->mac_id,
+ old_entry->addr);
+ if (ret)
+ return ret;
+ }
+
+ ret = hns_dsaf_set_mac_uc_entry(dsaf_dev, &mac_entry);
+ if (ret)
+ return ret;
+ }
+
+ if ((mac_ctrl_drv->set_mac_addr) && (vmid == 0))
+ mac_ctrl_drv->set_mac_addr(mac_cb->priv.mac, addr);
+
+ memcpy(old_entry->addr, addr, sizeof(old_entry->addr));
+ old_entry->valid = 1;
+ return 0;
+}
+
+int hns_mac_add_uc_addr(struct hns_mac_cb *mac_cb, u8 vf_id,
+ const unsigned char *addr)
+{
+ struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev;
+ struct dsaf_drv_mac_single_dest_entry mac_entry;
+ int ret;
+
+ if (HNS_DSAF_IS_DEBUG(dsaf_dev))
+ return -ENOSPC;
+
+ memset(&mac_entry, 0, sizeof(mac_entry));
+ memcpy(mac_entry.addr, addr, sizeof(mac_entry.addr));
+ mac_entry.in_port_num = mac_cb->mac_id;
+ ret = hns_mac_get_inner_port_num(mac_cb, vf_id, &mac_entry.port_num);
+ if (ret)
+ return ret;
+
+ return hns_dsaf_set_mac_uc_entry(dsaf_dev, &mac_entry);
+}
+
+int hns_mac_rm_uc_addr(struct hns_mac_cb *mac_cb, u8 vf_id,
+ const unsigned char *addr)
+{
+ struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev;
+ struct dsaf_drv_mac_single_dest_entry mac_entry;
+ int ret;
+
+ if (HNS_DSAF_IS_DEBUG(dsaf_dev))
+ return -ENOSPC;
+
+ memset(&mac_entry, 0, sizeof(mac_entry));
+ memcpy(mac_entry.addr, addr, sizeof(mac_entry.addr));
+ mac_entry.in_port_num = mac_cb->mac_id;
+ ret = hns_mac_get_inner_port_num(mac_cb, vf_id, &mac_entry.port_num);
+ if (ret)
+ return ret;
+
+ return hns_dsaf_rm_mac_addr(dsaf_dev, &mac_entry);
+}
+
+int hns_mac_set_multi(struct hns_mac_cb *mac_cb,
+ u32 port_num, char *addr, bool enable)
+{
+ int ret;
+ struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev;
+ struct dsaf_drv_mac_single_dest_entry mac_entry;
+
+ if (!HNS_DSAF_IS_DEBUG(dsaf_dev) && addr) {
+ memcpy(mac_entry.addr, addr, sizeof(mac_entry.addr));
+ mac_entry.in_vlan_id = 0;/*vlan_id;*/
+ mac_entry.in_port_num = mac_cb->mac_id;
+ mac_entry.port_num = port_num;
+
+ if (!enable)
+ ret = hns_dsaf_del_mac_mc_port(dsaf_dev, &mac_entry);
+ else
+ ret = hns_dsaf_add_mac_mc_port(dsaf_dev, &mac_entry);
+ if (ret) {
+ dev_err(dsaf_dev->dev,
+ "set mac mc port failed, %s mac%d ret = %#x!\n",
+ mac_cb->dsaf_dev->ae_dev.name,
+ mac_cb->mac_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+int hns_mac_clr_multicast(struct hns_mac_cb *mac_cb, int vfn)
+{
+ struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev;
+ u8 port_num;
+ int ret = hns_mac_get_inner_port_num(mac_cb, vfn, &port_num);
+
+ if (ret)
+ return ret;
+
+ return hns_dsaf_clr_mac_mc_port(dsaf_dev, mac_cb->mac_id, port_num);
+}
+
+static void hns_mac_param_get(struct mac_params *param,
+ struct hns_mac_cb *mac_cb)
+{
+ param->vaddr = mac_cb->vaddr;
+ param->mac_mode = hns_get_enet_interface(mac_cb);
+ ether_addr_copy(param->addr, mac_cb->addr_entry_idx[0].addr);
+ param->mac_id = mac_cb->mac_id;
+ param->dev = mac_cb->dev;
+}
+
+/**
+ * hns_mac_port_config_bc_en - set broadcast rx&tx enable
+ * @mac_cb: mac device
+ * @port_num: queue number
+ * @vlan_id: vlan id
+ * @enable: enable
+ * return 0 - success , negative --fail
+ */
+static int hns_mac_port_config_bc_en(struct hns_mac_cb *mac_cb,
+ u32 port_num, u16 vlan_id, bool enable)
+{
+ int ret;
+ struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev;
+ struct dsaf_drv_mac_single_dest_entry mac_entry;
+
+ /* directly return ok in debug network mode */
+ if (mac_cb->mac_type == HNAE_PORT_DEBUG)
+ return 0;
+
+ if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) {
+ eth_broadcast_addr(mac_entry.addr);
+ mac_entry.in_vlan_id = vlan_id;
+ mac_entry.in_port_num = mac_cb->mac_id;
+ mac_entry.port_num = port_num;
+
+ if (!enable)
+ ret = hns_dsaf_del_mac_mc_port(dsaf_dev, &mac_entry);
+ else
+ ret = hns_dsaf_add_mac_mc_port(dsaf_dev, &mac_entry);
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * hns_mac_vm_config_bc_en - set broadcast rx&tx enable
+ * @mac_cb: mac device
+ * @vmid: vm id
+ * @enable: enable
+ * return 0 - success , negative --fail
+ */
+int hns_mac_vm_config_bc_en(struct hns_mac_cb *mac_cb, u32 vmid, bool enable)
+{
+ int ret;
+ struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev;
+ u8 port_num;
+ struct mac_entry_idx *uc_mac_entry;
+ struct dsaf_drv_mac_single_dest_entry mac_entry;
+
+ if (mac_cb->mac_type == HNAE_PORT_DEBUG)
+ return 0;
+
+ uc_mac_entry = &mac_cb->addr_entry_idx[vmid];
+
+ if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) {
+ eth_broadcast_addr(mac_entry.addr);
+ mac_entry.in_vlan_id = uc_mac_entry->vlan_id;
+ mac_entry.in_port_num = mac_cb->mac_id;
+ ret = hns_mac_get_inner_port_num(mac_cb, vmid, &port_num);
+ if (ret)
+ return ret;
+ mac_entry.port_num = port_num;
+
+ if (!enable)
+ ret = hns_dsaf_del_mac_mc_port(dsaf_dev, &mac_entry);
+ else
+ ret = hns_dsaf_add_mac_mc_port(dsaf_dev, &mac_entry);
+ return ret;
+ }
+
+ return 0;
+}
+
+int hns_mac_wait_fifo_clean(struct hns_mac_cb *mac_cb)
+{
+ struct mac_driver *drv = hns_mac_get_drv(mac_cb);
+
+ if (drv->wait_fifo_clean)
+ return drv->wait_fifo_clean(drv);
+
+ return 0;
+}
+
+void hns_mac_reset(struct hns_mac_cb *mac_cb)
+{
+ struct mac_driver *drv = hns_mac_get_drv(mac_cb);
+ bool is_ver1 = AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver);
+
+ drv->mac_init(drv);
+
+ if (drv->config_max_frame_length)
+ drv->config_max_frame_length(drv, mac_cb->max_frm);
+
+ if (drv->set_tx_auto_pause_frames)
+ drv->set_tx_auto_pause_frames(drv, mac_cb->tx_pause_frm_time);
+
+ if (drv->set_an_mode)
+ drv->set_an_mode(drv, 1);
+
+ if (drv->mac_pausefrm_cfg) {
+ if (mac_cb->mac_type == HNAE_PORT_DEBUG)
+ drv->mac_pausefrm_cfg(drv, !is_ver1, !is_ver1);
+ else /* mac rx pause must stay disabled; dsaf pfc is used instead */
+ drv->mac_pausefrm_cfg(drv, 0, 1);
+ }
+}
+
+int hns_mac_set_mtu(struct hns_mac_cb *mac_cb, u32 new_mtu, u32 buf_size)
+{
+ struct mac_driver *drv = hns_mac_get_drv(mac_cb);
+ u32 new_frm = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+
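+ /* the on-wire frame adds the ethernet header, FCS and one VLAN tag
+ * to the MTU and must fit within the BDs of a single packet
+ */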
+ if (new_frm > HNS_RCB_RING_MAX_BD_PER_PKT * buf_size)
+ return -EINVAL;
+
+ if (!drv->config_max_frame_length)
+ return -ECHILD;
+
+ /* adjust max frame to be at least the size of a standard frame */
+ if (new_frm < (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN))
+ new_frm = (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN);
+
+ drv->config_max_frame_length(drv, new_frm);
+
+ mac_cb->max_frm = new_frm;
+
+ return 0;
+}
+
+void hns_mac_start(struct hns_mac_cb *mac_cb)
+{
+ struct mac_driver *mac_drv = hns_mac_get_drv(mac_cb);
+
+ /* for virtualization */
+ if (mac_drv->mac_en_flg == MAC_EN_FLAG_V) {
+ /* the mac is already enabled; just count the extra virtual user */
+ mac_drv->virt_dev_num += 1;
+ return;
+ }
+
+ if (mac_drv->mac_enable) {
+ mac_drv->mac_enable(mac_cb->priv.mac, MAC_COMM_MODE_RX_AND_TX);
+ mac_drv->mac_en_flg = MAC_EN_FLAG_V;
+ }
+}
+
+void hns_mac_stop(struct hns_mac_cb *mac_cb)
+{
+ struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+ /* handle virtualization: only really stop when the last user is gone */
+ if (mac_ctrl_drv->virt_dev_num > 0) {
+ mac_ctrl_drv->virt_dev_num -= 1;
+ if (mac_ctrl_drv->virt_dev_num > 0)
+ return;
+ }
+
+ if (mac_ctrl_drv->mac_disable)
+ mac_ctrl_drv->mac_disable(mac_cb->priv.mac,
+ MAC_COMM_MODE_RX_AND_TX);
+
+ mac_ctrl_drv->mac_en_flg = 0;
+ mac_cb->link = 0;
+ mac_cb->dsaf_dev->misc_op->cpld_reset_led(mac_cb);
+}
+
+/**
+ * hns_mac_get_autoneg - get autonegotiation status
+ * @mac_cb: mac control block
+ * @auto_neg: output pointer to autoneg result
+ */
+void hns_mac_get_autoneg(struct hns_mac_cb *mac_cb, u32 *auto_neg)
+{
+ struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+ if (mac_ctrl_drv->autoneg_stat)
+ mac_ctrl_drv->autoneg_stat(mac_ctrl_drv, auto_neg);
+ else
+ *auto_neg = 0;
+}
+
+/**
+ * hns_mac_get_pauseparam - get rx & tx pause parameters
+ * @mac_cb: mac control block
+ * @rx_en: rx enable status
+ * @tx_en: tx enable status
+ */
+void hns_mac_get_pauseparam(struct hns_mac_cb *mac_cb, u32 *rx_en, u32 *tx_en)
+{
+ struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+ if (mac_ctrl_drv->get_pause_enable) {
+ mac_ctrl_drv->get_pause_enable(mac_ctrl_drv, rx_en, tx_en);
+ } else {
+ *rx_en = 0;
+ *tx_en = 0;
+ }
+}
+
+/**
+ * hns_mac_set_autoneg - enable or disable autonegotiation
+ * @mac_cb: mac control block
+ * @enable: enable or not
+ * return 0 - success , negative --fail
+ */
+int hns_mac_set_autoneg(struct hns_mac_cb *mac_cb, u8 enable)
+{
+ struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+ if (mac_cb->phy_if == PHY_INTERFACE_MODE_XGMII && enable) {
+ dev_err(mac_cb->dev, "enabling autoneg is not allowed!\n");
+ return -ENOTSUPP;
+ }
+
+ if (mac_ctrl_drv->set_an_mode)
+ mac_ctrl_drv->set_an_mode(mac_ctrl_drv, enable);
+
+ return 0;
+}
+
+/**
+ * hns_mac_set_pauseparam - set rx & tx pause parameter
+ * @mac_cb: mac control block
+ * @rx_en: rx enable or not
+ * @tx_en: tx enable or not
+ * return 0 - success , negative --fail
+ */
+int hns_mac_set_pauseparam(struct hns_mac_cb *mac_cb, u32 rx_en, u32 tx_en)
+{
+ struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+ bool is_ver1 = AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver);
+
+ if (mac_cb->mac_type == HNAE_PORT_DEBUG) {
+ if (is_ver1 && (tx_en || rx_en)) {
+ dev_err(mac_cb->dev, "macv1 can't enable tx/rx_pause!\n");
+ return -EINVAL;
+ }
+ }
+
+ if (mac_ctrl_drv->mac_pausefrm_cfg)
+ mac_ctrl_drv->mac_pausefrm_cfg(mac_ctrl_drv, rx_en, tx_en);
+
+ return 0;
+}
+
+/**
+ * hns_mac_init_ex - mac init
+ * @mac_cb: mac control block
+ * return 0 - success , negative --fail
+ */
+static int hns_mac_init_ex(struct hns_mac_cb *mac_cb)
+{
+ int ret;
+ struct mac_params param;
+ struct mac_driver *drv;
+
+ hns_dsaf_fix_mac_mode(mac_cb);
+
+ memset(&param, 0, sizeof(struct mac_params));
+ hns_mac_param_get(&param, mac_cb);
+
+ if (MAC_SPEED_FROM_MODE(param.mac_mode) < MAC_SPEED_10000)
+ drv = (struct mac_driver *)hns_gmac_config(mac_cb, &param);
+ else
+ drv = (struct mac_driver *)hns_xgmac_config(mac_cb, &param);
+
+ if (!drv)
+ return -ENOMEM;
+
+ mac_cb->priv.mac = (void *)drv;
+ hns_mac_reset(mac_cb);
+
+ hns_mac_adjust_link(mac_cb, mac_cb->speed, !mac_cb->half_duplex);
+
+ ret = hns_mac_port_config_bc_en(mac_cb, mac_cb->mac_id, 0, true);
+ if (ret)
+ goto free_mac_drv;
+
+ return 0;
+
+free_mac_drv:
+ drv->mac_free(mac_cb->priv.mac);
+ mac_cb->priv.mac = NULL;
+
+ return ret;
+}
+
+static int
+hns_mac_phy_parse_addr(struct device *dev, struct fwnode_handle *fwnode)
+{
+ u32 addr;
+ int ret;
+
+ ret = fwnode_property_read_u32(fwnode, "phy-addr", &addr);
+ if (ret) {
+ dev_err(dev, "has invalid PHY address ret:%d\n", ret);
+ return ret;
+ }
+
+ if (addr >= PHY_MAX_ADDR) {
+ dev_err(dev, "PHY address %i is too large\n", addr);
+ return -EINVAL;
+ }
+
+ return addr;
+}
+
+static int
+hns_mac_register_phydev(struct mii_bus *mdio, struct hns_mac_cb *mac_cb,
+ u32 addr)
+{
+ struct phy_device *phy;
+ const char *phy_type;
+ bool is_c45;
+ int rc;
+
+ rc = fwnode_property_read_string(mac_cb->fw_port,
+ "phy-mode", &phy_type);
+ if (rc < 0)
+ return rc;
+
+ if (!strcmp(phy_type, phy_modes(PHY_INTERFACE_MODE_XGMII)))
+ is_c45 = true;
+ else if (!strcmp(phy_type, phy_modes(PHY_INTERFACE_MODE_SGMII)))
+ is_c45 = false;
+ else
+ return -ENODATA;
+
+ phy = get_phy_device(mdio, addr, is_c45);
+ if (!phy || IS_ERR(phy))
+ return -EIO;
+
+ phy->irq = mdio->irq[addr];
+
+ /* All data is now stored in the phy struct;
+ * register it
+ */
+ rc = phy_device_register(phy);
+ if (rc) {
+ phy_device_free(phy);
+ dev_err(&mdio->dev, "registered phy fail at address %i\n",
+ addr);
+ return -ENODEV;
+ }
+
+ mac_cb->phy_dev = phy;
+
+ dev_dbg(&mdio->dev, "registered phy at address %i\n", addr);
+
+ return 0;
+}
+
+static int hns_mac_register_phy(struct hns_mac_cb *mac_cb)
+{
+ struct fwnode_reference_args args;
+ struct platform_device *pdev;
+ struct mii_bus *mii_bus;
+ int rc;
+ int addr;
+
+ /* register a phy_device for this port when it is described via ACPI */
+ if (!to_acpi_device_node(mac_cb->fw_port))
+ return -ENODEV;
+
+ rc = acpi_node_get_property_reference(
+ mac_cb->fw_port, "mdio-node", 0, &args);
+ if (rc)
+ return rc;
+ if (!is_acpi_device_node(args.fwnode))
+ return -EINVAL;
+
+ addr = hns_mac_phy_parse_addr(mac_cb->dev, mac_cb->fw_port);
+ if (addr < 0)
+ return addr;
+
+ /* find the platform device that owns the referenced mdio node */
+ pdev = hns_dsaf_find_platform_device(args.fwnode);
+ if (!pdev) {
+ dev_err(mac_cb->dev, "mac%d mdio pdev is NULL\n",
+ mac_cb->mac_id);
+ return -EINVAL;
+ }
+
+ mii_bus = platform_get_drvdata(pdev);
+ if (!mii_bus) {
+ dev_err(mac_cb->dev,
+ "mac%d mdio is NULL, dsaf will probe again later\n",
+ mac_cb->mac_id);
+ return -EPROBE_DEFER;
+ }
+
+ rc = hns_mac_register_phydev(mii_bus, mac_cb, addr);
+ if (!rc)
+ dev_dbg(mac_cb->dev, "mac%d register phy addr:%d\n",
+ mac_cb->mac_id, addr);
+
+ return rc;
+}
+
+static void hns_mac_remove_phydev(struct hns_mac_cb *mac_cb)
+{
+ if (!to_acpi_device_node(mac_cb->fw_port) || !mac_cb->phy_dev)
+ return;
+
+ phy_device_remove(mac_cb->phy_dev);
+ phy_device_free(mac_cb->phy_dev);
+
+ mac_cb->phy_dev = NULL;
+}
+
+#define MAC_MEDIA_TYPE_MAX_LEN 16
+
+static const struct {
+ enum hnae_media_type value;
+ const char *name;
+} media_type_defs[] = {
+ {HNAE_MEDIA_TYPE_UNKNOWN, "unknown" },
+ {HNAE_MEDIA_TYPE_FIBER, "fiber" },
+ {HNAE_MEDIA_TYPE_COPPER, "copper" },
+ {HNAE_MEDIA_TYPE_BACKPLANE, "backplane" },
+};
+
+/**
+ *hns_mac_get_info - get mac information from device node
+ *@mac_cb: mac device
+ * return: 0 --success, negative --fail
+ */
+static int hns_mac_get_info(struct hns_mac_cb *mac_cb)
+{
+ struct device_node *np;
+ struct regmap *syscon;
+ struct of_phandle_args cpld_args;
+ const char *media_type;
+ u32 i;
+ u32 ret;
+
+ mac_cb->link = false;
+ mac_cb->half_duplex = false;
+ mac_cb->media_type = HNAE_MEDIA_TYPE_UNKNOWN;
+ mac_cb->speed = mac_phy_to_speed[mac_cb->phy_if];
+ mac_cb->max_speed = mac_cb->speed;
+
+ if (mac_cb->phy_if == PHY_INTERFACE_MODE_SGMII) {
+ mac_cb->if_support = MAC_GMAC_SUPPORTED;
+ mac_cb->if_support |= SUPPORTED_1000baseT_Full;
+ } else if (mac_cb->phy_if == PHY_INTERFACE_MODE_XGMII) {
+ mac_cb->if_support = SUPPORTED_10000baseR_FEC;
+ mac_cb->if_support |= SUPPORTED_10000baseKR_Full;
+ }
+
+ mac_cb->max_frm = MAC_DEFAULT_MTU;
+ mac_cb->tx_pause_frm_time = MAC_DEFAULT_PAUSE_TIME;
+ mac_cb->port_rst_off = mac_cb->mac_id;
+ mac_cb->port_mode_off = 0;
+
+ /* if the dsaf node doesn't contain a port subnode, get phy-handle
+ * from dsaf node
+ */
+ if (!mac_cb->fw_port) {
+ np = of_parse_phandle(mac_cb->dev->of_node, "phy-handle",
+ mac_cb->mac_id);
+ mac_cb->phy_dev = of_phy_find_device(np);
+ if (mac_cb->phy_dev) {
+ /* refcount is held by of_phy_find_device()
+ * if the phy_dev is found
+ */
+ put_device(&mac_cb->phy_dev->mdio.dev);
+
+ dev_dbg(mac_cb->dev, "mac%d phy_node: %pOFn\n",
+ mac_cb->mac_id, np);
+ }
+ of_node_put(np);
+
+ return 0;
+ }
+
+ if (is_of_node(mac_cb->fw_port)) {
+ /* parse property from port subnode in dsaf */
+ np = of_parse_phandle(to_of_node(mac_cb->fw_port),
+ "phy-handle", 0);
+ mac_cb->phy_dev = of_phy_find_device(np);
+ if (mac_cb->phy_dev) {
+ /* refcount is held by of_phy_find_device()
+ * if the phy_dev is found
+ */
+ put_device(&mac_cb->phy_dev->mdio.dev);
+ dev_dbg(mac_cb->dev, "mac%d phy_node: %pOFn\n",
+ mac_cb->mac_id, np);
+ }
+ of_node_put(np);
+
+ np = of_parse_phandle(to_of_node(mac_cb->fw_port),
+ "serdes-syscon", 0);
+ syscon = syscon_node_to_regmap(np);
+ of_node_put(np);
+ if (IS_ERR_OR_NULL(syscon)) {
+ dev_err(mac_cb->dev, "serdes-syscon is needed!\n");
+ return -EINVAL;
+ }
+ mac_cb->serdes_ctrl = syscon;
+
+ ret = fwnode_property_read_u32(mac_cb->fw_port,
+ "port-rst-offset",
+ &mac_cb->port_rst_off);
+ if (ret) {
+ dev_dbg(mac_cb->dev,
+ "mac%d port-rst-offset not found, use default value.\n",
+ mac_cb->mac_id);
+ }
+
+ ret = fwnode_property_read_u32(mac_cb->fw_port,
+ "port-mode-offset",
+ &mac_cb->port_mode_off);
+ if (ret) {
+ dev_dbg(mac_cb->dev,
+ "mac%d port-mode-offset not found, use default value.\n",
+ mac_cb->mac_id);
+ }
+
+ ret = of_parse_phandle_with_fixed_args(
+ to_of_node(mac_cb->fw_port), "cpld-syscon", 1, 0,
+ &cpld_args);
+ if (ret) {
+ dev_dbg(mac_cb->dev, "mac%d no cpld-syscon found.\n",
+ mac_cb->mac_id);
+ mac_cb->cpld_ctrl = NULL;
+ } else {
+ syscon = syscon_node_to_regmap(cpld_args.np);
+ if (IS_ERR_OR_NULL(syscon)) {
+ dev_dbg(mac_cb->dev, "no cpld-syscon found!\n");
+ mac_cb->cpld_ctrl = NULL;
+ } else {
+ mac_cb->cpld_ctrl = syscon;
+ mac_cb->cpld_ctrl_reg = cpld_args.args[0];
+ }
+ }
+ } else if (is_acpi_node(mac_cb->fw_port)) {
+ ret = hns_mac_register_phy(mac_cb);
+ /* the mac works with or without a phy. If the port is not
+ * connected to a phy, the return value is ignored; only when a
+ * phy exists but its mdio bus cannot be found is the return
+ * value handled.
+ */
+ if (ret == -EPROBE_DEFER)
+ return ret;
+ } else {
+ dev_err(mac_cb->dev, "mac%d cannot find phy node\n",
+ mac_cb->mac_id);
+ }
+
+ if (!fwnode_property_read_string(mac_cb->fw_port, "media-type",
+ &media_type)) {
+ for (i = 0; i < ARRAY_SIZE(media_type_defs); i++) {
+ if (!strncmp(media_type_defs[i].name, media_type,
+ MAC_MEDIA_TYPE_MAX_LEN)) {
+ mac_cb->media_type = media_type_defs[i].value;
+ break;
+ }
+ }
+ }
+
+ if (fwnode_property_read_u8_array(mac_cb->fw_port, "mc-mac-mask",
+ mac_cb->mc_mask, ETH_ALEN)) {
+ dev_warn(mac_cb->dev,
+ "no mc-mac-mask property, set to default value.\n");
+ eth_broadcast_addr(mac_cb->mc_mask);
+ }
+
+ return 0;
+}
+
+/**
+ * hns_mac_get_mode - get mac mode
+ * @phy_if: phy interface
+ * return 0 - gmac, 1 - xgmac , negative --fail
+ */
+static int hns_mac_get_mode(phy_interface_t phy_if)
+{
+ switch (phy_if) {
+ case PHY_INTERFACE_MODE_SGMII:
+ return MAC_GMAC_IDX;
+ case PHY_INTERFACE_MODE_XGMII:
+ return MAC_XGMAC_IDX;
+ default:
+ return -EINVAL;
+ }
+}
+
+static u8 __iomem *
+hns_mac_get_vaddr(struct dsaf_device *dsaf_dev,
+ struct hns_mac_cb *mac_cb, u32 mac_mode_idx)
+{
+ u8 __iomem *base = dsaf_dev->io_base;
+ int mac_id = mac_cb->mac_id;
+
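+ /* service port MAC blocks start at offset 0x40000 with a 0x4000
+ * per-port stride; XGMAC mode (mac_mode_idx 1) sits 0x20000 lower.
+ * debug ports use the PPE register space instead.
+ */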
+ if (mac_cb->mac_type == HNAE_PORT_SERVICE)
+ return base + 0x40000 + mac_id * 0x4000 -
+ mac_mode_idx * 0x20000;
+ else
+ return dsaf_dev->ppe_base + 0x1000;
+}
+
+/**
+ * hns_mac_get_cfg - get mac cfg from dtb or acpi table
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @mac_cb: mac control block
+ * return 0 - success , negative --fail
+ */
+static int
+hns_mac_get_cfg(struct dsaf_device *dsaf_dev, struct hns_mac_cb *mac_cb)
+{
+ int ret;
+ u32 mac_mode_idx;
+
+ mac_cb->dsaf_dev = dsaf_dev;
+ mac_cb->dev = dsaf_dev->dev;
+
+ mac_cb->sys_ctl_vaddr = dsaf_dev->sc_base;
+ mac_cb->serdes_vaddr = dsaf_dev->sds_base;
+
+ mac_cb->sfp_prsnt = 0;
+ mac_cb->txpkt_for_led = 0;
+ mac_cb->rxpkt_for_led = 0;
+
+ if (!HNS_DSAF_IS_DEBUG(dsaf_dev))
+ mac_cb->mac_type = HNAE_PORT_SERVICE;
+ else
+ mac_cb->mac_type = HNAE_PORT_DEBUG;
+
+ mac_cb->phy_if = dsaf_dev->misc_op->get_phy_if(mac_cb);
+
+ ret = hns_mac_get_mode(mac_cb->phy_if);
+ if (ret < 0) {
+ dev_err(dsaf_dev->dev,
+ "hns_mac_get_mode failed, mac%d ret = %#x!\n",
+ mac_cb->mac_id, ret);
+ return ret;
+ }
+ mac_mode_idx = (u32)ret;
+
+ ret = hns_mac_get_info(mac_cb);
+ if (ret)
+ return ret;
+
+ mac_cb->dsaf_dev->misc_op->cpld_reset_led(mac_cb);
+ mac_cb->vaddr = hns_mac_get_vaddr(dsaf_dev, mac_cb, mac_mode_idx);
+
+ return 0;
+}
+
+static int hns_mac_get_max_port_num(struct dsaf_device *dsaf_dev)
+{
+ if (HNS_DSAF_IS_DEBUG(dsaf_dev))
+ return 1;
+ else
+ return DSAF_MAX_PORT_NUM;
+}
+
+void hns_mac_enable(struct hns_mac_cb *mac_cb, enum mac_commom_mode mode)
+{
+ struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+ mac_ctrl_drv->mac_enable(mac_cb->priv.mac, mode);
+}
+
+void hns_mac_disable(struct hns_mac_cb *mac_cb, enum mac_commom_mode mode)
+{
+ struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+ mac_ctrl_drv->mac_disable(mac_cb->priv.mac, mode);
+}
+
+/**
+ * hns_mac_init - init mac
+ * @dsaf_dev: dsa fabric device struct pointer
+ * return 0 - success , negative --fail
+ */
+int hns_mac_init(struct dsaf_device *dsaf_dev)
+{
+ bool found = false;
+ int ret;
+ u32 port_id;
+ int max_port_num = hns_mac_get_max_port_num(dsaf_dev);
+ struct hns_mac_cb *mac_cb;
+ struct fwnode_handle *child;
+
+ device_for_each_child_node(dsaf_dev->dev, child) {
+ ret = fwnode_property_read_u32(child, "reg", &port_id);
+ if (ret) {
+ fwnode_handle_put(child);
+ dev_err(dsaf_dev->dev,
+ "get reg fail, ret=%d!\n", ret);
+ return ret;
+ }
+ if (port_id >= max_port_num) {
+ fwnode_handle_put(child);
+ dev_err(dsaf_dev->dev,
+ "reg(%u) out of range!\n", port_id);
+ return -EINVAL;
+ }
+ mac_cb = devm_kzalloc(dsaf_dev->dev, sizeof(*mac_cb),
+ GFP_KERNEL);
+ if (!mac_cb) {
+ fwnode_handle_put(child);
+ return -ENOMEM;
+ }
+ mac_cb->fw_port = child;
+ mac_cb->mac_id = (u8)port_id;
+ dsaf_dev->mac_cb[port_id] = mac_cb;
+ found = true;
+ }
+
+ /* if no port subnode was found under the dsaf node,
+ * init all ports; this keeps compatibility with the old dts
+ */
+ if (!found) {
+ for (port_id = 0; port_id < max_port_num; port_id++) {
+ mac_cb = devm_kzalloc(dsaf_dev->dev, sizeof(*mac_cb),
+ GFP_KERNEL);
+ if (!mac_cb)
+ return -ENOMEM;
+
+ mac_cb->mac_id = port_id;
+ dsaf_dev->mac_cb[port_id] = mac_cb;
+ }
+ }
+
+ /* init mac_cb for all port */
+ for (port_id = 0; port_id < max_port_num; port_id++) {
+ mac_cb = dsaf_dev->mac_cb[port_id];
+ if (!mac_cb)
+ continue;
+
+ ret = hns_mac_get_cfg(dsaf_dev, mac_cb);
+ if (ret)
+ return ret;
+
+ ret = hns_mac_init_ex(mac_cb);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+void hns_mac_uninit(struct dsaf_device *dsaf_dev)
+{
+ int i;
+ int max_port_num = hns_mac_get_max_port_num(dsaf_dev);
+
+ for (i = 0; i < max_port_num; i++) {
+ if (!dsaf_dev->mac_cb[i])
+ continue;
+
+ dsaf_dev->misc_op->cpld_reset_led(dsaf_dev->mac_cb[i]);
+ hns_mac_remove_phydev(dsaf_dev->mac_cb[i]);
+ dsaf_dev->mac_cb[i] = NULL;
+ }
+}
+
+int hns_mac_config_mac_loopback(struct hns_mac_cb *mac_cb,
+ enum hnae_loop loop, int en)
+{
+ int ret;
+ struct mac_driver *drv = hns_mac_get_drv(mac_cb);
+
+ if (drv->config_loopback)
+ ret = drv->config_loopback(drv, loop, en);
+ else
+ ret = -ENOTSUPP;
+
+ return ret;
+}
+
+void hns_mac_update_stats(struct hns_mac_cb *mac_cb)
+{
+ struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+ mac_ctrl_drv->update_stats(mac_ctrl_drv);
+}
+
+void hns_mac_get_stats(struct hns_mac_cb *mac_cb, u64 *data)
+{
+ struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+ mac_ctrl_drv->get_ethtool_stats(mac_ctrl_drv, data);
+}
+
+void hns_mac_get_strings(struct hns_mac_cb *mac_cb,
+ int stringset, u8 *data)
+{
+ struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+ mac_ctrl_drv->get_strings(stringset, data);
+}
+
+int hns_mac_get_sset_count(struct hns_mac_cb *mac_cb, int stringset)
+{
+ struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+ return mac_ctrl_drv->get_sset_count(stringset);
+}
+
+void hns_mac_set_promisc(struct hns_mac_cb *mac_cb, u8 en)
+{
+ struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+ hns_dsaf_set_promisc_tcam(mac_cb->dsaf_dev, mac_cb->mac_id, !!en);
+
+ if (mac_ctrl_drv->set_promiscuous)
+ mac_ctrl_drv->set_promiscuous(mac_ctrl_drv, en);
+}
+
+int hns_mac_get_regs_count(struct hns_mac_cb *mac_cb)
+{
+ struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+ return mac_ctrl_drv->get_regs_count();
+}
+
+void hns_mac_get_regs(struct hns_mac_cb *mac_cb, void *data)
+{
+ struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+ mac_ctrl_drv->get_regs(mac_ctrl_drv, data);
+}
+
+void hns_set_led_opt(struct hns_mac_cb *mac_cb)
+{
+ int nic_data;
+ int txpkts, rxpkts;
+
+ txpkts = mac_cb->txpkt_for_led - mac_cb->hw_stats.tx_good_pkts;
+ rxpkts = mac_cb->rxpkt_for_led - mac_cb->hw_stats.rx_good_pkts;
+ if (txpkts || rxpkts)
+ nic_data = 1;
+ else
+ nic_data = 0;
+ mac_cb->txpkt_for_led = mac_cb->hw_stats.tx_good_pkts;
+ mac_cb->rxpkt_for_led = mac_cb->hw_stats.rx_good_pkts;
+ mac_cb->dsaf_dev->misc_op->cpld_set_led(mac_cb, (int)mac_cb->link,
+ mac_cb->speed, nic_data);
+}
+
+int hns_cpld_led_set_id(struct hns_mac_cb *mac_cb,
+ enum hnae_led_state status)
+{
+ if (!mac_cb)
+ return 0;
+
+ return mac_cb->dsaf_dev->misc_op->cpld_set_led_id(mac_cb, status);
+}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
new file mode 100644
index 0000000000..edf0bcf76a
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
@@ -0,0 +1,467 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ */
+
+#ifndef _HNS_DSAF_MAC_H
+#define _HNS_DSAF_MAC_H
+
+#include <linux/if_vlan.h>
+#include <linux/kernel.h>
+#include <linux/phy.h>
+#include <linux/regmap.h>
+#include "hns_dsaf_main.h"
+
+struct dsaf_device;
+
+#define MAC_GMAC_SUPPORTED \
+ (SUPPORTED_10baseT_Half \
+ | SUPPORTED_10baseT_Full \
+ | SUPPORTED_100baseT_Half \
+ | SUPPORTED_100baseT_Full \
+ | SUPPORTED_Autoneg)
+
+#define MAC_DEFAULT_MTU (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN + ETH_DATA_LEN)
+#define MAC_MAX_MTU 9600
+#define MAC_MAX_MTU_V2 9728
+#define MAC_MIN_MTU 68
+#define MAC_MAX_MTU_DBG MAC_DEFAULT_MTU
+
+#define MAC_DEFAULT_PAUSE_TIME 0xffff
+
+#define MAC_GMAC_IDX 0
+#define MAC_XGMAC_IDX 1
+
+#define ETH_STATIC_REG 1
+#define ETH_DUMP_REG 5
+/* check mac addr broadcast */
+#define MAC_IS_BROADCAST(p) ((*(p) == 0xff) && (*((p) + 1) == 0xff) && \
+ (*((p) + 2) == 0xff) && (*((p) + 3) == 0xff) && \
+ (*((p) + 4) == 0xff) && (*((p) + 5) == 0xff))
+
+/* check mac addr is 01-00-5e-xx-xx-xx*/
+#define MAC_IS_L3_MULTICAST(p) ((*((p) + 0) == 0x01) && \
+ (*((p) + 1) == 0x00) && \
+ (*((p) + 2) == 0x5e))
+
+/* check whether all bits of the mac addr are zero */
+#define MAC_IS_ALL_ZEROS(p) ((*(p) == 0) && (*((p) + 1) == 0) && \
+ (*((p) + 2) == 0) && (*((p) + 3) == 0) && \
+ (*((p) + 4) == 0) && (*((p) + 5) == 0))
+
+/*check mac addr multicast*/
+#define MAC_IS_MULTICAST(p) ((*((u8 *)((p) + 0)) & 0x01) ? (1) : (0))
+
+struct mac_priv {
+ void *mac;
+};
+
+/* net speed */
+enum mac_speed {
+ MAC_SPEED_10 = 10, /**< 10 Mbps */
+ MAC_SPEED_100 = 100, /**< 100 Mbps */
+ MAC_SPEED_1000 = 1000, /**< 1000 Mbps = 1 Gbps */
+ MAC_SPEED_10000 = 10000 /**< 10000 Mbps = 10 Gbps */
+};
+
+/*mac interface keyword */
+enum mac_intf {
+ MAC_IF_NONE = 0x00000000, /**< interface not valid */
+ MAC_IF_MII = 0x00010000, /**< MII interface */
+ MAC_IF_RMII = 0x00020000, /**< RMII interface */
+ MAC_IF_SMII = 0x00030000, /**< SMII interface */
+ MAC_IF_GMII = 0x00040000, /**< GMII interface */
+ MAC_IF_RGMII = 0x00050000, /**< RGMII interface */
+ MAC_IF_TBI = 0x00060000, /**< TBI interface */
+ MAC_IF_RTBI = 0x00070000, /**< RTBI interface */
+ MAC_IF_SGMII = 0x00080000, /**< SGMII interface */
+ MAC_IF_XGMII = 0x00090000, /**< XGMII interface */
+ MAC_IF_QSGMII = 0x000a0000 /**< QSGMII interface */
+};
+
+/*mac mode */
+enum mac_mode {
+ /**< Invalid Ethernet mode */
+ MAC_MODE_INVALID = 0,
+ /**< 10 Mbps MII */
+ MAC_MODE_MII_10 = (MAC_IF_MII | MAC_SPEED_10),
+ /**< 100 Mbps MII */
+ MAC_MODE_MII_100 = (MAC_IF_MII | MAC_SPEED_100),
+ /**< 10 Mbps RMII */
+ MAC_MODE_RMII_10 = (MAC_IF_RMII | MAC_SPEED_10),
+ /**< 100 Mbps RMII */
+ MAC_MODE_RMII_100 = (MAC_IF_RMII | MAC_SPEED_100),
+ /**< 10 Mbps SMII */
+ MAC_MODE_SMII_10 = (MAC_IF_SMII | MAC_SPEED_10),
+ /**< 100 Mbps SMII */
+ MAC_MODE_SMII_100 = (MAC_IF_SMII | MAC_SPEED_100),
+ /**< 1000 Mbps GMII */
+ MAC_MODE_GMII_1000 = (MAC_IF_GMII | MAC_SPEED_1000),
+ /**< 10 Mbps RGMII */
+ MAC_MODE_RGMII_10 = (MAC_IF_RGMII | MAC_SPEED_10),
+ /**< 100 Mbps RGMII */
+ MAC_MODE_RGMII_100 = (MAC_IF_RGMII | MAC_SPEED_100),
+ /**< 1000 Mbps RGMII */
+ MAC_MODE_RGMII_1000 = (MAC_IF_RGMII | MAC_SPEED_1000),
+ /**< 1000 Mbps TBI */
+ MAC_MODE_TBI_1000 = (MAC_IF_TBI | MAC_SPEED_1000),
+ /**< 1000 Mbps RTBI */
+ MAC_MODE_RTBI_1000 = (MAC_IF_RTBI | MAC_SPEED_1000),
+ /**< 10 Mbps SGMII */
+ MAC_MODE_SGMII_10 = (MAC_IF_SGMII | MAC_SPEED_10),
+ /**< 100 Mbps SGMII */
+ MAC_MODE_SGMII_100 = (MAC_IF_SGMII | MAC_SPEED_100),
+ /**< 1000 Mbps SGMII */
+ MAC_MODE_SGMII_1000 = (MAC_IF_SGMII | MAC_SPEED_1000),
+ /**< 10000 Mbps XGMII */
+ MAC_MODE_XGMII_10000 = (MAC_IF_XGMII | MAC_SPEED_10000),
+ /**< 1000 Mbps QSGMII */
+ MAC_MODE_QSGMII_1000 = (MAC_IF_QSGMII | MAC_SPEED_1000)
+};
+
+/*mac communicate mode*/
+enum mac_commom_mode {
+ MAC_COMM_MODE_NONE = 0, /**< No transmit/receive communication */
+ MAC_COMM_MODE_RX = 1, /**< Only receive communication */
+ MAC_COMM_MODE_TX = 2, /**< Only transmit communication */
+ MAC_COMM_MODE_RX_AND_TX = 3 /**< Both tx and rx communication */
+};
+
+/*mac statistics */
+struct mac_statistics {
+ u64 stat_pkts64; /* r-10G tr-DT 64 byte frame counter */
+ u64 stat_pkts65to127; /* r-10G 65 to 127 byte frame counter */
+ u64 stat_pkts128to255; /* r-10G 128 to 255 byte frame counter */
+ u64 stat_pkts256to511; /*r-10G 256 to 511 byte frame counter */
+ u64 stat_pkts512to1023;/* r-10G 512 to 1023 byte frame counter */
+ u64 stat_pkts1024to1518; /* r-10G 1024 to 1518 byte frame counter */
+ u64 stat_pkts1519to1522; /* r-10G 1519 to 1522 byte good frame count*/
+ /* Total number of packets that were less than 64 octets */
+ /* long with a wrong CRC.*/
+ u64 stat_fragments;
+ /* Total number of packets longer than valid maximum length octets */
+ u64 stat_jabbers;
+ /* number of dropped packets due to internal errors of */
+ /* the MAC Client. */
+ u64 stat_drop_events;
+ /* Incremented when frames of correct length but with */
+ /* CRC error are received.*/
+ u64 stat_crc_align_errors;
+ /* Total number of packets that were less than 64 octets */
+ /* long with a good CRC.*/
+ u64 stat_undersize_pkts;
+ u64 stat_oversize_pkts; /**< T,B.D*/
+
+ u64 stat_rx_pause; /**< Pause MAC Control received */
+ u64 stat_tx_pause; /**< Pause MAC Control sent */
+
+ u64 in_octets; /**< Total number of bytes received. */
+ u64 in_pkts; /* Total number of packets received. */
+ u64 in_mcast_pkts; /* Total number of multicast frames received */
+ u64 in_bcast_pkts; /* Total number of broadcast frames received */
+ /* Frames received, but discarded due to */
+ /* problems within the MAC RX. */
+ u64 in_discards;
+ u64 in_errors; /* Number of frames received with error: */
+ /* - FIFO Overflow Error */
+ /* - CRC Error */
+ /* - Frame Too Long Error */
+ /* - Alignment Error */
+ u64 out_octets; /* Total number of bytes sent. */
+ u64 out_pkts; /**< Total number of packets sent. */
+ u64 out_mcast_pkts; /* Total number of multicast frames sent */
+ u64 out_bcast_pkts; /* Total number of broadcast frames sent */
+ /* Frames received, but discarded due to problems within */
+ /* the MAC TX N/A!.*/
+ u64 out_discards;
+ u64 out_errors; /*Number of frames transmitted with error: */
+ /* - FIFO Overflow Error */
+ /* - FIFO Underflow Error */
+ /* - Other */
+};
+
+/* mac parameter struct; the mac gets its params from the nic or dsaf at init time */
+struct mac_params {
+ char addr[ETH_ALEN];
+ u8 __iomem *vaddr; /*virtual address*/
+ struct device *dev;
+ u8 mac_id;
+ /**< Ethernet operation mode (MAC-PHY interface and speed) */
+ enum mac_mode mac_mode;
+};
+
+struct mac_info {
+ u16 speed; /* The forced speed (lower bits) in Mbps.
+ * Please use ethtool_cmd_speed()/_set() to access it.
+ */
+ u8 duplex; /* Duplex, half or full */
+ u8 auto_neg; /* Enable or disable autonegotiation */
+ enum hnae_loop loop_mode;
+ u8 tx_pause_en;
+ u8 tx_pause_time;
+ u8 rx_pause_en;
+ u8 pad_and_crc_en;
+ u8 promiscuous_en;
+ u8 port_en; /*port enable*/
+};
+
+struct mac_entry_idx {
+ u8 addr[ETH_ALEN];
+ u16 vlan_id:12;
+ u16 valid:1;
+ u16 qos:3;
+};
+
+struct mac_hw_stats {
+ u64 rx_good_pkts; /* only for xgmac */
+ u64 rx_good_bytes;
+ u64 rx_total_pkts; /* only for xgmac */
+ u64 rx_total_bytes; /* only for xgmac */
+ u64 rx_bad_bytes; /* only for gmac */
+ u64 rx_uc_pkts;
+ u64 rx_mc_pkts;
+ u64 rx_bc_pkts;
+ u64 rx_fragment_err; /* only for xgmac */
+ u64 rx_undersize; /* only for xgmac */
+ u64 rx_under_min;
+ u64 rx_minto64; /* only for gmac */
+ u64 rx_64bytes;
+ u64 rx_65to127;
+ u64 rx_128to255;
+ u64 rx_256to511;
+ u64 rx_512to1023;
+ u64 rx_1024to1518;
+ u64 rx_1519tomax;
+ u64 rx_1519tomax_good; /* only for xgmac */
+ u64 rx_oversize;
+ u64 rx_jabber_err;
+ u64 rx_fcs_err;
+ u64 rx_vlan_pkts; /* only for gmac */
+ u64 rx_data_err; /* only for gmac */
+ u64 rx_align_err; /* only for gmac */
+ u64 rx_long_err; /* only for gmac */
+ u64 rx_pfc_tc0;
+ u64 rx_pfc_tc1; /* only for xgmac */
+ u64 rx_pfc_tc2; /* only for xgmac */
+ u64 rx_pfc_tc3; /* only for xgmac */
+ u64 rx_pfc_tc4; /* only for xgmac */
+ u64 rx_pfc_tc5; /* only for xgmac */
+ u64 rx_pfc_tc6; /* only for xgmac */
+ u64 rx_pfc_tc7; /* only for xgmac */
+ u64 rx_unknown_ctrl;
+ u64 rx_filter_pkts; /* only for gmac */
+ u64 rx_filter_bytes; /* only for gmac */
+ u64 rx_fifo_overrun_err;/* only for gmac */
+ u64 rx_len_err; /* only for gmac */
+ u64 rx_comma_err; /* only for gmac */
+ u64 rx_symbol_err; /* only for xgmac */
+ u64 tx_good_to_sw; /* only for xgmac */
+ u64 tx_bad_to_sw; /* only for xgmac */
+ u64 rx_1731_pkts; /* only for xgmac */
+
+ u64 tx_good_bytes;
+ u64 tx_good_pkts; /* only for xgmac */
+ u64 tx_total_bytes; /* only for xgmac */
+ u64 tx_total_pkts; /* only for xgmac */
+ u64 tx_bad_bytes; /* only for gmac */
+ u64 tx_bad_pkts; /* only for xgmac */
+ u64 tx_uc_pkts;
+ u64 tx_mc_pkts;
+ u64 tx_bc_pkts;
+ u64 tx_undersize; /* only for xgmac */
+ u64 tx_fragment_err; /* only for xgmac */
+ u64 tx_under_min_pkts; /* only for gmac */
+ u64 tx_64bytes;
+ u64 tx_65to127;
+ u64 tx_128to255;
+ u64 tx_256to511;
+ u64 tx_512to1023;
+ u64 tx_1024to1518;
+ u64 tx_1519tomax;
+ u64 tx_1519tomax_good; /* only for xgmac */
+ u64 tx_oversize; /* only for xgmac */
+ u64 tx_jabber_err;
+ u64 tx_underrun_err; /* only for gmac */
+ u64 tx_vlan; /* only for gmac */
+ u64 tx_crc_err; /* only for gmac */
+ u64 tx_pfc_tc0;
+ u64 tx_pfc_tc1; /* only for xgmac */
+ u64 tx_pfc_tc2; /* only for xgmac */
+ u64 tx_pfc_tc3; /* only for xgmac */
+ u64 tx_pfc_tc4; /* only for xgmac */
+ u64 tx_pfc_tc5; /* only for xgmac */
+ u64 tx_pfc_tc6; /* only for xgmac */
+ u64 tx_pfc_tc7; /* only for xgmac */
+ u64 tx_ctrl; /* only for xgmac */
+ u64 tx_1731_pkts; /* only for xgmac */
+ u64 tx_1588_pkts; /* only for xgmac */
+ u64 rx_good_from_sw; /* only for xgmac */
+ u64 rx_bad_from_sw; /* only for xgmac */
+};
+
+struct hns_mac_cb {
+ struct device *dev;
+ struct dsaf_device *dsaf_dev;
+ struct mac_priv priv;
+ struct fwnode_handle *fw_port;
+ u8 __iomem *vaddr;
+ u8 __iomem *sys_ctl_vaddr;
+ u8 __iomem *serdes_vaddr;
+ struct regmap *serdes_ctrl;
+ struct regmap *cpld_ctrl;
+ char mc_mask[ETH_ALEN];
+ u32 cpld_ctrl_reg;
+ u32 port_rst_off;
+ u32 port_mode_off;
+ struct mac_entry_idx addr_entry_idx[DSAF_MAX_VM_NUM];
+ u8 sfp_prsnt;
+ u8 cpld_led_value;
+ u8 mac_id;
+
+ u8 link;
+ u8 half_duplex;
+ u16 speed;
+ u16 max_speed;
+ u16 max_frm;
+ u16 tx_pause_frm_time;
+ u32 if_support;
+ u64 txpkt_for_led;
+ u64 rxpkt_for_led;
+ enum hnae_port_type mac_type;
+ enum hnae_media_type media_type;
+ phy_interface_t phy_if;
+ enum hnae_loop loop_mode;
+
+ struct phy_device *phy_dev;
+
+ struct mac_hw_stats hw_stats;
+};
+
+struct mac_driver {
+ /* init MAC when initializing the NIC or DSAF */
+ void (*mac_init)(void *mac_drv);
+ /* remove MAC when removing the NIC or DSAF */
+ void (*mac_free)(void *mac_drv);
+ /* enable MAC when enabling the NIC or DSAF */
+ void (*mac_enable)(void *mac_drv, enum mac_commom_mode mode);
+ /* disable MAC when disabling the NIC or DSAF */
+ void (*mac_disable)(void *mac_drv, enum mac_commom_mode mode);
+ /* config mac address*/
+ void (*set_mac_addr)(void *mac_drv, const char *mac_addr);
+ /* adjust MAC mode of the port, including speed and duplex */
+ int (*adjust_link)(void *mac_drv, enum mac_speed speed,
+ u32 full_duplex);
+ /* check whether the link needs to be adjusted */
+ bool (*need_adjust_link)(void *mac_drv, enum mac_speed speed,
+ int duplex);
+ /* config autonegotiation mode of the port */
+ void (*set_an_mode)(void *mac_drv, u8 enable);
+ /* config loopback mode */
+ int (*config_loopback)(void *mac_drv, enum hnae_loop loop_mode,
+ u8 enable);
+ /* config mtu*/
+ void (*config_max_frame_length)(void *mac_drv, u16 newval);
+ /*config PAD and CRC enable */
+ void (*config_pad_and_crc)(void *mac_drv, u8 newval);
+ /* config tx pause time; if pause_time is zero, disable tx pause */
+ void (*set_tx_auto_pause_frames)(void *mac_drv, u16 pause_time);
+ /* config rx mode for promiscuous*/
+ void (*set_promiscuous)(void *mac_drv, u8 enable);
+ void (*mac_pausefrm_cfg)(void *mac_drv, u32 rx_en, u32 tx_en);
+
+ void (*autoneg_stat)(void *mac_drv, u32 *enable);
+ int (*set_pause_enable)(void *mac_drv, u32 rx_en, u32 tx_en);
+ void (*get_pause_enable)(void *mac_drv, u32 *rx_en, u32 *tx_en);
+ void (*get_link_status)(void *mac_drv, u32 *link_stat);
+ /* get the important regs */
+ void (*get_regs)(void *mac_drv, void *data);
+ int (*get_regs_count)(void);
+ /* get strings name for ethtool statistic */
+ void (*get_strings)(u32 stringset, u8 *data);
+ /* get the number of strings*/
+ int (*get_sset_count)(int stringset);
+
+ /* get the statistics via ethtool */
+ void (*get_ethtool_stats)(void *mac_drv, u64 *data);
+
+ /* get mac information */
+ void (*get_info)(void *mac_drv, struct mac_info *mac_info);
+
+ void (*update_stats)(void *mac_drv);
+ int (*wait_fifo_clean)(void *mac_drv);
+
+ enum mac_mode mac_mode;
+ u8 mac_id;
+ struct hns_mac_cb *mac_cb;
+ u8 __iomem *io_base;
+ unsigned int mac_en_flg; /* guard flag: do not enable the MAC twice */
+ unsigned int virt_dev_num;
+ struct device *dev;
+};
+
+struct mac_stats_string {
+ const char desc[ETH_GSTRING_LEN];
+ unsigned long offset;
+};
+
+#define MAC_MAKE_MODE(interface, speed) (enum mac_mode)((interface) | (speed))
+#define MAC_INTERFACE_FROM_MODE(mode) (enum mac_intf)((mode) & 0xFFFF0000)
+#define MAC_SPEED_FROM_MODE(mode) (enum mac_speed)((mode) & 0x0000FFFF)
+#define MAC_STATS_FIELD_OFF(field) (offsetof(struct mac_hw_stats, field))
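+
+/*
+ * Note: an enum mac_mode value packs the MAC interface type into the upper
+ * 16 bits and the link speed into the lower 16 bits, which is why the
+ * *_FROM_MODE() helpers above simply mask off one half of the value.
+ */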
+
+static inline struct mac_driver *hns_mac_get_drv(
+ const struct hns_mac_cb *mac_cb)
+{
+ return (struct mac_driver *)(mac_cb->priv.mac);
+}
+
+void *hns_gmac_config(struct hns_mac_cb *mac_cb,
+ struct mac_params *mac_param);
+void *hns_xgmac_config(struct hns_mac_cb *mac_cb,
+ struct mac_params *mac_param);
+
+int hns_mac_init(struct dsaf_device *dsaf_dev);
+bool hns_mac_need_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex);
+void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status);
+int hns_mac_change_vf_addr(struct hns_mac_cb *mac_cb, u32 vmid,
+ const char *addr);
+int hns_mac_set_multi(struct hns_mac_cb *mac_cb,
+ u32 port_num, char *addr, bool enable);
+int hns_mac_vm_config_bc_en(struct hns_mac_cb *mac_cb, u32 vm, bool enable);
+void hns_mac_start(struct hns_mac_cb *mac_cb);
+void hns_mac_stop(struct hns_mac_cb *mac_cb);
+void hns_mac_uninit(struct dsaf_device *dsaf_dev);
+void hns_mac_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex);
+void hns_mac_reset(struct hns_mac_cb *mac_cb);
+void hns_mac_get_autoneg(struct hns_mac_cb *mac_cb, u32 *auto_neg);
+void hns_mac_get_pauseparam(struct hns_mac_cb *mac_cb, u32 *rx_en, u32 *tx_en);
+int hns_mac_set_autoneg(struct hns_mac_cb *mac_cb, u8 enable);
+int hns_mac_set_pauseparam(struct hns_mac_cb *mac_cb, u32 rx_en, u32 tx_en);
+int hns_mac_set_mtu(struct hns_mac_cb *mac_cb, u32 new_mtu, u32 buf_size);
+int hns_mac_get_port_info(struct hns_mac_cb *mac_cb,
+ u8 *auto_neg, u16 *speed, u8 *duplex);
+int hns_mac_config_mac_loopback(struct hns_mac_cb *mac_cb,
+ enum hnae_loop loop, int en);
+void hns_mac_update_stats(struct hns_mac_cb *mac_cb);
+void hns_mac_get_stats(struct hns_mac_cb *mac_cb, u64 *data);
+void hns_mac_get_strings(struct hns_mac_cb *mac_cb, int stringset, u8 *data);
+int hns_mac_get_sset_count(struct hns_mac_cb *mac_cb, int stringset);
+void hns_mac_get_regs(struct hns_mac_cb *mac_cb, void *data);
+int hns_mac_get_regs_count(struct hns_mac_cb *mac_cb);
+void hns_set_led_opt(struct hns_mac_cb *mac_cb);
+int hns_cpld_led_set_id(struct hns_mac_cb *mac_cb,
+ enum hnae_led_state status);
+void hns_mac_set_promisc(struct hns_mac_cb *mac_cb, u8 en);
+int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb,
+ u8 vmid, u8 *port_num);
+int hns_mac_add_uc_addr(struct hns_mac_cb *mac_cb, u8 vf_id,
+ const unsigned char *addr);
+int hns_mac_rm_uc_addr(struct hns_mac_cb *mac_cb, u8 vf_id,
+ const unsigned char *addr);
+int hns_mac_clr_multicast(struct hns_mac_cb *mac_cb, int vfn);
+void hns_mac_enable(struct hns_mac_cb *mac_cb, enum mac_commom_mode mode);
+void hns_mac_disable(struct hns_mac_cb *mac_cb, enum mac_commom_mode mode);
+int hns_mac_wait_fifo_clean(struct hns_mac_cb *mac_cb);
+
+#endif /* _HNS_DSAF_MAC_H */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
new file mode 100644
index 0000000000..fcaf5132b8
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -0,0 +1,3158 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ */
+
+#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/vmalloc.h>
+
+#include "hns_dsaf_mac.h"
+#include "hns_dsaf_main.h"
+#include "hns_dsaf_ppe.h"
+#include "hns_dsaf_rcb.h"
+#include "hns_dsaf_misc.h"
+
+static const char *g_dsaf_mode_match[DSAF_MODE_MAX] = {
+ [DSAF_MODE_DISABLE_2PORT_64VM] = "2port-64vf",
+ [DSAF_MODE_DISABLE_6PORT_0VM] = "6port-16rss",
+ [DSAF_MODE_DISABLE_6PORT_16VM] = "6port-16vf",
+ [DSAF_MODE_DISABLE_SP] = "single-port",
+};
+
+static const struct acpi_device_id hns_dsaf_acpi_match[] = {
+ { "HISI00B1", 0 },
+ { "HISI00B2", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, hns_dsaf_acpi_match);
+
+static int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
+{
+ int ret, i;
+ u32 desc_num;
+ u32 buf_size;
+ u32 reset_offset = 0;
+ u32 res_idx = 0;
+ const char *mode_str;
+ struct regmap *syscon;
+ struct resource *res;
+ struct device_node *np = dsaf_dev->dev->of_node, *np_temp;
+ struct platform_device *pdev = to_platform_device(dsaf_dev->dev);
+
+ if (dev_of_node(dsaf_dev->dev)) {
+ if (of_device_is_compatible(np, "hisilicon,hns-dsaf-v1"))
+ dsaf_dev->dsaf_ver = AE_VERSION_1;
+ else
+ dsaf_dev->dsaf_ver = AE_VERSION_2;
+ } else if (is_acpi_node(dsaf_dev->dev->fwnode)) {
+ if (acpi_dev_found(hns_dsaf_acpi_match[0].id))
+ dsaf_dev->dsaf_ver = AE_VERSION_1;
+ else if (acpi_dev_found(hns_dsaf_acpi_match[1].id))
+ dsaf_dev->dsaf_ver = AE_VERSION_2;
+ else
+ return -ENXIO;
+ } else {
+ dev_err(dsaf_dev->dev, "cannot get cfg data from of or acpi\n");
+ return -ENXIO;
+ }
+
+ ret = device_property_read_string(dsaf_dev->dev, "mode", &mode_str);
+ if (ret) {
+ dev_err(dsaf_dev->dev, "get dsaf mode fail, ret=%d!\n", ret);
+ return ret;
+ }
+ for (i = 0; i < DSAF_MODE_MAX; i++) {
+ if (g_dsaf_mode_match[i] &&
+ !strcmp(mode_str, g_dsaf_mode_match[i]))
+ break;
+ }
+ if (i >= DSAF_MODE_MAX ||
+ i == DSAF_MODE_INVALID || i == DSAF_MODE_ENABLE) {
+ dev_err(dsaf_dev->dev,
+ "%s prs mode str fail!\n", dsaf_dev->ae_dev.name);
+ return -EINVAL;
+ }
+ dsaf_dev->dsaf_mode = (enum dsaf_mode)i;
+
+ if (dsaf_dev->dsaf_mode > DSAF_MODE_ENABLE)
+ dsaf_dev->dsaf_en = HRD_DSAF_NO_DSAF_MODE;
+ else
+ dsaf_dev->dsaf_en = HRD_DSAF_MODE;
+
+ if ((i == DSAF_MODE_ENABLE_16VM) ||
+ (i == DSAF_MODE_DISABLE_2PORT_8VM) ||
+ (i == DSAF_MODE_DISABLE_6PORT_2VM))
+ dsaf_dev->dsaf_tc_mode = HRD_DSAF_8TC_MODE;
+ else
+ dsaf_dev->dsaf_tc_mode = HRD_DSAF_4TC_MODE;
+
+ if (dev_of_node(dsaf_dev->dev)) {
+ np_temp = of_parse_phandle(np, "subctrl-syscon", 0);
+ syscon = syscon_node_to_regmap(np_temp);
+ of_node_put(np_temp);
+ if (IS_ERR_OR_NULL(syscon)) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM,
+ res_idx++);
+ if (!res) {
+ dev_err(dsaf_dev->dev, "subctrl info is needed!\n");
+ return -ENOMEM;
+ }
+
+ dsaf_dev->sc_base = devm_ioremap_resource(&pdev->dev,
+ res);
+ if (IS_ERR(dsaf_dev->sc_base))
+ return PTR_ERR(dsaf_dev->sc_base);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM,
+ res_idx++);
+ if (!res) {
+ dev_err(dsaf_dev->dev, "serdes-ctrl info is needed!\n");
+ return -ENOMEM;
+ }
+
+ dsaf_dev->sds_base = devm_ioremap_resource(&pdev->dev,
+ res);
+ if (IS_ERR(dsaf_dev->sds_base))
+ return PTR_ERR(dsaf_dev->sds_base);
+ } else {
+ dsaf_dev->sub_ctrl = syscon;
+ }
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ppe-base");
+ if (!res) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, res_idx++);
+ if (!res) {
+ dev_err(dsaf_dev->dev, "ppe-base info is needed!\n");
+ return -ENOMEM;
+ }
+ }
+ dsaf_dev->ppe_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(dsaf_dev->ppe_base))
+ return PTR_ERR(dsaf_dev->ppe_base);
+ dsaf_dev->ppe_paddr = res->start;
+
+ if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "dsaf-base");
+ if (!res) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM,
+ res_idx);
+ if (!res) {
+ dev_err(dsaf_dev->dev,
+ "dsaf-base info is needed!\n");
+ return -ENOMEM;
+ }
+ }
+ dsaf_dev->io_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(dsaf_dev->io_base))
+ return PTR_ERR(dsaf_dev->io_base);
+ }
+
+ ret = device_property_read_u32(dsaf_dev->dev, "desc-num", &desc_num);
+ if (ret < 0 || desc_num < HNS_DSAF_MIN_DESC_CNT ||
+ desc_num > HNS_DSAF_MAX_DESC_CNT) {
+ dev_err(dsaf_dev->dev, "get desc-num(%d) fail, ret=%d!\n",
+ desc_num, ret);
+ return -EINVAL;
+ }
+ dsaf_dev->desc_num = desc_num;
+
+ ret = device_property_read_u32(dsaf_dev->dev, "reset-field-offset",
+ &reset_offset);
+ if (ret < 0) {
+ dev_dbg(dsaf_dev->dev,
+ "get reset-field-offset fail, ret=%d!\r\n", ret);
+ }
+ dsaf_dev->reset_offset = reset_offset;
+
+ ret = device_property_read_u32(dsaf_dev->dev, "buf-size", &buf_size);
+ if (ret < 0) {
+ dev_err(dsaf_dev->dev,
+ "get buf-size fail, ret=%d!\r\n", ret);
+ return ret;
+ }
+ dsaf_dev->buf_size = buf_size;
+
+ dsaf_dev->buf_size_type = hns_rcb_buf_size2type(buf_size);
+ if (dsaf_dev->buf_size_type < 0) {
+ dev_err(dsaf_dev->dev,
+ "buf_size(%d) is wrong!\n", buf_size);
+ return -EINVAL;
+ }
+
+ dsaf_dev->misc_op = hns_misc_op_get(dsaf_dev);
+ if (!dsaf_dev->misc_op)
+ return -ENOMEM;
+
+ if (!dma_set_mask_and_coherent(dsaf_dev->dev, DMA_BIT_MASK(64ULL)))
+ dev_dbg(dsaf_dev->dev, "set mask to 64bit\n");
+ else
+ dev_err(dsaf_dev->dev, "set mask to 64bit fail!\n");
+
+ return 0;
+}
+
+/**
+ * hns_dsaf_sbm_link_sram_init_en - config dsaf_sbm_init_en
+ * @dsaf_dev: dsa fabric id
+ */
+static void hns_dsaf_sbm_link_sram_init_en(struct dsaf_device *dsaf_dev)
+{
+ dsaf_set_dev_bit(dsaf_dev, DSAF_CFG_0_REG, DSAF_CFG_SBM_INIT_S, 1);
+}
+
+/**
+ * hns_dsaf_reg_cnt_clr_ce - config hns_dsaf_reg_cnt_clr_ce
+ * @dsaf_dev: dsa fabric id
+ * @reg_cnt_clr_ce: config value
+ */
+static void
+hns_dsaf_reg_cnt_clr_ce(struct dsaf_device *dsaf_dev, u32 reg_cnt_clr_ce)
+{
+ dsaf_set_dev_bit(dsaf_dev, DSAF_DSA_REG_CNT_CLR_CE_REG,
+ DSAF_CNT_CLR_CE_S, reg_cnt_clr_ce);
+}
+
+/**
+ * hns_dsaf_ppe_qid_cfg - config ppe qid
+ * @dsaf_dev: dsa fabric id
+ * @qid_cfg: value array
+ */
+static void
+hns_dsaf_ppe_qid_cfg(struct dsaf_device *dsaf_dev, u32 qid_cfg)
+{
+ u32 i;
+
+ for (i = 0; i < DSAF_COMM_CHN; i++) {
+ dsaf_set_dev_field(dsaf_dev,
+ DSAF_PPE_QID_CFG_0_REG + 0x0004 * i,
+ DSAF_PPE_QID_CFG_M, DSAF_PPE_QID_CFG_S,
+ qid_cfg);
+ }
+}
+
+static void hns_dsaf_mix_def_qid_cfg(struct dsaf_device *dsaf_dev)
+{
+ u16 max_q_per_vf, max_vfn;
+ u32 q_id, q_num_per_port;
+ u32 i;
+
+ hns_rcb_get_queue_mode(dsaf_dev->dsaf_mode, &max_vfn, &max_q_per_vf);
+ q_num_per_port = max_vfn * max_q_per_vf;
+
+ for (i = 0, q_id = 0; i < DSAF_SERVICE_NW_NUM; i++) {
+ dsaf_set_dev_field(dsaf_dev,
+ DSAF_MIX_DEF_QID_0_REG + 0x0004 * i,
+ 0xff, 0, q_id);
+ q_id += q_num_per_port;
+ }
+}
+
+static void hns_dsaf_inner_qid_cfg(struct dsaf_device *dsaf_dev)
+{
+ u16 max_q_per_vf, max_vfn;
+ u32 q_id, q_num_per_port;
+ u32 mac_id;
+
+ if (AE_IS_VER1(dsaf_dev->dsaf_ver))
+ return;
+
+ hns_rcb_get_queue_mode(dsaf_dev->dsaf_mode, &max_vfn, &max_q_per_vf);
+ q_num_per_port = max_vfn * max_q_per_vf;
+
+ for (mac_id = 0, q_id = 0; mac_id < DSAF_SERVICE_NW_NUM; mac_id++) {
+ dsaf_set_dev_field(dsaf_dev,
+ DSAFV2_SERDES_LBK_0_REG + 4 * mac_id,
+ DSAFV2_SERDES_LBK_QID_M,
+ DSAFV2_SERDES_LBK_QID_S,
+ q_id);
+ q_id += q_num_per_port;
+ }
+}
+
+/**
+ * hns_dsaf_sw_port_type_cfg - cfg sw type
+ * @dsaf_dev: dsa fabric id
+ * @port_type: array
+ */
+static void hns_dsaf_sw_port_type_cfg(struct dsaf_device *dsaf_dev,
+ enum dsaf_sw_port_type port_type)
+{
+ u32 i;
+
+ for (i = 0; i < DSAF_SW_PORT_NUM; i++) {
+ dsaf_set_dev_field(dsaf_dev,
+ DSAF_SW_PORT_TYPE_0_REG + 0x0004 * i,
+ DSAF_SW_PORT_TYPE_M, DSAF_SW_PORT_TYPE_S,
+ port_type);
+ }
+}
+
+/**
+ * hns_dsaf_stp_port_type_cfg - cfg stp type
+ * @dsaf_dev: dsa fabric id
+ * @port_type: array
+ */
+static void hns_dsaf_stp_port_type_cfg(struct dsaf_device *dsaf_dev,
+ enum dsaf_stp_port_type port_type)
+{
+ u32 i;
+
+ for (i = 0; i < DSAF_COMM_CHN; i++) {
+ dsaf_set_dev_field(dsaf_dev,
+ DSAF_STP_PORT_TYPE_0_REG + 0x0004 * i,
+ DSAF_STP_PORT_TYPE_M, DSAF_STP_PORT_TYPE_S,
+ port_type);
+ }
+}
+
+#define HNS_DSAF_SBM_NUM(dev) \
+ (AE_IS_VER1((dev)->dsaf_ver) ? DSAF_SBM_NUM : DSAFV2_SBM_NUM)
+/**
+ * hns_dsaf_sbm_cfg - config sbm
+ * @dsaf_dev: dsa fabric id
+ */
+static void hns_dsaf_sbm_cfg(struct dsaf_device *dsaf_dev)
+{
+ u32 o_sbm_cfg;
+ u32 i;
+
+ for (i = 0; i < HNS_DSAF_SBM_NUM(dsaf_dev); i++) {
+ o_sbm_cfg = dsaf_read_dev(dsaf_dev,
+ DSAF_SBM_CFG_REG_0_REG + 0x80 * i);
+ dsaf_set_bit(o_sbm_cfg, DSAF_SBM_CFG_EN_S, 1);
+ dsaf_set_bit(o_sbm_cfg, DSAF_SBM_CFG_SHCUT_EN_S, 0);
+ dsaf_write_dev(dsaf_dev,
+ DSAF_SBM_CFG_REG_0_REG + 0x80 * i, o_sbm_cfg);
+ }
+}
+
+/**
+ * hns_dsaf_sbm_cfg_mib_en - config sbm
+ * @dsaf_dev: dsa fabric id
+ */
+static int hns_dsaf_sbm_cfg_mib_en(struct dsaf_device *dsaf_dev)
+{
+ u32 sbm_cfg_mib_en;
+ u32 i;
+ u32 reg;
+ u32 read_cnt;
+
+ /* validate the configuration by toggling the SBM_CFG_MIB_EN bit from 0 to 1. */
+ for (i = 0; i < HNS_DSAF_SBM_NUM(dsaf_dev); i++) {
+ reg = DSAF_SBM_CFG_REG_0_REG + 0x80 * i;
+ dsaf_set_dev_bit(dsaf_dev, reg, DSAF_SBM_CFG_MIB_EN_S, 0);
+ }
+
+ for (i = 0; i < HNS_DSAF_SBM_NUM(dsaf_dev); i++) {
+ reg = DSAF_SBM_CFG_REG_0_REG + 0x80 * i;
+ dsaf_set_dev_bit(dsaf_dev, reg, DSAF_SBM_CFG_MIB_EN_S, 1);
+ }
+
+ /* wait for all sbm enables to finish */
+ for (i = 0; i < HNS_DSAF_SBM_NUM(dsaf_dev); i++) {
+ read_cnt = 0;
+ reg = DSAF_SBM_CFG_REG_0_REG + 0x80 * i;
+ do {
+ udelay(1);
+ sbm_cfg_mib_en = dsaf_get_dev_bit(
+ dsaf_dev, reg, DSAF_SBM_CFG_MIB_EN_S);
+ read_cnt++;
+ } while (sbm_cfg_mib_en == 0 &&
+ read_cnt < DSAF_CFG_READ_CNT);
+
+ if (sbm_cfg_mib_en == 0) {
+ dev_err(dsaf_dev->dev,
+ "sbm_cfg_mib_en fail,%s,sbm_num=%d\n",
+ dsaf_dev->ae_dev.name, i);
+ return -ENODEV;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * hns_dsaf_sbm_bp_wl_cfg - config sbm
+ * @dsaf_dev: dsa fabric id
+ */
+static void hns_dsaf_sbm_bp_wl_cfg(struct dsaf_device *dsaf_dev)
+{
+ u32 o_sbm_bp_cfg;
+ u32 reg;
+ u32 i;
+
+ /* XGE */
+ for (i = 0; i < DSAF_XGE_NUM; i++) {
+ reg = DSAF_SBM_BP_CFG_0_XGE_REG_0_REG + 0x80 * i;
+ o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg);
+ dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG0_COM_MAX_BUF_NUM_M,
+ DSAF_SBM_CFG0_COM_MAX_BUF_NUM_S, 512);
+ dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG0_VC0_MAX_BUF_NUM_M,
+ DSAF_SBM_CFG0_VC0_MAX_BUF_NUM_S, 0);
+ dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG0_VC1_MAX_BUF_NUM_M,
+ DSAF_SBM_CFG0_VC1_MAX_BUF_NUM_S, 0);
+ dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg);
+
+ reg = DSAF_SBM_BP_CFG_1_REG_0_REG + 0x80 * i;
+ o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg);
+ dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG1_TC4_MAX_BUF_NUM_M,
+ DSAF_SBM_CFG1_TC4_MAX_BUF_NUM_S, 0);
+ dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG1_TC0_MAX_BUF_NUM_M,
+ DSAF_SBM_CFG1_TC0_MAX_BUF_NUM_S, 0);
+ dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg);
+
+ reg = DSAF_SBM_BP_CFG_2_XGE_REG_0_REG + 0x80 * i;
+ o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg);
+ dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG2_SET_BUF_NUM_M,
+ DSAF_SBM_CFG2_SET_BUF_NUM_S, 104);
+ dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG2_RESET_BUF_NUM_M,
+ DSAF_SBM_CFG2_RESET_BUF_NUM_S, 128);
+ dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg);
+
+ reg = DSAF_SBM_BP_CFG_3_REG_0_REG + 0x80 * i;
+ o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg);
+ dsaf_set_field(o_sbm_bp_cfg,
+ DSAF_SBM_CFG3_SET_BUF_NUM_NO_PFC_M,
+ DSAF_SBM_CFG3_SET_BUF_NUM_NO_PFC_S, 110);
+ dsaf_set_field(o_sbm_bp_cfg,
+ DSAF_SBM_CFG3_RESET_BUF_NUM_NO_PFC_M,
+ DSAF_SBM_CFG3_RESET_BUF_NUM_NO_PFC_S, 160);
+ dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg);
+
+ /* for the case where pfc is not enabled */
+ reg = DSAF_SBM_BP_CFG_4_REG_0_REG + 0x80 * i;
+ o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg);
+ dsaf_set_field(o_sbm_bp_cfg,
+ DSAF_SBM_CFG3_SET_BUF_NUM_NO_PFC_M,
+ DSAF_SBM_CFG3_SET_BUF_NUM_NO_PFC_S, 128);
+ dsaf_set_field(o_sbm_bp_cfg,
+ DSAF_SBM_CFG3_RESET_BUF_NUM_NO_PFC_M,
+ DSAF_SBM_CFG3_RESET_BUF_NUM_NO_PFC_S, 192);
+ dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg);
+ }
+
+ /* PPE */
+ for (i = 0; i < DSAF_COMM_CHN; i++) {
+ reg = DSAF_SBM_BP_CFG_2_PPE_REG_0_REG + 0x80 * i;
+ o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg);
+ dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG2_SET_BUF_NUM_M,
+ DSAF_SBM_CFG2_SET_BUF_NUM_S, 10);
+ dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG2_RESET_BUF_NUM_M,
+ DSAF_SBM_CFG2_RESET_BUF_NUM_S, 12);
+ dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg);
+ }
+
+ /* RoCEE */
+ for (i = 0; i < DSAF_COMM_CHN; i++) {
+ reg = DSAF_SBM_BP_CFG_2_ROCEE_REG_0_REG + 0x80 * i;
+ o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg);
+ dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG2_SET_BUF_NUM_M,
+ DSAF_SBM_CFG2_SET_BUF_NUM_S, 2);
+ dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG2_RESET_BUF_NUM_M,
+ DSAF_SBM_CFG2_RESET_BUF_NUM_S, 4);
+ dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg);
+ }
+}
+
+static void hns_dsafv2_sbm_bp_wl_cfg(struct dsaf_device *dsaf_dev)
+{
+ u32 o_sbm_bp_cfg;
+ u32 reg;
+ u32 i;
+
+ /* XGE */
+ for (i = 0; i < DSAFV2_SBM_XGE_CHN; i++) {
+ reg = DSAF_SBM_BP_CFG_0_XGE_REG_0_REG + 0x80 * i;
+ o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg);
+ dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG0_COM_MAX_BUF_NUM_M,
+ DSAFV2_SBM_CFG0_COM_MAX_BUF_NUM_S, 256);
+ dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG0_VC0_MAX_BUF_NUM_M,
+ DSAFV2_SBM_CFG0_VC0_MAX_BUF_NUM_S, 0);
+ dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG0_VC1_MAX_BUF_NUM_M,
+ DSAFV2_SBM_CFG0_VC1_MAX_BUF_NUM_S, 0);
+ dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg);
+
+ reg = DSAF_SBM_BP_CFG_1_REG_0_REG + 0x80 * i;
+ o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg);
+ dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG1_TC4_MAX_BUF_NUM_M,
+ DSAFV2_SBM_CFG1_TC4_MAX_BUF_NUM_S, 0);
+ dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG1_TC0_MAX_BUF_NUM_M,
+ DSAFV2_SBM_CFG1_TC0_MAX_BUF_NUM_S, 0);
+ dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg);
+
+ reg = DSAF_SBM_BP_CFG_2_XGE_REG_0_REG + 0x80 * i;
+ o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg);
+ dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG2_SET_BUF_NUM_M,
+ DSAFV2_SBM_CFG2_SET_BUF_NUM_S, 104);
+ dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG2_RESET_BUF_NUM_M,
+ DSAFV2_SBM_CFG2_RESET_BUF_NUM_S, 128);
+ dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg);
+
+ reg = DSAF_SBM_BP_CFG_3_REG_0_REG + 0x80 * i;
+ o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg);
+ dsaf_set_field(o_sbm_bp_cfg,
+ DSAFV2_SBM_CFG3_SET_BUF_NUM_NO_PFC_M,
+ DSAFV2_SBM_CFG3_SET_BUF_NUM_NO_PFC_S, 55);
+ dsaf_set_field(o_sbm_bp_cfg,
+ DSAFV2_SBM_CFG3_RESET_BUF_NUM_NO_PFC_M,
+ DSAFV2_SBM_CFG3_RESET_BUF_NUM_NO_PFC_S, 110);
+ dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg);
+
+ /* for the case where pfc is not enabled */
+ reg = DSAF_SBM_BP_CFG_4_REG_0_REG + 0x80 * i;
+ o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg);
+ dsaf_set_field(o_sbm_bp_cfg,
+ DSAFV2_SBM_CFG4_SET_BUF_NUM_NO_PFC_M,
+ DSAFV2_SBM_CFG4_SET_BUF_NUM_NO_PFC_S, 128);
+ dsaf_set_field(o_sbm_bp_cfg,
+ DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_M,
+ DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_S, 192);
+ dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg);
+ }
+
+ /* PPE */
+ for (i = 0; i < DSAFV2_SBM_PPE_CHN; i++) {
+ reg = DSAF_SBM_BP_CFG_2_PPE_REG_0_REG + 0x80 * i;
+ o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg);
+ dsaf_set_field(o_sbm_bp_cfg,
+ DSAFV2_SBM_CFG2_PPE_SET_BUF_NUM_M,
+ DSAFV2_SBM_CFG2_PPE_SET_BUF_NUM_S, 2);
+ dsaf_set_field(o_sbm_bp_cfg,
+ DSAFV2_SBM_CFG2_PPE_RESET_BUF_NUM_M,
+ DSAFV2_SBM_CFG2_PPE_RESET_BUF_NUM_S, 3);
+ dsaf_set_field(o_sbm_bp_cfg,
+ DSAFV2_SBM_CFG2_PPE_CFG_USEFUL_NUM_M,
+ DSAFV2_SBM_CFG2_PPE_CFG_USEFUL_NUM_S, 52);
+ dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg);
+ }
+
+ /* RoCEE */
+ for (i = 0; i < DASFV2_ROCEE_CRD_NUM; i++) {
+ reg = DSAFV2_SBM_BP_CFG_2_ROCEE_REG_0_REG + 0x80 * i;
+ o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg);
+ dsaf_set_field(o_sbm_bp_cfg,
+ DSAFV2_SBM_CFG2_ROCEE_SET_BUF_NUM_M,
+ DSAFV2_SBM_CFG2_ROCEE_SET_BUF_NUM_S, 2);
+ dsaf_set_field(o_sbm_bp_cfg,
+ DSAFV2_SBM_CFG2_ROCEE_RESET_BUF_NUM_M,
+ DSAFV2_SBM_CFG2_ROCEE_RESET_BUF_NUM_S, 4);
+ dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg);
+ }
+}
+
+/**
+ * hns_dsaf_voq_bp_all_thrd_cfg - config voq backpressure thresholds
+ * @dsaf_dev: dsa fabric id
+ */
+static void hns_dsaf_voq_bp_all_thrd_cfg(struct dsaf_device *dsaf_dev)
+{
+ u32 voq_bp_all_thrd;
+ u32 i;
+
+ for (i = 0; i < DSAF_VOQ_NUM; i++) {
+ voq_bp_all_thrd = dsaf_read_dev(
+ dsaf_dev, DSAF_VOQ_BP_ALL_THRD_0_REG + 0x40 * i);
+ if (i < DSAF_XGE_NUM) {
+ dsaf_set_field(voq_bp_all_thrd,
+ DSAF_VOQ_BP_ALL_DOWNTHRD_M,
+ DSAF_VOQ_BP_ALL_DOWNTHRD_S, 930);
+ dsaf_set_field(voq_bp_all_thrd,
+ DSAF_VOQ_BP_ALL_UPTHRD_M,
+ DSAF_VOQ_BP_ALL_UPTHRD_S, 950);
+ } else {
+ dsaf_set_field(voq_bp_all_thrd,
+ DSAF_VOQ_BP_ALL_DOWNTHRD_M,
+ DSAF_VOQ_BP_ALL_DOWNTHRD_S, 220);
+ dsaf_set_field(voq_bp_all_thrd,
+ DSAF_VOQ_BP_ALL_UPTHRD_M,
+ DSAF_VOQ_BP_ALL_UPTHRD_S, 230);
+ }
+ dsaf_write_dev(
+ dsaf_dev, DSAF_VOQ_BP_ALL_THRD_0_REG + 0x40 * i,
+ voq_bp_all_thrd);
+ }
+}
+
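+/**
+ * hns_dsaf_tbl_tcam_match_cfg - write the tcam match (mask) registers
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @ptbl_tcam_data: mask data to write
+ */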
+static void hns_dsaf_tbl_tcam_match_cfg(
+ struct dsaf_device *dsaf_dev,
+ struct dsaf_tbl_tcam_data *ptbl_tcam_data)
+{
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_MATCH_CFG_L_REG,
+ ptbl_tcam_data->tbl_tcam_data_low);
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_MATCH_CFG_H_REG,
+ ptbl_tcam_data->tbl_tcam_data_high);
+}
+
+/**
+ * hns_dsaf_tbl_tcam_data_cfg - write the tcam data registers
+ * @dsaf_dev: dsa fabric id
+ * @ptbl_tcam_data: addr
+ */
+static void hns_dsaf_tbl_tcam_data_cfg(
+ struct dsaf_device *dsaf_dev,
+ struct dsaf_tbl_tcam_data *ptbl_tcam_data)
+{
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_LOW_0_REG,
+ ptbl_tcam_data->tbl_tcam_data_low);
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_HIGH_0_REG,
+ ptbl_tcam_data->tbl_tcam_data_high);
+}
+
+/**
+ * hns_dsaf_tbl_tcam_mcast_cfg - write the tcam multicast config registers
+ * @dsaf_dev: dsa fabric id
+ * @mcast: addr
+ */
+static void hns_dsaf_tbl_tcam_mcast_cfg(
+ struct dsaf_device *dsaf_dev,
+ struct dsaf_tbl_tcam_mcast_cfg *mcast)
+{
+ u32 mcast_cfg4;
+
+ mcast_cfg4 = dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_MCAST_CFG_4_0_REG);
+ dsaf_set_bit(mcast_cfg4, DSAF_TBL_MCAST_CFG4_ITEM_VLD_S,
+ mcast->tbl_mcast_item_vld);
+ dsaf_set_bit(mcast_cfg4, DSAF_TBL_MCAST_CFG4_OLD_EN_S,
+ mcast->tbl_mcast_old_en);
+ dsaf_set_field(mcast_cfg4, DSAF_TBL_MCAST_CFG4_VM128_112_M,
+ DSAF_TBL_MCAST_CFG4_VM128_112_S,
+ mcast->tbl_mcast_port_msk[4]);
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_MCAST_CFG_4_0_REG, mcast_cfg4);
+
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_MCAST_CFG_3_0_REG,
+ mcast->tbl_mcast_port_msk[3]);
+
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_MCAST_CFG_2_0_REG,
+ mcast->tbl_mcast_port_msk[2]);
+
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_MCAST_CFG_1_0_REG,
+ mcast->tbl_mcast_port_msk[1]);
+
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_MCAST_CFG_0_0_REG,
+ mcast->tbl_mcast_port_msk[0]);
+}
+
+/**
+ * hns_dsaf_tbl_tcam_ucast_cfg - write the tcam unicast config register
+ * @dsaf_dev: dsa fabric id
+ * @tbl_tcam_ucast: addr
+ */
+static void hns_dsaf_tbl_tcam_ucast_cfg(
+ struct dsaf_device *dsaf_dev,
+ struct dsaf_tbl_tcam_ucast_cfg *tbl_tcam_ucast)
+{
+ u32 ucast_cfg1;
+
+ ucast_cfg1 = dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_UCAST_CFG_0_REG);
+ dsaf_set_bit(ucast_cfg1, DSAF_TBL_UCAST_CFG1_MAC_DISCARD_S,
+ tbl_tcam_ucast->tbl_ucast_mac_discard);
+ dsaf_set_bit(ucast_cfg1, DSAF_TBL_UCAST_CFG1_ITEM_VLD_S,
+ tbl_tcam_ucast->tbl_ucast_item_vld);
+ dsaf_set_bit(ucast_cfg1, DSAF_TBL_UCAST_CFG1_OLD_EN_S,
+ tbl_tcam_ucast->tbl_ucast_old_en);
+ dsaf_set_bit(ucast_cfg1, DSAF_TBL_UCAST_CFG1_DVC_S,
+ tbl_tcam_ucast->tbl_ucast_dvc);
+ dsaf_set_field(ucast_cfg1, DSAF_TBL_UCAST_CFG1_OUT_PORT_M,
+ DSAF_TBL_UCAST_CFG1_OUT_PORT_S,
+ tbl_tcam_ucast->tbl_ucast_out_port);
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_UCAST_CFG_0_REG, ucast_cfg1);
+}
+
+/**
+ * hns_dsaf_tbl_line_cfg - write the line table config register
+ * @dsaf_dev: dsa fabric id
+ * @tbl_lin: addr
+ */
+static void hns_dsaf_tbl_line_cfg(struct dsaf_device *dsaf_dev,
+ struct dsaf_tbl_line_cfg *tbl_lin)
+{
+ u32 tbl_line;
+
+ tbl_line = dsaf_read_dev(dsaf_dev, DSAF_TBL_LIN_CFG_0_REG);
+ dsaf_set_bit(tbl_line, DSAF_TBL_LINE_CFG_MAC_DISCARD_S,
+ tbl_lin->tbl_line_mac_discard);
+ dsaf_set_bit(tbl_line, DSAF_TBL_LINE_CFG_DVC_S,
+ tbl_lin->tbl_line_dvc);
+ dsaf_set_field(tbl_line, DSAF_TBL_LINE_CFG_OUT_PORT_M,
+ DSAF_TBL_LINE_CFG_OUT_PORT_S,
+ tbl_lin->tbl_line_out_port);
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_LIN_CFG_0_REG, tbl_line);
+}
+
+/**
+ * hns_dsaf_tbl_tcam_mcast_pul - pulse the multicast valid bit to commit the write
+ * @dsaf_dev: dsa fabric id
+ */
+static void hns_dsaf_tbl_tcam_mcast_pul(struct dsaf_device *dsaf_dev)
+{
+ u32 o_tbl_pul;
+
+ o_tbl_pul = dsaf_read_dev(dsaf_dev, DSAF_TBL_PUL_0_REG);
+ dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_MCAST_VLD_S, 1);
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_PUL_0_REG, o_tbl_pul);
+ dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_MCAST_VLD_S, 0);
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_PUL_0_REG, o_tbl_pul);
+}
+
+/**
+ * hns_dsaf_tbl_line_pul - pulse the line valid bit to commit the write
+ * @dsaf_dev: dsa fabric id
+ */
+static void hns_dsaf_tbl_line_pul(struct dsaf_device *dsaf_dev)
+{
+ u32 tbl_pul;
+
+ tbl_pul = dsaf_read_dev(dsaf_dev, DSAF_TBL_PUL_0_REG);
+ dsaf_set_bit(tbl_pul, DSAF_TBL_PUL_LINE_VLD_S, 1);
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_PUL_0_REG, tbl_pul);
+ dsaf_set_bit(tbl_pul, DSAF_TBL_PUL_LINE_VLD_S, 0);
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_PUL_0_REG, tbl_pul);
+}
+
+/**
+ * hns_dsaf_tbl_tcam_data_mcast_pul - pulse the tcam data and multicast valid bits
+ * @dsaf_dev: dsa fabric id
+ */
+static void hns_dsaf_tbl_tcam_data_mcast_pul(
+ struct dsaf_device *dsaf_dev)
+{
+ u32 o_tbl_pul;
+
+ o_tbl_pul = dsaf_read_dev(dsaf_dev, DSAF_TBL_PUL_0_REG);
+ dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_TCAM_DATA_VLD_S, 1);
+ dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_MCAST_VLD_S, 1);
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_PUL_0_REG, o_tbl_pul);
+ dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_TCAM_DATA_VLD_S, 0);
+ dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_MCAST_VLD_S, 0);
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_PUL_0_REG, o_tbl_pul);
+}
+
+/**
+ * hns_dsaf_tbl_tcam_data_ucast_pul - pulse the tcam data and unicast valid bits
+ * @dsaf_dev: dsa fabric id
+ */
+static void hns_dsaf_tbl_tcam_data_ucast_pul(
+ struct dsaf_device *dsaf_dev)
+{
+ u32 o_tbl_pul;
+
+ o_tbl_pul = dsaf_read_dev(dsaf_dev, DSAF_TBL_PUL_0_REG);
+ dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_TCAM_DATA_VLD_S, 1);
+ dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_UCAST_VLD_S, 1);
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_PUL_0_REG, o_tbl_pul);
+ dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_TCAM_DATA_VLD_S, 0);
+ dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_UCAST_VLD_S, 0);
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_PUL_0_REG, o_tbl_pul);
+}
+
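+/**
+ * hns_dsaf_set_promisc_mode - set promiscuous (mix) mode
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @en: enable or disable
+ *
+ * Only takes effect on DSAFv1 non-debug ports, where it toggles the
+ * DSAF_CFG mix-mode bit.
+ */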
+void hns_dsaf_set_promisc_mode(struct dsaf_device *dsaf_dev, u32 en)
+{
+ if (AE_IS_VER1(dsaf_dev->dsaf_ver) && !HNS_DSAF_IS_DEBUG(dsaf_dev))
+ dsaf_set_dev_bit(dsaf_dev, DSAF_CFG_0_REG,
+ DSAF_CFG_MIX_MODE_S, !!en);
+}
+
+/**
+ * hns_dsaf_tbl_stat_en - enable table lookup statistics counters
+ * @dsaf_dev: dsa fabric id
+ */
+static void hns_dsaf_tbl_stat_en(struct dsaf_device *dsaf_dev)
+{
+ u32 o_tbl_ctrl;
+
+ o_tbl_ctrl = dsaf_read_dev(dsaf_dev, DSAF_TBL_DFX_CTRL_0_REG);
+ dsaf_set_bit(o_tbl_ctrl, DSAF_TBL_DFX_LINE_LKUP_NUM_EN_S, 1);
+ dsaf_set_bit(o_tbl_ctrl, DSAF_TBL_DFX_UC_LKUP_NUM_EN_S, 1);
+ dsaf_set_bit(o_tbl_ctrl, DSAF_TBL_DFX_MC_LKUP_NUM_EN_S, 1);
+ dsaf_set_bit(o_tbl_ctrl, DSAF_TBL_DFX_BC_LKUP_NUM_EN_S, 1);
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_DFX_CTRL_0_REG, o_tbl_ctrl);
+}
+
+/**
+ * hns_dsaf_rocee_bp_en - rocee back press enable
+ * @dsaf_dev: dsa fabric id
+ */
+static void hns_dsaf_rocee_bp_en(struct dsaf_device *dsaf_dev)
+{
+ if (AE_IS_VER1(dsaf_dev->dsaf_ver))
+ dsaf_set_dev_bit(dsaf_dev, DSAF_XGE_CTRL_SIG_CFG_0_REG,
+ DSAF_FC_XGE_TX_PAUSE_S, 1);
+}
+
+/* set msk for dsaf exception irq*/
+static void hns_dsaf_int_xge_msk_set(struct dsaf_device *dsaf_dev,
+ u32 chnn_num, u32 mask_set)
+{
+ dsaf_write_dev(dsaf_dev,
+ DSAF_XGE_INT_MSK_0_REG + 0x4 * chnn_num, mask_set);
+}
+
+static void hns_dsaf_int_ppe_msk_set(struct dsaf_device *dsaf_dev,
+ u32 chnn_num, u32 msk_set)
+{
+ dsaf_write_dev(dsaf_dev,
+ DSAF_PPE_INT_MSK_0_REG + 0x4 * chnn_num, msk_set);
+}
+
+static void hns_dsaf_int_rocee_msk_set(struct dsaf_device *dsaf_dev,
+ u32 chnn, u32 msk_set)
+{
+ dsaf_write_dev(dsaf_dev,
+ DSAF_ROCEE_INT_MSK_0_REG + 0x4 * chnn, msk_set);
+}
+
+static void
+hns_dsaf_int_tbl_msk_set(struct dsaf_device *dsaf_dev, u32 msk_set)
+{
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_INT_MSK_0_REG, msk_set);
+}
+
+/* clr dsaf exception irq*/
+static void hns_dsaf_int_xge_src_clr(struct dsaf_device *dsaf_dev,
+ u32 chnn_num, u32 int_src)
+{
+ dsaf_write_dev(dsaf_dev,
+ DSAF_XGE_INT_SRC_0_REG + 0x4 * chnn_num, int_src);
+}
+
+static void hns_dsaf_int_ppe_src_clr(struct dsaf_device *dsaf_dev,
+ u32 chnn, u32 int_src)
+{
+ dsaf_write_dev(dsaf_dev,
+ DSAF_PPE_INT_SRC_0_REG + 0x4 * chnn, int_src);
+}
+
+static void hns_dsaf_int_rocee_src_clr(struct dsaf_device *dsaf_dev,
+ u32 chnn, u32 int_src)
+{
+ dsaf_write_dev(dsaf_dev,
+ DSAF_ROCEE_INT_SRC_0_REG + 0x4 * chnn, int_src);
+}
+
+static void hns_dsaf_int_tbl_src_clr(struct dsaf_device *dsaf_dev,
+ u32 int_src)
+{
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_INT_SRC_0_REG, int_src);
+}
+
+/**
+ * hns_dsaf_single_line_tbl_cfg - config one line table entry
+ * @dsaf_dev: dsa fabric id
+ * @address: the address
+ * @ptbl_line: the line
+ */
+static void hns_dsaf_single_line_tbl_cfg(
+ struct dsaf_device *dsaf_dev,
+ u32 address, struct dsaf_tbl_line_cfg *ptbl_line)
+{
+ spin_lock_bh(&dsaf_dev->tcam_lock);
+
+ /*Write Addr*/
+ hns_dsaf_tbl_line_addr_cfg(dsaf_dev, address);
+
+ /*Write Line*/
+ hns_dsaf_tbl_line_cfg(dsaf_dev, ptbl_line);
+
+ /*Write Pulse*/
+ hns_dsaf_tbl_line_pul(dsaf_dev);
+
+ spin_unlock_bh(&dsaf_dev->tcam_lock);
+}
+
+/**
+ * hns_dsaf_tcam_uc_cfg - config a tcam unicast entry
+ * @dsaf_dev: dsa fabric id
+ * @address: the address
+ * @ptbl_tcam_data: the data
+ * @ptbl_tcam_ucast: unicast
+ */
+static void hns_dsaf_tcam_uc_cfg(
+ struct dsaf_device *dsaf_dev, u32 address,
+ struct dsaf_tbl_tcam_data *ptbl_tcam_data,
+ struct dsaf_tbl_tcam_ucast_cfg *ptbl_tcam_ucast)
+{
+ spin_lock_bh(&dsaf_dev->tcam_lock);
+
+ /*Write Addr*/
+ hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);
+ /*Write Tcam Data*/
+ hns_dsaf_tbl_tcam_data_cfg(dsaf_dev, ptbl_tcam_data);
+ /*Write Tcam Ucast*/
+ hns_dsaf_tbl_tcam_ucast_cfg(dsaf_dev, ptbl_tcam_ucast);
+ /*Write Pulse*/
+ hns_dsaf_tbl_tcam_data_ucast_pul(dsaf_dev);
+
+ spin_unlock_bh(&dsaf_dev->tcam_lock);
+}
+
+/**
+ * hns_dsaf_tcam_mc_cfg - cfg the tcam for mc
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @address: tcam index
+ * @ptbl_tcam_data: tcam data struct pointer
+ * @ptbl_tcam_mask: tcam mask struct pointer, it must be null for HNSv1
+ * @ptbl_tcam_mcast: tcam data struct pointer
+ */
+static void hns_dsaf_tcam_mc_cfg(
+ struct dsaf_device *dsaf_dev, u32 address,
+ struct dsaf_tbl_tcam_data *ptbl_tcam_data,
+ struct dsaf_tbl_tcam_data *ptbl_tcam_mask,
+ struct dsaf_tbl_tcam_mcast_cfg *ptbl_tcam_mcast)
+{
+ spin_lock_bh(&dsaf_dev->tcam_lock);
+
+ /*Write Addr*/
+ hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);
+ /*Write Tcam Data*/
+ hns_dsaf_tbl_tcam_data_cfg(dsaf_dev, ptbl_tcam_data);
+ /*Write Tcam Mcast*/
+ hns_dsaf_tbl_tcam_mcast_cfg(dsaf_dev, ptbl_tcam_mcast);
+ /* Write Match Data */
+ if (ptbl_tcam_mask)
+ hns_dsaf_tbl_tcam_match_cfg(dsaf_dev, ptbl_tcam_mask);
+
+ /* Write Pulse */
+ hns_dsaf_tbl_tcam_data_mcast_pul(dsaf_dev);
+
+ spin_unlock_bh(&dsaf_dev->tcam_lock);
+}
+
+/**
+ * hns_dsaf_tcam_uc_cfg_vague - config a tcam unicast entry with a mask
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @address: the address
+ * @tcam_data: the data
+ * @tcam_mask: the mask
+ * @tcam_uc: the unicast data
+ */
+static void hns_dsaf_tcam_uc_cfg_vague(struct dsaf_device *dsaf_dev,
+ u32 address,
+ struct dsaf_tbl_tcam_data *tcam_data,
+ struct dsaf_tbl_tcam_data *tcam_mask,
+ struct dsaf_tbl_tcam_ucast_cfg *tcam_uc)
+{
+ spin_lock_bh(&dsaf_dev->tcam_lock);
+ hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);
+ hns_dsaf_tbl_tcam_data_cfg(dsaf_dev, tcam_data);
+ hns_dsaf_tbl_tcam_ucast_cfg(dsaf_dev, tcam_uc);
+ hns_dsaf_tbl_tcam_match_cfg(dsaf_dev, tcam_mask);
+ hns_dsaf_tbl_tcam_data_ucast_pul(dsaf_dev);
+
+ /*Restore Match Data*/
+ tcam_mask->tbl_tcam_data_high = 0xffffffff;
+ tcam_mask->tbl_tcam_data_low = 0xffffffff;
+ hns_dsaf_tbl_tcam_match_cfg(dsaf_dev, tcam_mask);
+
+ spin_unlock_bh(&dsaf_dev->tcam_lock);
+}
+
+/**
+ * hns_dsaf_tcam_mc_cfg_vague - config a tcam multicast entry with a mask
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @address: the address
+ * @tcam_data: the data
+ * @tcam_mask: the mask
+ * @tcam_mc: the multicast data
+ */
+static void hns_dsaf_tcam_mc_cfg_vague(struct dsaf_device *dsaf_dev,
+ u32 address,
+ struct dsaf_tbl_tcam_data *tcam_data,
+ struct dsaf_tbl_tcam_data *tcam_mask,
+ struct dsaf_tbl_tcam_mcast_cfg *tcam_mc)
+{
+ spin_lock_bh(&dsaf_dev->tcam_lock);
+ hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);
+ hns_dsaf_tbl_tcam_data_cfg(dsaf_dev, tcam_data);
+ hns_dsaf_tbl_tcam_mcast_cfg(dsaf_dev, tcam_mc);
+ hns_dsaf_tbl_tcam_match_cfg(dsaf_dev, tcam_mask);
+ hns_dsaf_tbl_tcam_data_mcast_pul(dsaf_dev);
+
+ /*Restore Match Data*/
+ tcam_mask->tbl_tcam_data_high = 0xffffffff;
+ tcam_mask->tbl_tcam_data_low = 0xffffffff;
+ hns_dsaf_tbl_tcam_match_cfg(dsaf_dev, tcam_mask);
+
+ spin_unlock_bh(&dsaf_dev->tcam_lock);
+}
+
+/**
+ * hns_dsaf_tcam_mc_invld - invalidate a tcam multicast entry
+ * @dsaf_dev: dsa fabric id
+ * @address: the address
+ */
+static void hns_dsaf_tcam_mc_invld(struct dsaf_device *dsaf_dev, u32 address)
+{
+ spin_lock_bh(&dsaf_dev->tcam_lock);
+
+ /*Write Addr*/
+ hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);
+
+ /*write tcam mcast*/
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_MCAST_CFG_0_0_REG, 0);
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_MCAST_CFG_1_0_REG, 0);
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_MCAST_CFG_2_0_REG, 0);
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_MCAST_CFG_3_0_REG, 0);
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_MCAST_CFG_4_0_REG, 0);
+
+ /*Write Pulse*/
+ hns_dsaf_tbl_tcam_mcast_pul(dsaf_dev);
+
+ spin_unlock_bh(&dsaf_dev->tcam_lock);
+}
+
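+/* unpack the MAC address stored in a driver tcam key */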
+static void
+hns_dsaf_tcam_addr_get(struct dsaf_drv_tbl_tcam_key *mac_key, u8 *addr)
+{
+ addr[0] = mac_key->high.bits.mac_0;
+ addr[1] = mac_key->high.bits.mac_1;
+ addr[2] = mac_key->high.bits.mac_2;
+ addr[3] = mac_key->high.bits.mac_3;
+ addr[4] = mac_key->low.bits.mac_4;
+ addr[5] = mac_key->low.bits.mac_5;
+}
+
+/**
+ * hns_dsaf_tcam_uc_get - read back a tcam unicast entry
+ * @dsaf_dev: dsa fabric id
+ * @address: the address
+ * @ptbl_tcam_data: the data
+ * @ptbl_tcam_ucast: unicast
+ */
+static void hns_dsaf_tcam_uc_get(
+ struct dsaf_device *dsaf_dev, u32 address,
+ struct dsaf_tbl_tcam_data *ptbl_tcam_data,
+ struct dsaf_tbl_tcam_ucast_cfg *ptbl_tcam_ucast)
+{
+ u32 tcam_read_data0;
+ u32 tcam_read_data4;
+
+ spin_lock_bh(&dsaf_dev->tcam_lock);
+
+ /*Write Addr*/
+ hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);
+
+ /*read tcam item pulse*/
+ hns_dsaf_tbl_tcam_load_pul(dsaf_dev);
+
+ /*read tcam data*/
+ ptbl_tcam_data->tbl_tcam_data_high
+ = dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RDATA_HIGH_0_REG);
+ ptbl_tcam_data->tbl_tcam_data_low
+ = dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RDATA_LOW_0_REG);
+
+ /*read tcam mcast*/
+ tcam_read_data0 = dsaf_read_dev(dsaf_dev,
+ DSAF_TBL_TCAM_RAM_RDATA0_0_REG);
+ tcam_read_data4 = dsaf_read_dev(dsaf_dev,
+ DSAF_TBL_TCAM_RAM_RDATA4_0_REG);
+
+ ptbl_tcam_ucast->tbl_ucast_item_vld
+ = dsaf_get_bit(tcam_read_data4,
+ DSAF_TBL_MCAST_CFG4_ITEM_VLD_S);
+ ptbl_tcam_ucast->tbl_ucast_old_en
+ = dsaf_get_bit(tcam_read_data4, DSAF_TBL_MCAST_CFG4_OLD_EN_S);
+ ptbl_tcam_ucast->tbl_ucast_mac_discard
+ = dsaf_get_bit(tcam_read_data0,
+ DSAF_TBL_UCAST_CFG1_MAC_DISCARD_S);
+ ptbl_tcam_ucast->tbl_ucast_out_port
+ = dsaf_get_field(tcam_read_data0,
+ DSAF_TBL_UCAST_CFG1_OUT_PORT_M,
+ DSAF_TBL_UCAST_CFG1_OUT_PORT_S);
+ ptbl_tcam_ucast->tbl_ucast_dvc
+ = dsaf_get_bit(tcam_read_data0, DSAF_TBL_UCAST_CFG1_DVC_S);
+
+ spin_unlock_bh(&dsaf_dev->tcam_lock);
+}
+
+/**
+ * hns_dsaf_tcam_mc_get - read back a tcam multicast entry
+ * @dsaf_dev: dsa fabric id
+ * @address: the address
+ * @ptbl_tcam_data: the data
+ * @ptbl_tcam_mcast: tcam multicast data
+ */
+static void hns_dsaf_tcam_mc_get(
+ struct dsaf_device *dsaf_dev, u32 address,
+ struct dsaf_tbl_tcam_data *ptbl_tcam_data,
+ struct dsaf_tbl_tcam_mcast_cfg *ptbl_tcam_mcast)
+{
+ u32 data_tmp;
+
+ spin_lock_bh(&dsaf_dev->tcam_lock);
+
+ /*Write Addr*/
+ hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);
+
+ /*read tcam item pulse*/
+ hns_dsaf_tbl_tcam_load_pul(dsaf_dev);
+
+ /*read tcam data*/
+ ptbl_tcam_data->tbl_tcam_data_high =
+ dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RDATA_HIGH_0_REG);
+ ptbl_tcam_data->tbl_tcam_data_low =
+ dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RDATA_LOW_0_REG);
+
+ /*read tcam mcast*/
+ ptbl_tcam_mcast->tbl_mcast_port_msk[0] =
+ dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RAM_RDATA0_0_REG);
+ ptbl_tcam_mcast->tbl_mcast_port_msk[1] =
+ dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RAM_RDATA1_0_REG);
+ ptbl_tcam_mcast->tbl_mcast_port_msk[2] =
+ dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RAM_RDATA2_0_REG);
+ ptbl_tcam_mcast->tbl_mcast_port_msk[3] =
+ dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RAM_RDATA3_0_REG);
+
+ data_tmp = dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RAM_RDATA4_0_REG);
+ ptbl_tcam_mcast->tbl_mcast_item_vld =
+ dsaf_get_bit(data_tmp, DSAF_TBL_MCAST_CFG4_ITEM_VLD_S);
+ ptbl_tcam_mcast->tbl_mcast_old_en =
+ dsaf_get_bit(data_tmp, DSAF_TBL_MCAST_CFG4_OLD_EN_S);
+ ptbl_tcam_mcast->tbl_mcast_port_msk[4] =
+ dsaf_get_field(data_tmp, DSAF_TBL_MCAST_CFG4_VM128_112_M,
+ DSAF_TBL_MCAST_CFG4_VM128_112_S);
+
+ spin_unlock_bh(&dsaf_dev->tcam_lock);
+}
+
+/**
+ * hns_dsaf_tbl_line_init - init the line table
+ * @dsaf_dev: dsa fabric id
+ */
+static void hns_dsaf_tbl_line_init(struct dsaf_device *dsaf_dev)
+{
+ u32 i;
+ /* by default, set all line mac table entries to discard */
+ struct dsaf_tbl_line_cfg tbl_line[] = {{1, 0, 0} };
+
+ for (i = 0; i < DSAF_LINE_SUM; i++)
+ hns_dsaf_single_line_tbl_cfg(dsaf_dev, i, tbl_line);
+}
+
+/**
+ * hns_dsaf_tbl_tcam_init - init the tcam table
+ * @dsaf_dev: dsa fabric id
+ */
+static void hns_dsaf_tbl_tcam_init(struct dsaf_device *dsaf_dev)
+{
+ u32 i;
+ struct dsaf_tbl_tcam_data tcam_data[] = {{0, 0} };
+ struct dsaf_tbl_tcam_ucast_cfg tcam_ucast[] = {{0, 0, 0, 0, 0} };
+
+ /*tcam tbl*/
+ for (i = 0; i < DSAF_TCAM_SUM; i++)
+ hns_dsaf_tcam_uc_cfg(dsaf_dev, i, tcam_data, tcam_ucast);
+}
+
+/**
+ * hns_dsaf_pfc_en_cfg - dsaf pfc pause cfg
+ * @dsaf_dev: dsa fabric id
+ * @mac_id: mac (port) id
+ * @tc_en: traffic class enable bits
+ */
+static void hns_dsaf_pfc_en_cfg(struct dsaf_device *dsaf_dev,
+ int mac_id, int tc_en)
+{
+ dsaf_write_dev(dsaf_dev, DSAF_PFC_EN_0_REG + mac_id * 4, tc_en);
+}
+
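+/* enable/disable pfc pause frame handling for a port; on dsaf v1 pfc
+ * pause cannot be turned off, so only complain if that is requested
+ */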
+static void hns_dsaf_set_pfc_pause(struct dsaf_device *dsaf_dev,
+ int mac_id, int tx_en, int rx_en)
+{
+ if (AE_IS_VER1(dsaf_dev->dsaf_ver)) {
+ if (!tx_en || !rx_en)
+ dev_err(dsaf_dev->dev, "dsaf v1 can not close pfc!\n");
+
+ return;
+ }
+
+ dsaf_set_dev_bit(dsaf_dev, DSAF_PAUSE_CFG_REG + mac_id * 4,
+ DSAF_PFC_PAUSE_RX_EN_B, !!rx_en);
+ dsaf_set_dev_bit(dsaf_dev, DSAF_PAUSE_CFG_REG + mac_id * 4,
+ DSAF_PFC_PAUSE_TX_EN_B, !!tx_en);
+}
+
+int hns_dsaf_set_rx_mac_pause_en(struct dsaf_device *dsaf_dev, int mac_id,
+ u32 en)
+{
+ if (AE_IS_VER1(dsaf_dev->dsaf_ver)) {
+ if (!en) {
+ dev_err(dsaf_dev->dev, "dsafv1 can't close rx_pause!\n");
+ return -EINVAL;
+ }
+ }
+
+ dsaf_set_dev_bit(dsaf_dev, DSAF_PAUSE_CFG_REG + mac_id * 4,
+ DSAF_MAC_PAUSE_RX_EN_B, !!en);
+
+ return 0;
+}
+
+void hns_dsaf_get_rx_mac_pause_en(struct dsaf_device *dsaf_dev, int mac_id,
+ u32 *en)
+{
+ if (AE_IS_VER1(dsaf_dev->dsaf_ver))
+ *en = 1;
+ else
+ *en = dsaf_get_dev_bit(dsaf_dev,
+ DSAF_PAUSE_CFG_REG + mac_id * 4,
+ DSAF_MAC_PAUSE_RX_EN_B);
+}
+
+/**
+ * hns_dsaf_comm_init - init dsaf common registers
+ * @dsaf_dev: dsa fabric id
+ */
+static void hns_dsaf_comm_init(struct dsaf_device *dsaf_dev)
+{
+ u32 i;
+ u32 o_dsaf_cfg;
+ bool is_ver1 = AE_IS_VER1(dsaf_dev->dsaf_ver);
+
+ o_dsaf_cfg = dsaf_read_dev(dsaf_dev, DSAF_CFG_0_REG);
+ dsaf_set_bit(o_dsaf_cfg, DSAF_CFG_EN_S, dsaf_dev->dsaf_en);
+ dsaf_set_bit(o_dsaf_cfg, DSAF_CFG_TC_MODE_S, dsaf_dev->dsaf_tc_mode);
+ dsaf_set_bit(o_dsaf_cfg, DSAF_CFG_CRC_EN_S, 0);
+ dsaf_set_bit(o_dsaf_cfg, DSAF_CFG_MIX_MODE_S, 0);
+ dsaf_set_bit(o_dsaf_cfg, DSAF_CFG_LOCA_ADDR_EN_S, 0);
+ dsaf_write_dev(dsaf_dev, DSAF_CFG_0_REG, o_dsaf_cfg);
+
+ hns_dsaf_reg_cnt_clr_ce(dsaf_dev, 1);
+ hns_dsaf_stp_port_type_cfg(dsaf_dev, DSAF_STP_PORT_TYPE_FORWARD);
+
+ /* set 22 queues per tx ppe engine, only used in switch mode */
+ hns_dsaf_ppe_qid_cfg(dsaf_dev, DSAF_DEFAUTL_QUEUE_NUM_PER_PPE);
+
+ /* set promisc def queue id */
+ hns_dsaf_mix_def_qid_cfg(dsaf_dev);
+
+ /* set inner loopback queue id */
+ hns_dsaf_inner_qid_cfg(dsaf_dev);
+
+ /* in non switch mode, set all port to access mode */
+ hns_dsaf_sw_port_type_cfg(dsaf_dev, DSAF_SW_PORT_TYPE_NON_VLAN);
+
+ /* set dsaf pfc to 0 for parsing rx pause */
+ for (i = 0; i < DSAF_COMM_CHN; i++) {
+ hns_dsaf_pfc_en_cfg(dsaf_dev, i, 0);
+ hns_dsaf_set_pfc_pause(dsaf_dev, i, is_ver1, is_ver1);
+ }
+
+ /*msk and clr exception irqs */
+ for (i = 0; i < DSAF_COMM_CHN; i++) {
+ hns_dsaf_int_xge_src_clr(dsaf_dev, i, 0xfffffffful);
+ hns_dsaf_int_ppe_src_clr(dsaf_dev, i, 0xfffffffful);
+ hns_dsaf_int_rocee_src_clr(dsaf_dev, i, 0xfffffffful);
+
+ hns_dsaf_int_xge_msk_set(dsaf_dev, i, 0xfffffffful);
+ hns_dsaf_int_ppe_msk_set(dsaf_dev, i, 0xfffffffful);
+ hns_dsaf_int_rocee_msk_set(dsaf_dev, i, 0xfffffffful);
+ }
+ hns_dsaf_int_tbl_src_clr(dsaf_dev, 0xfffffffful);
+ hns_dsaf_int_tbl_msk_set(dsaf_dev, 0xfffffffful);
+}
+
+/**
+ * hns_dsaf_inode_init - init the xbar inodes
+ * @dsaf_dev: dsa fabric id
+ */
+static void hns_dsaf_inode_init(struct dsaf_device *dsaf_dev)
+{
+ u32 reg;
+ u32 tc_cfg;
+ u32 i;
+
+ if (dsaf_dev->dsaf_tc_mode == HRD_DSAF_4TC_MODE)
+ tc_cfg = HNS_DSAF_I4TC_CFG;
+ else
+ tc_cfg = HNS_DSAF_I8TC_CFG;
+
+ if (AE_IS_VER1(dsaf_dev->dsaf_ver)) {
+ for (i = 0; i < DSAF_INODE_NUM; i++) {
+ reg = DSAF_INODE_IN_PORT_NUM_0_REG + 0x80 * i;
+ dsaf_set_dev_field(dsaf_dev, reg,
+ DSAF_INODE_IN_PORT_NUM_M,
+ DSAF_INODE_IN_PORT_NUM_S,
+ i % DSAF_XGE_NUM);
+ }
+ } else {
+ for (i = 0; i < DSAF_PORT_TYPE_NUM; i++) {
+ reg = DSAF_INODE_IN_PORT_NUM_0_REG + 0x80 * i;
+ dsaf_set_dev_field(dsaf_dev, reg,
+ DSAF_INODE_IN_PORT_NUM_M,
+ DSAF_INODE_IN_PORT_NUM_S, 0);
+ dsaf_set_dev_field(dsaf_dev, reg,
+ DSAFV2_INODE_IN_PORT1_NUM_M,
+ DSAFV2_INODE_IN_PORT1_NUM_S, 1);
+ dsaf_set_dev_field(dsaf_dev, reg,
+ DSAFV2_INODE_IN_PORT2_NUM_M,
+ DSAFV2_INODE_IN_PORT2_NUM_S, 2);
+ dsaf_set_dev_field(dsaf_dev, reg,
+ DSAFV2_INODE_IN_PORT3_NUM_M,
+ DSAFV2_INODE_IN_PORT3_NUM_S, 3);
+ dsaf_set_dev_field(dsaf_dev, reg,
+ DSAFV2_INODE_IN_PORT4_NUM_M,
+ DSAFV2_INODE_IN_PORT4_NUM_S, 4);
+ dsaf_set_dev_field(dsaf_dev, reg,
+ DSAFV2_INODE_IN_PORT5_NUM_M,
+ DSAFV2_INODE_IN_PORT5_NUM_S, 5);
+ }
+ }
+ for (i = 0; i < DSAF_INODE_NUM; i++) {
+ reg = DSAF_INODE_PRI_TC_CFG_0_REG + 0x80 * i;
+ dsaf_write_dev(dsaf_dev, reg, tc_cfg);
+ }
+}
+
+/**
+ * hns_dsaf_sbm_init - init the sbm
+ * @dsaf_dev: dsa fabric id
+ */
+static int hns_dsaf_sbm_init(struct dsaf_device *dsaf_dev)
+{
+ u32 flag;
+ u32 finish_msk;
+ u32 cnt = 0;
+ int ret;
+
+ if (AE_IS_VER1(dsaf_dev->dsaf_ver)) {
+ hns_dsaf_sbm_bp_wl_cfg(dsaf_dev);
+ finish_msk = DSAF_SRAM_INIT_OVER_M;
+ } else {
+ hns_dsafv2_sbm_bp_wl_cfg(dsaf_dev);
+ finish_msk = DSAFV2_SRAM_INIT_OVER_M;
+ }
+
+ /* enable sbm channel, disable sbm channel shortcut function */
+ hns_dsaf_sbm_cfg(dsaf_dev);
+
+ /* enable sbm mib */
+ ret = hns_dsaf_sbm_cfg_mib_en(dsaf_dev);
+ if (ret) {
+ dev_err(dsaf_dev->dev,
+ "hns_dsaf_sbm_cfg_mib_en fail,%s, ret=%d\n",
+ dsaf_dev->ae_dev.name, ret);
+ return ret;
+ }
+
+ /* enable sbm initial link sram */
+ hns_dsaf_sbm_link_sram_init_en(dsaf_dev);
+
+ do {
+ usleep_range(200, 210);/*udelay(200);*/
+ flag = dsaf_get_dev_field(dsaf_dev, DSAF_SRAM_INIT_OVER_0_REG,
+ finish_msk, DSAF_SRAM_INIT_OVER_S);
+ cnt++;
+ } while (flag != (finish_msk >> DSAF_SRAM_INIT_OVER_S) &&
+ cnt < DSAF_CFG_READ_CNT);
+
+ if (flag != (finish_msk >> DSAF_SRAM_INIT_OVER_S)) {
+ dev_err(dsaf_dev->dev,
+ "hns_dsaf_sbm_init fail %s, flag=%d, cnt=%d\n",
+ dsaf_dev->ae_dev.name, flag, cnt);
+ return -ENODEV;
+ }
+
+ hns_dsaf_rocee_bp_en(dsaf_dev);
+
+ return 0;
+}
+
+/**
+ * hns_dsaf_tbl_init - init the tables
+ * @dsaf_dev: dsa fabric id
+ */
+static void hns_dsaf_tbl_init(struct dsaf_device *dsaf_dev)
+{
+ hns_dsaf_tbl_stat_en(dsaf_dev);
+
+ hns_dsaf_tbl_tcam_init(dsaf_dev);
+ hns_dsaf_tbl_line_init(dsaf_dev);
+}
+
+/**
+ * hns_dsaf_voq_init - init the voq
+ * @dsaf_dev: dsa fabric id
+ */
+static void hns_dsaf_voq_init(struct dsaf_device *dsaf_dev)
+{
+ hns_dsaf_voq_bp_all_thrd_cfg(dsaf_dev);
+}
+
+/**
+ * hns_dsaf_init_hw - init dsa fabric hardware
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static int hns_dsaf_init_hw(struct dsaf_device *dsaf_dev)
+{
+ int ret;
+
+ dev_dbg(dsaf_dev->dev,
+ "hns_dsaf_init_hw begin %s !\n", dsaf_dev->ae_dev.name);
+
+ dsaf_dev->misc_op->dsaf_reset(dsaf_dev, 0);
+ mdelay(10);
+ dsaf_dev->misc_op->dsaf_reset(dsaf_dev, 1);
+
+ hns_dsaf_comm_init(dsaf_dev);
+
+ /*init XBAR_INODE*/
+ hns_dsaf_inode_init(dsaf_dev);
+
+ /*init SBM*/
+ ret = hns_dsaf_sbm_init(dsaf_dev);
+ if (ret)
+ return ret;
+
+ /*init TBL*/
+ hns_dsaf_tbl_init(dsaf_dev);
+
+ /*init VOQ*/
+ hns_dsaf_voq_init(dsaf_dev);
+
+ return 0;
+}
+
+/**
+ * hns_dsaf_remove_hw - uninit dsa fabric hardware
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static void hns_dsaf_remove_hw(struct dsaf_device *dsaf_dev)
+{
+ /*reset*/
+ dsaf_dev->misc_op->dsaf_reset(dsaf_dev, 0);
+}
+
+/**
+ * hns_dsaf_init - init dsa fabric
+ * @dsaf_dev: dsa fabric device struct pointer
+ * return 0 - success , negative --fail
+ */
+static int hns_dsaf_init(struct dsaf_device *dsaf_dev)
+{
+ struct dsaf_drv_priv *priv =
+ (struct dsaf_drv_priv *)hns_dsaf_dev_priv(dsaf_dev);
+ u32 i;
+ int ret;
+
+ if (HNS_DSAF_IS_DEBUG(dsaf_dev))
+ return 0;
+
+ if (AE_IS_VER1(dsaf_dev->dsaf_ver))
+ dsaf_dev->tcam_max_num = DSAF_TCAM_SUM;
+ else
+ dsaf_dev->tcam_max_num =
+ DSAF_TCAM_SUM - DSAFV2_MAC_FUZZY_TCAM_NUM;
+
+ spin_lock_init(&dsaf_dev->tcam_lock);
+ ret = hns_dsaf_init_hw(dsaf_dev);
+ if (ret)
+ return ret;
+
+ /* malloc mem for tcam mac key(vlan+mac) */
+ priv->soft_mac_tbl = vzalloc(array_size(DSAF_TCAM_SUM,
+ sizeof(*priv->soft_mac_tbl)));
+ if (!priv->soft_mac_tbl) {
+ ret = -ENOMEM;
+ goto remove_hw;
+ }
+
+ /* mark all entries invalid */
+ for (i = 0; i < DSAF_TCAM_SUM; i++)
+ (priv->soft_mac_tbl + i)->index = DSAF_INVALID_ENTRY_IDX;
+
+ return 0;
+
+remove_hw:
+ hns_dsaf_remove_hw(dsaf_dev);
+ return ret;
+}
+
+/**
+ * hns_dsaf_free - free dsa fabric
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static void hns_dsaf_free(struct dsaf_device *dsaf_dev)
+{
+ struct dsaf_drv_priv *priv =
+ (struct dsaf_drv_priv *)hns_dsaf_dev_priv(dsaf_dev);
+
+ hns_dsaf_remove_hw(dsaf_dev);
+
+ /* free all mac mem */
+ vfree(priv->soft_mac_tbl);
+ priv->soft_mac_tbl = NULL;
+}
+
+/**
+ * hns_dsaf_find_soft_mac_entry - find dsa fabric soft entry
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @mac_key: mac entry struct pointer
+ */
+static u16 hns_dsaf_find_soft_mac_entry(
+ struct dsaf_device *dsaf_dev,
+ struct dsaf_drv_tbl_tcam_key *mac_key)
+{
+ struct dsaf_drv_priv *priv =
+ (struct dsaf_drv_priv *)hns_dsaf_dev_priv(dsaf_dev);
+ struct dsaf_drv_soft_mac_tbl *soft_mac_entry;
+ u32 i;
+
+ soft_mac_entry = priv->soft_mac_tbl;
+ for (i = 0; i < dsaf_dev->tcam_max_num; i++) {
+ /* a valid entry with a matching key? */
+ if ((soft_mac_entry->index != DSAF_INVALID_ENTRY_IDX) &&
+ (soft_mac_entry->tcam_key.high.val == mac_key->high.val) &&
+ (soft_mac_entry->tcam_key.low.val == mac_key->low.val))
+ /* return the matching soft index */
+ return soft_mac_entry->index;
+
+ soft_mac_entry++;
+ }
+ return DSAF_INVALID_ENTRY_IDX;
+}
+
+/**
+ * hns_dsaf_find_empty_mac_entry - search dsa fabric soft empty-entry
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static u16 hns_dsaf_find_empty_mac_entry(struct dsaf_device *dsaf_dev)
+{
+ struct dsaf_drv_priv *priv =
+ (struct dsaf_drv_priv *)hns_dsaf_dev_priv(dsaf_dev);
+ struct dsaf_drv_soft_mac_tbl *soft_mac_entry;
+ u32 i;
+
+ soft_mac_entry = priv->soft_mac_tbl;
+ for (i = 0; i < dsaf_dev->tcam_max_num; i++) {
+ /* an unused (invalid) entry? */
+ if (soft_mac_entry->index == DSAF_INVALID_ENTRY_IDX)
+ /* return the index of the empty slot */
+ return i;
+
+ soft_mac_entry++;
+ }
+ return DSAF_INVALID_ENTRY_IDX;
+}
+
+/**
+ * hns_dsaf_find_empty_mac_entry_reverse - search dsa fabric soft
+ * empty-entry from the end
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static u16 hns_dsaf_find_empty_mac_entry_reverse(struct dsaf_device *dsaf_dev)
+{
+ struct dsaf_drv_priv *priv = hns_dsaf_dev_priv(dsaf_dev);
+ struct dsaf_drv_soft_mac_tbl *soft_mac_entry;
+ int i;
+
+ soft_mac_entry = priv->soft_mac_tbl + (DSAF_TCAM_SUM - 1);
+ for (i = (DSAF_TCAM_SUM - 1); i > 0; i--) {
+ /* search all entry from end to start.*/
+ if (soft_mac_entry->index == DSAF_INVALID_ENTRY_IDX)
+ return i;
+ soft_mac_entry--;
+ }
+ return DSAF_INVALID_ENTRY_IDX;
+}
+
+/**
+ * hns_dsaf_set_mac_key - set mac key
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @mac_key: tcam key pointer
+ * @vlan_id: vlan id
+ * @in_port_num: input port num
+ * @addr: mac addr
+ */
+static void hns_dsaf_set_mac_key(
+ struct dsaf_device *dsaf_dev,
+ struct dsaf_drv_tbl_tcam_key *mac_key, u16 vlan_id, u8 in_port_num,
+ u8 *addr)
+{
+ u8 port;
+
+ if (dsaf_dev->dsaf_mode <= DSAF_MODE_ENABLE)
+ /*DSAF mode : in port id fixed 0*/
+ port = 0;
+ else
+ /*non-dsaf mode*/
+ port = in_port_num;
+
+ mac_key->high.bits.mac_0 = addr[0];
+ mac_key->high.bits.mac_1 = addr[1];
+ mac_key->high.bits.mac_2 = addr[2];
+ mac_key->high.bits.mac_3 = addr[3];
+ mac_key->low.bits.mac_4 = addr[4];
+ mac_key->low.bits.mac_5 = addr[5];
+ mac_key->low.bits.port_vlan = 0;
+ dsaf_set_field(mac_key->low.bits.port_vlan, DSAF_TBL_TCAM_KEY_VLAN_M,
+ DSAF_TBL_TCAM_KEY_VLAN_S, vlan_id);
+ dsaf_set_field(mac_key->low.bits.port_vlan, DSAF_TBL_TCAM_KEY_PORT_M,
+ DSAF_TBL_TCAM_KEY_PORT_S, port);
+}
+
+/**
+ * hns_dsaf_set_mac_uc_entry - set mac uc-entry
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @mac_entry: uc-mac entry
+ */
+int hns_dsaf_set_mac_uc_entry(
+ struct dsaf_device *dsaf_dev,
+ struct dsaf_drv_mac_single_dest_entry *mac_entry)
+{
+ u16 entry_index;
+ struct dsaf_drv_tbl_tcam_key mac_key;
+ struct dsaf_tbl_tcam_ucast_cfg mac_data;
+ struct dsaf_drv_priv *priv =
+ (struct dsaf_drv_priv *)hns_dsaf_dev_priv(dsaf_dev);
+ struct dsaf_drv_soft_mac_tbl *soft_mac_entry = priv->soft_mac_tbl;
+ struct dsaf_tbl_tcam_data tcam_data;
+
+ /* mac addr check */
+ if (MAC_IS_ALL_ZEROS(mac_entry->addr) ||
+ MAC_IS_BROADCAST(mac_entry->addr) ||
+ MAC_IS_MULTICAST(mac_entry->addr)) {
+ dev_err(dsaf_dev->dev, "set_uc %s Mac %pM err!\n",
+ dsaf_dev->ae_dev.name, mac_entry->addr);
+ return -EINVAL;
+ }
+
+ /* config key */
+ hns_dsaf_set_mac_key(dsaf_dev, &mac_key, mac_entry->in_vlan_id,
+ mac_entry->in_port_num, mac_entry->addr);
+
+ /* does the entry already exist? */
+ entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
+ if (entry_index == DSAF_INVALID_ENTRY_IDX) {
+ /* if there is no existing entry, find an empty one */
+ entry_index = hns_dsaf_find_empty_mac_entry(dsaf_dev);
+ if (entry_index == DSAF_INVALID_ENTRY_IDX) {
+ /* no empty entry, return error */
+ dev_err(dsaf_dev->dev,
+ "set_uc_entry failed, %s Mac key(%#x:%#x)\n",
+ dsaf_dev->ae_dev.name,
+ mac_key.high.val, mac_key.low.val);
+ return -EINVAL;
+ }
+ }
+
+ dev_dbg(dsaf_dev->dev,
+ "set_uc_entry, %s Mac key(%#x:%#x) entry_index%d\n",
+ dsaf_dev->ae_dev.name, mac_key.high.val,
+ mac_key.low.val, entry_index);
+
+ /* config hardware entry */
+ mac_data.tbl_ucast_item_vld = 1;
+ mac_data.tbl_ucast_mac_discard = 0;
+ mac_data.tbl_ucast_old_en = 0;
+ /* default config dvc to 0 */
+ mac_data.tbl_ucast_dvc = 0;
+ mac_data.tbl_ucast_out_port = mac_entry->port_num;
+ tcam_data.tbl_tcam_data_high = mac_key.high.val;
+ tcam_data.tbl_tcam_data_low = mac_key.low.val;
+
+ hns_dsaf_tcam_uc_cfg(dsaf_dev, entry_index, &tcam_data, &mac_data);
+
+ /* config software entry */
+ soft_mac_entry += entry_index;
+ soft_mac_entry->index = entry_index;
+ soft_mac_entry->tcam_key.high.val = mac_key.high.val;
+ soft_mac_entry->tcam_key.low.val = mac_key.low.val;
+
+ return 0;
+}
+
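+/**
+ * hns_dsaf_rm_mac_addr - remove a unicast mac entry
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @mac_entry: uc-mac entry to remove
+ *
+ * The entry is only deleted if it is owned by @mac_entry->port_num.
+ */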
+int hns_dsaf_rm_mac_addr(
+ struct dsaf_device *dsaf_dev,
+ struct dsaf_drv_mac_single_dest_entry *mac_entry)
+{
+ u16 entry_index;
+ struct dsaf_tbl_tcam_ucast_cfg mac_data;
+ struct dsaf_drv_tbl_tcam_key mac_key;
+
+ /* mac addr check */
+ if (!is_valid_ether_addr(mac_entry->addr)) {
+ dev_err(dsaf_dev->dev, "rm_uc_addr %s Mac %pM err!\n",
+ dsaf_dev->ae_dev.name, mac_entry->addr);
+ return -EINVAL;
+ }
+
+ /* config key */
+ hns_dsaf_set_mac_key(dsaf_dev, &mac_key, mac_entry->in_vlan_id,
+ mac_entry->in_port_num, mac_entry->addr);
+
+ entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
+ if (entry_index == DSAF_INVALID_ENTRY_IDX) {
+ /* can not find the tcam entry, return 0 */
+ dev_info(dsaf_dev->dev,
+ "rm_uc_addr no tcam, %s Mac key(%#x:%#x)\n",
+ dsaf_dev->ae_dev.name,
+ mac_key.high.val, mac_key.low.val);
+ return 0;
+ }
+
+ dev_dbg(dsaf_dev->dev,
+ "rm_uc_addr, %s Mac key(%#x:%#x) entry_index%d\n",
+ dsaf_dev->ae_dev.name, mac_key.high.val,
+ mac_key.low.val, entry_index);
+
+ hns_dsaf_tcam_uc_get(
+ dsaf_dev, entry_index,
+ (struct dsaf_tbl_tcam_data *)&mac_key,
+ &mac_data);
+
+ /* do not clear a unicast entry that belongs to another port */
+ if (mac_entry->port_num != mac_data.tbl_ucast_out_port)
+ return -EFAULT;
+
+ return hns_dsaf_del_mac_entry(dsaf_dev,
+ mac_entry->in_vlan_id,
+ mac_entry->in_port_num,
+ mac_entry->addr);
+}
+
+static void hns_dsaf_setup_mc_mask(struct dsaf_device *dsaf_dev,
+ u8 port_num, u8 *mask, u8 *addr)
+{
+ if (MAC_IS_BROADCAST(addr))
+ eth_broadcast_addr(mask);
+ else
+ memcpy(mask, dsaf_dev->mac_cb[port_num]->mc_mask, ETH_ALEN);
+}
+
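+/* clear the address bits that are masked out: dst &= src, handling the
+ * 6-byte MAC address as three 16-bit words
+ */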
+static void hns_dsaf_mc_mask_bit_clear(char *dst, const char *src)
+{
+ u16 *a = (u16 *)dst;
+ const u16 *b = (const u16 *)src;
+
+ a[0] &= b[0];
+ a[1] &= b[1];
+ a[2] &= b[2];
+}
+
+/**
+ * hns_dsaf_add_mac_mc_port - add mac mc-port
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @mac_entry: mc-mac entry
+ */
+int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev,
+ struct dsaf_drv_mac_single_dest_entry *mac_entry)
+{
+ u16 entry_index;
+ struct dsaf_drv_tbl_tcam_key mac_key;
+ struct dsaf_drv_tbl_tcam_key mask_key;
+ struct dsaf_tbl_tcam_data *pmask_key = NULL;
+ struct dsaf_tbl_tcam_mcast_cfg mac_data;
+ struct dsaf_drv_priv *priv = hns_dsaf_dev_priv(dsaf_dev);
+ struct dsaf_drv_soft_mac_tbl *soft_mac_entry = priv->soft_mac_tbl;
+ struct dsaf_tbl_tcam_data tcam_data;
+ u8 mc_addr[ETH_ALEN];
+ int mskid;
+
+ /* check mac addr */
+ if (MAC_IS_ALL_ZEROS(mac_entry->addr)) {
+ dev_err(dsaf_dev->dev, "set_entry failed,addr %pM!\n",
+ mac_entry->addr);
+ return -EINVAL;
+ }
+
+ ether_addr_copy(mc_addr, mac_entry->addr);
+ if (!AE_IS_VER1(dsaf_dev->dsaf_ver)) {
+ u8 mc_mask[ETH_ALEN];
+
+ /* prepare for key data setting */
+ hns_dsaf_setup_mc_mask(dsaf_dev, mac_entry->in_port_num,
+ mc_mask, mac_entry->addr);
+ hns_dsaf_mc_mask_bit_clear(mc_addr, mc_mask);
+
+ /* config key mask */
+ hns_dsaf_set_mac_key(dsaf_dev, &mask_key,
+ 0x0,
+ 0xff,
+ mc_mask);
+
+ pmask_key = (struct dsaf_tbl_tcam_data *)(&mask_key);
+ }
+
+ /*config key */
+ hns_dsaf_set_mac_key(
+ dsaf_dev, &mac_key, mac_entry->in_vlan_id,
+ mac_entry->in_port_num, mc_addr);
+
+ memset(&mac_data, 0, sizeof(struct dsaf_tbl_tcam_mcast_cfg));
+
+ /* check if the tcam entry exists */
+ entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
+ if (entry_index == DSAF_INVALID_ENTRY_IDX) {
+ /* if it does not exist, find an empty entry */
+ entry_index = hns_dsaf_find_empty_mac_entry(dsaf_dev);
+ if (entry_index == DSAF_INVALID_ENTRY_IDX) {
+ /* no empty entry left, return error */
+ dev_err(dsaf_dev->dev,
+ "set_uc_entry failed, %s Mac key(%#x:%#x)\n",
+ dsaf_dev->ae_dev.name, mac_key.high.val,
+ mac_key.low.val);
+ return -EINVAL;
+ }
+ } else {
+ /* if it exists, add this port to the existing entry */
+ hns_dsaf_tcam_mc_get(dsaf_dev, entry_index, &tcam_data,
+ &mac_data);
+ }
+
+ /* config hardware entry */
+ if (mac_entry->port_num < DSAF_SERVICE_NW_NUM) {
+ mskid = mac_entry->port_num;
+ } else if (mac_entry->port_num >= DSAF_BASE_INNER_PORT_NUM) {
+ mskid = mac_entry->port_num -
+ DSAF_BASE_INNER_PORT_NUM + DSAF_SERVICE_NW_NUM;
+ } else {
+ dev_err(dsaf_dev->dev,
+ "%s,pnum(%d)error,key(%#x:%#x)\n",
+ dsaf_dev->ae_dev.name, mac_entry->port_num,
+ mac_key.high.val, mac_key.low.val);
+ return -EINVAL;
+ }
+ dsaf_set_bit(mac_data.tbl_mcast_port_msk[mskid / 32], mskid % 32, 1);
+ mac_data.tbl_mcast_old_en = 0;
+ mac_data.tbl_mcast_item_vld = 1;
+
+ dev_dbg(dsaf_dev->dev,
+ "set_uc_entry, %s Mac key(%#x:%#x) entry_index%d\n",
+ dsaf_dev->ae_dev.name, mac_key.high.val,
+ mac_key.low.val, entry_index);
+
+ tcam_data.tbl_tcam_data_high = mac_key.high.val;
+ tcam_data.tbl_tcam_data_low = mac_key.low.val;
+
+ /* config mc entry with mask */
+ hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index, &tcam_data,
+ pmask_key, &mac_data);
+
+ /*config software entry */
+ soft_mac_entry += entry_index;
+ soft_mac_entry->index = entry_index;
+ soft_mac_entry->tcam_key.high.val = mac_key.high.val;
+ soft_mac_entry->tcam_key.low.val = mac_key.low.val;
+
+ return 0;
+}
+
+/**
+ * hns_dsaf_del_mac_entry - del mac mc-port
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @vlan_id: vlan id
+ * @in_port_num: input port num
+ * @addr : mac addr
+ */
+int hns_dsaf_del_mac_entry(struct dsaf_device *dsaf_dev, u16 vlan_id,
+ u8 in_port_num, u8 *addr)
+{
+ u16 entry_index;
+ struct dsaf_drv_tbl_tcam_key mac_key;
+ struct dsaf_drv_priv *priv =
+ (struct dsaf_drv_priv *)hns_dsaf_dev_priv(dsaf_dev);
+ struct dsaf_drv_soft_mac_tbl *soft_mac_entry = priv->soft_mac_tbl;
+
+ /*check mac addr */
+ if (MAC_IS_ALL_ZEROS(addr) || MAC_IS_BROADCAST(addr)) {
+ dev_err(dsaf_dev->dev, "del_entry failed,addr %pM!\n",
+ addr);
+ return -EINVAL;
+ }
+
+ /*config key */
+ hns_dsaf_set_mac_key(dsaf_dev, &mac_key, vlan_id, in_port_num, addr);
+
+ /* does the entry exist? */
+ entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
+ if (entry_index == DSAF_INVALID_ENTRY_IDX) {
+ /* does not exist, return error */
+ dev_err(dsaf_dev->dev,
+ "del_mac_entry failed, %s Mac key(%#x:%#x)\n",
+ dsaf_dev->ae_dev.name,
+ mac_key.high.val, mac_key.low.val);
+ return -EINVAL;
+ }
+ dev_dbg(dsaf_dev->dev,
+ "del_mac_entry, %s Mac key(%#x:%#x) entry_index%d\n",
+ dsaf_dev->ae_dev.name, mac_key.high.val,
+ mac_key.low.val, entry_index);
+
+ /* do the delete operation */
+ hns_dsaf_tcam_mc_invld(dsaf_dev, entry_index);
+
+ /* del the soft entry */
+ soft_mac_entry += entry_index;
+ soft_mac_entry->index = DSAF_INVALID_ENTRY_IDX;
+
+ return 0;
+}
+
+/**
+ * hns_dsaf_del_mac_mc_port - del mac mc-port
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @mac_entry: mac entry
+ */
+int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev,
+ struct dsaf_drv_mac_single_dest_entry *mac_entry)
+{
+ u16 entry_index;
+ struct dsaf_drv_tbl_tcam_key mac_key;
+ struct dsaf_drv_priv *priv = hns_dsaf_dev_priv(dsaf_dev);
+ struct dsaf_drv_soft_mac_tbl *soft_mac_entry = priv->soft_mac_tbl;
+ u16 vlan_id;
+ u8 in_port_num;
+ struct dsaf_tbl_tcam_mcast_cfg mac_data;
+ struct dsaf_tbl_tcam_data tcam_data;
+ int mskid;
+ const u8 empty_msk[sizeof(mac_data.tbl_mcast_port_msk)] = {0};
+ struct dsaf_drv_tbl_tcam_key mask_key;
+ struct dsaf_tbl_tcam_data *pmask_key = NULL;
+ u8 mc_addr[ETH_ALEN];
+
+ if (!mac_entry) {
+ dev_err(dsaf_dev->dev,
+ "hns_dsaf_del_mac_mc_port mac_entry is NULL\n");
+ return -EINVAL;
+ }
+
+ /*check mac addr */
+ if (MAC_IS_ALL_ZEROS(mac_entry->addr)) {
+ dev_err(dsaf_dev->dev, "del_port failed, addr %pM!\n",
+ mac_entry->addr);
+ return -EINVAL;
+ }
+
+ /* always mask vlan_id field */
+ ether_addr_copy(mc_addr, mac_entry->addr);
+
+ if (!AE_IS_VER1(dsaf_dev->dsaf_ver)) {
+ u8 mc_mask[ETH_ALEN];
+
+ /* prepare for key data setting */
+ hns_dsaf_setup_mc_mask(dsaf_dev, mac_entry->in_port_num,
+ mc_mask, mac_entry->addr);
+ hns_dsaf_mc_mask_bit_clear(mc_addr, mc_mask);
+
+ /* config key mask */
+ hns_dsaf_set_mac_key(dsaf_dev, &mask_key, 0x00, 0xff, mc_mask);
+
+ pmask_key = (struct dsaf_tbl_tcam_data *)(&mask_key);
+ }
+
+ /* get key info */
+ vlan_id = mac_entry->in_vlan_id;
+ in_port_num = mac_entry->in_port_num;
+
+ /* config key */
+ hns_dsaf_set_mac_key(dsaf_dev, &mac_key, vlan_id, in_port_num, mc_addr);
+
+ /* check if the tcam entry exists */
+ entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
+ if (entry_index == DSAF_INVALID_ENTRY_IDX) {
+ /* not found, return error */
+ dev_err(dsaf_dev->dev,
+ "find_soft_mac_entry failed, %s Mac key(%#x:%#x)\n",
+ dsaf_dev->ae_dev.name,
+ mac_key.high.val, mac_key.low.val);
+ return -EINVAL;
+ }
+
+ dev_dbg(dsaf_dev->dev,
+ "del_mac_mc_port, %s key(%#x:%#x) index%d\n",
+ dsaf_dev->ae_dev.name, mac_key.high.val,
+ mac_key.low.val, entry_index);
+
+ /* read entry */
+ hns_dsaf_tcam_mc_get(dsaf_dev, entry_index, &tcam_data, &mac_data);
+
+ /*del the port*/
+ if (mac_entry->port_num < DSAF_SERVICE_NW_NUM) {
+ mskid = mac_entry->port_num;
+ } else if (mac_entry->port_num >= DSAF_BASE_INNER_PORT_NUM) {
+ mskid = mac_entry->port_num -
+ DSAF_BASE_INNER_PORT_NUM + DSAF_SERVICE_NW_NUM;
+ } else {
+ dev_err(dsaf_dev->dev,
+ "%s,pnum(%d)error,key(%#x:%#x)\n",
+ dsaf_dev->ae_dev.name, mac_entry->port_num,
+ mac_key.high.val, mac_key.low.val);
+ return -EINVAL;
+ }
+ dsaf_set_bit(mac_data.tbl_mcast_port_msk[mskid / 32], mskid % 32, 0);
+
+ /* if no port is left in the mask, delete the whole entry */
+ if (!memcmp(mac_data.tbl_mcast_port_msk, empty_msk,
+ sizeof(mac_data.tbl_mcast_port_msk))) {
+ hns_dsaf_tcam_mc_invld(dsaf_dev, entry_index);
+
+ /* del soft entry */
+ soft_mac_entry += entry_index;
+ soft_mac_entry->index = DSAF_INVALID_ENTRY_IDX;
+ } else { /* ports remain, just del this port and update the entry */
+ tcam_data.tbl_tcam_data_high = mac_key.high.val;
+ tcam_data.tbl_tcam_data_low = mac_key.low.val;
+
+ hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index,
+ &tcam_data,
+ pmask_key, &mac_data);
+ }
+
+ return 0;
+}
+
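+/**
+ * hns_dsaf_clr_mac_mc_port - clear a port from the multicast tcam entries
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @mac_id: mac port the multicast entries belong to
+ * @port_num: port to remove from the multicast port masks
+ *
+ * Removes @port_num from every non-broadcast multicast entry of @mac_id;
+ * if no other port still references an entry, @mac_id itself is removed
+ * from it as well, which deletes the entry.
+ */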
+int hns_dsaf_clr_mac_mc_port(struct dsaf_device *dsaf_dev, u8 mac_id,
+ u8 port_num)
+{
+ struct dsaf_drv_priv *priv = hns_dsaf_dev_priv(dsaf_dev);
+ struct dsaf_drv_soft_mac_tbl *soft_mac_entry;
+ struct dsaf_tbl_tcam_mcast_cfg mac_data;
+ int ret = 0, i;
+
+ if (HNS_DSAF_IS_DEBUG(dsaf_dev))
+ return 0;
+
+ for (i = 0; i < DSAF_TCAM_SUM - DSAFV2_MAC_FUZZY_TCAM_NUM; i++) {
+ u8 addr[ETH_ALEN];
+ u8 port;
+
+ soft_mac_entry = priv->soft_mac_tbl + i;
+
+ hns_dsaf_tcam_addr_get(&soft_mac_entry->tcam_key, addr);
+ port = dsaf_get_field(
+ soft_mac_entry->tcam_key.low.bits.port_vlan,
+ DSAF_TBL_TCAM_KEY_PORT_M,
+ DSAF_TBL_TCAM_KEY_PORT_S);
+ /* check valid tcam mc entry */
+ if (soft_mac_entry->index != DSAF_INVALID_ENTRY_IDX &&
+ port == mac_id &&
+ is_multicast_ether_addr(addr) &&
+ !is_broadcast_ether_addr(addr)) {
+ const u32 empty_msk[DSAF_PORT_MSK_NUM] = {0};
+ struct dsaf_drv_mac_single_dest_entry mac_entry;
+
+ /* disable receiving of this multicast address for
+ * the VF.
+ */
+ ether_addr_copy(mac_entry.addr, addr);
+ mac_entry.in_vlan_id = dsaf_get_field(
+ soft_mac_entry->tcam_key.low.bits.port_vlan,
+ DSAF_TBL_TCAM_KEY_VLAN_M,
+ DSAF_TBL_TCAM_KEY_VLAN_S);
+ mac_entry.in_port_num = mac_id;
+ mac_entry.port_num = port_num;
+ if (hns_dsaf_del_mac_mc_port(dsaf_dev, &mac_entry)) {
+ ret = -EINVAL;
+ continue;
+ }
+
+ /* disable receiving of this multicast address for
+ * the mac port if all VFs are disabled
+ */
+ hns_dsaf_tcam_mc_get(dsaf_dev, i,
+ (struct dsaf_tbl_tcam_data *)
+ (&soft_mac_entry->tcam_key),
+ &mac_data);
+ dsaf_set_bit(mac_data.tbl_mcast_port_msk[mac_id / 32],
+ mac_id % 32, 0);
+ if (!memcmp(mac_data.tbl_mcast_port_msk, empty_msk,
+ sizeof(u32) * DSAF_PORT_MSK_NUM)) {
+ mac_entry.port_num = mac_id;
+ if (hns_dsaf_del_mac_mc_port(dsaf_dev,
+ &mac_entry)) {
+ ret = -EINVAL;
+ continue;
+ }
+ }
+ }
+ }
+
+ return ret;
+}
+
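+/**
+ * hns_dsaf_alloc_dev - alloc a dsaf device with trailing private data
+ * @dev: parent device
+ * @sizeof_priv: size of the private data appended to the dsaf device
+ *
+ * Returns the new dsaf device, or ERR_PTR(-ENOMEM) on allocation failure.
+ */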
+static struct dsaf_device *hns_dsaf_alloc_dev(struct device *dev,
+ size_t sizeof_priv)
+{
+ struct dsaf_device *dsaf_dev;
+
+ dsaf_dev = devm_kzalloc(dev,
+ sizeof(*dsaf_dev) + sizeof_priv, GFP_KERNEL);
+ if (unlikely(!dsaf_dev)) {
+ dsaf_dev = ERR_PTR(-ENOMEM);
+ } else {
+ dsaf_dev->dev = dev;
+ dev_set_drvdata(dev, dsaf_dev);
+ }
+
+ return dsaf_dev;
+}
+
+/**
+ * hns_dsaf_free_dev - free dev mem
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static void hns_dsaf_free_dev(struct dsaf_device *dsaf_dev)
+{
+ (void)dev_set_drvdata(dsaf_dev->dev, NULL);
+}
+
+/**
+ * hns_dsaf_pfc_unit_cnt - set pfc unit count
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @mac_id: mac port id
+ * @rate: port rate mode
+ */
+static void hns_dsaf_pfc_unit_cnt(struct dsaf_device *dsaf_dev, int mac_id,
+ enum dsaf_port_rate_mode rate)
+{
+ u32 unit_cnt;
+
+ switch (rate) {
+ case DSAF_PORT_RATE_10000:
+ unit_cnt = HNS_DSAF_PFC_UNIT_CNT_FOR_XGE;
+ break;
+ case DSAF_PORT_RATE_1000:
+ unit_cnt = HNS_DSAF_PFC_UNIT_CNT_FOR_GE_1000;
+ break;
+ case DSAF_PORT_RATE_2500:
+ unit_cnt = HNS_DSAF_PFC_UNIT_CNT_FOR_GE_1000;
+ break;
+ default:
+ unit_cnt = HNS_DSAF_PFC_UNIT_CNT_FOR_XGE;
+ }
+
+ dsaf_set_dev_field(dsaf_dev,
+ (DSAF_PFC_UNIT_CNT_0_REG + 0x4 * (u64)mac_id),
+ DSAF_PFC_UNINT_CNT_M, DSAF_PFC_UNINT_CNT_S,
+ unit_cnt);
+}
+
+/**
+ * hns_dsaf_port_work_rate_cfg - config the working rate of a port
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @mac_id: mac port id
+ * @rate_mode: port rate mode
+ */
+static void
+hns_dsaf_port_work_rate_cfg(struct dsaf_device *dsaf_dev, int mac_id,
+ enum dsaf_port_rate_mode rate_mode)
+{
+ u32 port_work_mode;
+
+ port_work_mode = dsaf_read_dev(
+ dsaf_dev, DSAF_XGE_GE_WORK_MODE_0_REG + 0x4 * (u64)mac_id);
+
+ if (rate_mode == DSAF_PORT_RATE_10000)
+ dsaf_set_bit(port_work_mode, DSAF_XGE_GE_WORK_MODE_S, 1);
+ else
+ dsaf_set_bit(port_work_mode, DSAF_XGE_GE_WORK_MODE_S, 0);
+
+ dsaf_write_dev(dsaf_dev,
+ DSAF_XGE_GE_WORK_MODE_0_REG + 0x4 * (u64)mac_id,
+ port_work_mode);
+
+ hns_dsaf_pfc_unit_cnt(dsaf_dev, mac_id, rate_mode);
+}
+
+/**
+ * hns_dsaf_fix_mac_mode - dsaf modify mac mode
+ * @mac_cb: mac control block
+ */
+void hns_dsaf_fix_mac_mode(struct hns_mac_cb *mac_cb)
+{
+ enum dsaf_port_rate_mode mode;
+ struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev;
+ int mac_id = mac_cb->mac_id;
+
+ if (mac_cb->mac_type != HNAE_PORT_SERVICE)
+ return;
+ if (mac_cb->phy_if == PHY_INTERFACE_MODE_XGMII)
+ mode = DSAF_PORT_RATE_10000;
+ else
+ mode = DSAF_PORT_RATE_1000;
+
+ hns_dsaf_port_work_rate_cfg(dsaf_dev, mac_id, mode);
+}
+
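+/* compute the inode priority-pause counter register address for prio @index */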
+static u32 hns_dsaf_get_inode_prio_reg(int index)
+{
+ int base_index, offset;
+ u32 base_addr = DSAF_INODE_IN_PRIO_PAUSE_BASE_REG;
+
+ base_index = (index + 1) / DSAF_REG_PER_ZONE;
+ offset = (index + 1) % DSAF_REG_PER_ZONE;
+
+ return base_addr + DSAF_INODE_IN_PRIO_PAUSE_BASE_OFFSET * base_index +
+ DSAF_INODE_IN_PRIO_PAUSE_OFFSET * offset;
+}
+
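+/**
+ * hns_dsaf_update_stats - accumulate the hw counters of one dsaf node
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @node_num: inode/onode index whose counters are read into hw_stats
+ */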
+void hns_dsaf_update_stats(struct dsaf_device *dsaf_dev, u32 node_num)
+{
+ struct dsaf_hw_stats *hw_stats
+ = &dsaf_dev->hw_stats[node_num];
+ bool is_ver1 = AE_IS_VER1(dsaf_dev->dsaf_ver);
+ int i;
+ u32 reg_tmp;
+
+ hw_stats->pad_drop += dsaf_read_dev(dsaf_dev,
+ DSAF_INODE_PAD_DISCARD_NUM_0_REG + 0x80 * (u64)node_num);
+ hw_stats->man_pkts += dsaf_read_dev(dsaf_dev,
+ DSAF_INODE_FINAL_IN_MAN_NUM_0_REG + 0x80 * (u64)node_num);
+ hw_stats->rx_pkts += dsaf_read_dev(dsaf_dev,
+ DSAF_INODE_FINAL_IN_PKT_NUM_0_REG + 0x80 * (u64)node_num);
+ hw_stats->rx_pkt_id += dsaf_read_dev(dsaf_dev,
+ DSAF_INODE_SBM_PID_NUM_0_REG + 0x80 * (u64)node_num);
+
+ reg_tmp = is_ver1 ? DSAF_INODE_FINAL_IN_PAUSE_NUM_0_REG :
+ DSAFV2_INODE_FINAL_IN_PAUSE_NUM_0_REG;
+ hw_stats->rx_pause_frame +=
+ dsaf_read_dev(dsaf_dev, reg_tmp + 0x80 * (u64)node_num);
+
+ hw_stats->release_buf_num += dsaf_read_dev(dsaf_dev,
+ DSAF_INODE_SBM_RELS_NUM_0_REG + 0x80 * (u64)node_num);
+ hw_stats->sbm_drop += dsaf_read_dev(dsaf_dev,
+ DSAF_INODE_SBM_DROP_NUM_0_REG + 0x80 * (u64)node_num);
+ hw_stats->crc_false += dsaf_read_dev(dsaf_dev,
+ DSAF_INODE_CRC_FALSE_NUM_0_REG + 0x80 * (u64)node_num);
+ hw_stats->bp_drop += dsaf_read_dev(dsaf_dev,
+ DSAF_INODE_BP_DISCARD_NUM_0_REG + 0x80 * (u64)node_num);
+ hw_stats->rslt_drop += dsaf_read_dev(dsaf_dev,
+ DSAF_INODE_RSLT_DISCARD_NUM_0_REG + 0x80 * (u64)node_num);
+ hw_stats->local_addr_false += dsaf_read_dev(dsaf_dev,
+ DSAF_INODE_LOCAL_ADDR_FALSE_NUM_0_REG + 0x80 * (u64)node_num);
+
+ hw_stats->vlan_drop += dsaf_read_dev(dsaf_dev,
+ DSAF_INODE_SW_VLAN_TAG_DISC_0_REG + 4 * (u64)node_num);
+ hw_stats->stp_drop += dsaf_read_dev(dsaf_dev,
+ DSAF_INODE_IN_DATA_STP_DISC_0_REG + 4 * (u64)node_num);
+
+ /* pfc pause frame statistics stored in the dsaf inode */
+ if ((node_num < DSAF_SERVICE_NW_NUM) && !is_ver1) {
+ for (i = 0; i < DSAF_PRIO_NR; i++) {
+ reg_tmp = hns_dsaf_get_inode_prio_reg(i);
+ hw_stats->rx_pfc[i] += dsaf_read_dev(dsaf_dev,
+ reg_tmp + 0x4 * (u64)node_num);
+ hw_stats->tx_pfc[i] += dsaf_read_dev(dsaf_dev,
+ DSAF_XOD_XGE_PFC_PRIO_CNT_BASE_REG +
+ DSAF_XOD_XGE_PFC_PRIO_CNT_OFFSET * i +
+ 0xF0 * (u64)node_num);
+ }
+ }
+ hw_stats->tx_pkts += dsaf_read_dev(dsaf_dev,
+ DSAF_XOD_RCVPKT_CNT_0_REG + 0x90 * (u64)node_num);
+}
+
+/**
+ *hns_dsaf_get_regs - dump dsaf regs
+ *@ddev: dsaf device
+ *@port: port
+ *@data: buffer for the register values
+ */
+void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data)
+{
+ u32 i;
+ u32 j;
+ u32 *p = data;
+ u32 reg_tmp;
+ bool is_ver1 = AE_IS_VER1(ddev->dsaf_ver);
+
+ /* dsaf common registers */
+ p[0] = dsaf_read_dev(ddev, DSAF_SRAM_INIT_OVER_0_REG);
+ p[1] = dsaf_read_dev(ddev, DSAF_CFG_0_REG);
+ p[2] = dsaf_read_dev(ddev, DSAF_ECC_ERR_INVERT_0_REG);
+ p[3] = dsaf_read_dev(ddev, DSAF_ABNORMAL_TIMEOUT_0_REG);
+ p[4] = dsaf_read_dev(ddev, DSAF_FSM_TIMEOUT_0_REG);
+ p[5] = dsaf_read_dev(ddev, DSAF_DSA_REG_CNT_CLR_CE_REG);
+ p[6] = dsaf_read_dev(ddev, DSAF_DSA_SBM_INF_FIFO_THRD_REG);
+ p[7] = dsaf_read_dev(ddev, DSAF_DSA_SRAM_1BIT_ECC_SEL_REG);
+ p[8] = dsaf_read_dev(ddev, DSAF_DSA_SRAM_1BIT_ECC_CNT_REG);
+
+ p[9] = dsaf_read_dev(ddev, DSAF_PFC_EN_0_REG + port * 4);
+ p[10] = dsaf_read_dev(ddev, DSAF_PFC_UNIT_CNT_0_REG + port * 4);
+ p[11] = dsaf_read_dev(ddev, DSAF_XGE_INT_MSK_0_REG + port * 4);
+ p[12] = dsaf_read_dev(ddev, DSAF_XGE_INT_SRC_0_REG + port * 4);
+ p[13] = dsaf_read_dev(ddev, DSAF_XGE_INT_STS_0_REG + port * 4);
+ p[14] = dsaf_read_dev(ddev, DSAF_XGE_INT_MSK_0_REG + port * 4);
+ p[15] = dsaf_read_dev(ddev, DSAF_PPE_INT_MSK_0_REG + port * 4);
+ p[16] = dsaf_read_dev(ddev, DSAF_ROCEE_INT_MSK_0_REG + port * 4);
+ p[17] = dsaf_read_dev(ddev, DSAF_XGE_INT_SRC_0_REG + port * 4);
+ p[18] = dsaf_read_dev(ddev, DSAF_PPE_INT_SRC_0_REG + port * 4);
+ p[19] = dsaf_read_dev(ddev, DSAF_ROCEE_INT_SRC_0_REG + port * 4);
+ p[20] = dsaf_read_dev(ddev, DSAF_XGE_INT_STS_0_REG + port * 4);
+ p[21] = dsaf_read_dev(ddev, DSAF_PPE_INT_STS_0_REG + port * 4);
+ p[22] = dsaf_read_dev(ddev, DSAF_ROCEE_INT_STS_0_REG + port * 4);
+ p[23] = dsaf_read_dev(ddev, DSAF_PPE_QID_CFG_0_REG + port * 4);
+
+ for (i = 0; i < DSAF_SW_PORT_NUM; i++)
+ p[24 + i] = dsaf_read_dev(ddev,
+ DSAF_SW_PORT_TYPE_0_REG + i * 4);
+
+ p[32] = dsaf_read_dev(ddev, DSAF_MIX_DEF_QID_0_REG + port * 4);
+
+ for (i = 0; i < DSAF_SW_PORT_NUM; i++)
+ p[33 + i] = dsaf_read_dev(ddev,
+ DSAF_PORT_DEF_VLAN_0_REG + i * 4);
+
+ for (i = 0; i < DSAF_TOTAL_QUEUE_NUM; i++)
+ p[41 + i] = dsaf_read_dev(ddev,
+ DSAF_VM_DEF_VLAN_0_REG + i * 4);
+
+ /* dsaf inode registers */
+ p[170] = dsaf_read_dev(ddev, DSAF_INODE_CUT_THROUGH_CFG_0_REG);
+
+ p[171] = dsaf_read_dev(ddev,
+ DSAF_INODE_ECC_ERR_ADDR_0_REG + port * 0x80);
+
+ for (i = 0; i < DSAF_INODE_NUM / DSAF_COMM_CHN; i++) {
+ j = i * DSAF_COMM_CHN + port;
+ p[172 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_IN_PORT_NUM_0_REG + j * 0x80);
+ p[175 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_PRI_TC_CFG_0_REG + j * 0x80);
+ p[178 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_BP_STATUS_0_REG + j * 0x80);
+ p[181 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_PAD_DISCARD_NUM_0_REG + j * 0x80);
+ p[184 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_FINAL_IN_MAN_NUM_0_REG + j * 0x80);
+ p[187 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_FINAL_IN_PKT_NUM_0_REG + j * 0x80);
+ p[190 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_SBM_PID_NUM_0_REG + j * 0x80);
+ reg_tmp = is_ver1 ? DSAF_INODE_FINAL_IN_PAUSE_NUM_0_REG :
+ DSAFV2_INODE_FINAL_IN_PAUSE_NUM_0_REG;
+ p[193 + i] = dsaf_read_dev(ddev, reg_tmp + j * 0x80);
+ p[196 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_SBM_RELS_NUM_0_REG + j * 0x80);
+ p[199 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_SBM_DROP_NUM_0_REG + j * 0x80);
+ p[202 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_CRC_FALSE_NUM_0_REG + j * 0x80);
+ p[205 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_BP_DISCARD_NUM_0_REG + j * 0x80);
+ p[208 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_RSLT_DISCARD_NUM_0_REG + j * 0x80);
+ p[211 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_LOCAL_ADDR_FALSE_NUM_0_REG + j * 0x80);
+ p[214 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_VOQ_OVER_NUM_0_REG + j * 0x80);
+ p[217 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_BD_SAVE_STATUS_0_REG + j * 4);
+ p[220 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_BD_ORDER_STATUS_0_REG + j * 4);
+ p[223 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_SW_VLAN_TAG_DISC_0_REG + j * 4);
+ p[226 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_IN_DATA_STP_DISC_0_REG + j * 4);
+ }
+
+ p[229] = dsaf_read_dev(ddev, DSAF_INODE_GE_FC_EN_0_REG + port * 4);
+
+ for (i = 0; i < DSAF_INODE_NUM / DSAF_COMM_CHN; i++) {
+ j = i * DSAF_COMM_CHN + port;
+ p[230 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_VC0_IN_PKT_NUM_0_REG + j * 4);
+ }
+
+ p[233] = dsaf_read_dev(ddev,
+ DSAF_INODE_VC1_IN_PKT_NUM_0_REG + port * 0x80);
+
+ /* dsaf sbm registers */
+ for (i = 0; i < HNS_DSAF_SBM_NUM(ddev) / DSAF_COMM_CHN; i++) {
+ j = i * DSAF_COMM_CHN + port;
+ p[234 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_CFG_REG_0_REG + j * 0x80);
+ p[237 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_BP_CFG_0_XGE_REG_0_REG + j * 0x80);
+ p[240 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_BP_CFG_1_REG_0_REG + j * 0x80);
+ p[243 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_BP_CFG_2_XGE_REG_0_REG + j * 0x80);
+ p[246 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_FREE_CNT_0_0_REG + j * 0x80);
+ p[249 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_FREE_CNT_1_0_REG + j * 0x80);
+ p[252 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_BP_CNT_0_0_REG + j * 0x80);
+ p[255 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_BP_CNT_1_0_REG + j * 0x80);
+ p[258 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_BP_CNT_2_0_REG + j * 0x80);
+ p[261 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_BP_CNT_3_0_REG + j * 0x80);
+ p[264 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_INER_ST_0_REG + j * 0x80);
+ p[267 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_MIB_REQ_FAILED_TC_0_REG + j * 0x80);
+ p[270 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_LNK_INPORT_CNT_0_REG + j * 0x80);
+ p[273 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_LNK_DROP_CNT_0_REG + j * 0x80);
+ p[276 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_INF_OUTPORT_CNT_0_REG + j * 0x80);
+ p[279 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_LNK_INPORT_TC0_CNT_0_REG + j * 0x80);
+ p[282 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_LNK_INPORT_TC1_CNT_0_REG + j * 0x80);
+ p[285 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_LNK_INPORT_TC2_CNT_0_REG + j * 0x80);
+ p[288 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_LNK_INPORT_TC3_CNT_0_REG + j * 0x80);
+ p[291 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_LNK_INPORT_TC4_CNT_0_REG + j * 0x80);
+ p[294 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_LNK_INPORT_TC5_CNT_0_REG + j * 0x80);
+ p[297 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_LNK_INPORT_TC6_CNT_0_REG + j * 0x80);
+ p[300 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_LNK_INPORT_TC7_CNT_0_REG + j * 0x80);
+ p[303 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_LNK_REQ_CNT_0_REG + j * 0x80);
+ p[306 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_LNK_RELS_CNT_0_REG + j * 0x80);
+ p[309 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_BP_CFG_3_REG_0_REG + j * 0x80);
+ p[312 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_BP_CFG_4_REG_0_REG + j * 0x80);
+ }
+
+ /* dsaf onode registers */
+ for (i = 0; i < DSAF_XOD_NUM; i++) {
+ p[315 + i] = dsaf_read_dev(ddev,
+ DSAF_XOD_ETS_TSA_TC0_TC3_CFG_0_REG + i * 0x90);
+ p[323 + i] = dsaf_read_dev(ddev,
+ DSAF_XOD_ETS_TSA_TC4_TC7_CFG_0_REG + i * 0x90);
+ p[331 + i] = dsaf_read_dev(ddev,
+ DSAF_XOD_ETS_BW_TC0_TC3_CFG_0_REG + i * 0x90);
+ p[339 + i] = dsaf_read_dev(ddev,
+ DSAF_XOD_ETS_BW_TC4_TC7_CFG_0_REG + i * 0x90);
+ p[347 + i] = dsaf_read_dev(ddev,
+ DSAF_XOD_ETS_BW_OFFSET_CFG_0_REG + i * 0x90);
+ p[355 + i] = dsaf_read_dev(ddev,
+ DSAF_XOD_ETS_TOKEN_CFG_0_REG + i * 0x90);
+ }
+
+ p[363] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_0_0_REG + port * 0x90);
+ p[364] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_1_0_REG + port * 0x90);
+ p[365] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_2_0_REG + port * 0x90);
+
+ for (i = 0; i < DSAF_XOD_BIG_NUM / DSAF_COMM_CHN; i++) {
+ j = i * DSAF_COMM_CHN + port;
+ p[366 + i] = dsaf_read_dev(ddev,
+ DSAF_XOD_GNT_L_0_REG + j * 0x90);
+ p[369 + i] = dsaf_read_dev(ddev,
+ DSAF_XOD_GNT_H_0_REG + j * 0x90);
+ p[372 + i] = dsaf_read_dev(ddev,
+ DSAF_XOD_CONNECT_STATE_0_REG + j * 0x90);
+ p[375 + i] = dsaf_read_dev(ddev,
+ DSAF_XOD_RCVPKT_CNT_0_REG + j * 0x90);
+ p[378 + i] = dsaf_read_dev(ddev,
+ DSAF_XOD_RCVTC0_CNT_0_REG + j * 0x90);
+ p[381 + i] = dsaf_read_dev(ddev,
+ DSAF_XOD_RCVTC1_CNT_0_REG + j * 0x90);
+ p[384 + i] = dsaf_read_dev(ddev,
+ DSAF_XOD_RCVTC2_CNT_0_REG + j * 0x90);
+ p[387 + i] = dsaf_read_dev(ddev,
+ DSAF_XOD_RCVTC3_CNT_0_REG + j * 0x90);
+ p[390 + i] = dsaf_read_dev(ddev,
+ DSAF_XOD_RCVVC0_CNT_0_REG + j * 0x90);
+ p[393 + i] = dsaf_read_dev(ddev,
+ DSAF_XOD_RCVVC1_CNT_0_REG + j * 0x90);
+ }
+
+ p[396] = dsaf_read_dev(ddev,
+ DSAF_XOD_XGE_RCVIN0_CNT_0_REG + port * 0x90);
+ p[397] = dsaf_read_dev(ddev,
+ DSAF_XOD_XGE_RCVIN1_CNT_0_REG + port * 0x90);
+ p[398] = dsaf_read_dev(ddev,
+ DSAF_XOD_XGE_RCVIN2_CNT_0_REG + port * 0x90);
+ p[399] = dsaf_read_dev(ddev,
+ DSAF_XOD_XGE_RCVIN3_CNT_0_REG + port * 0x90);
+ p[400] = dsaf_read_dev(ddev,
+ DSAF_XOD_XGE_RCVIN4_CNT_0_REG + port * 0x90);
+ p[401] = dsaf_read_dev(ddev,
+ DSAF_XOD_XGE_RCVIN5_CNT_0_REG + port * 0x90);
+ p[402] = dsaf_read_dev(ddev,
+ DSAF_XOD_XGE_RCVIN6_CNT_0_REG + port * 0x90);
+ p[403] = dsaf_read_dev(ddev,
+ DSAF_XOD_XGE_RCVIN7_CNT_0_REG + port * 0x90);
+ p[404] = dsaf_read_dev(ddev,
+ DSAF_XOD_PPE_RCVIN0_CNT_0_REG + port * 0x90);
+ p[405] = dsaf_read_dev(ddev,
+ DSAF_XOD_PPE_RCVIN1_CNT_0_REG + port * 0x90);
+ p[406] = dsaf_read_dev(ddev,
+ DSAF_XOD_ROCEE_RCVIN0_CNT_0_REG + port * 0x90);
+ p[407] = dsaf_read_dev(ddev,
+ DSAF_XOD_ROCEE_RCVIN1_CNT_0_REG + port * 0x90);
+ p[408] = dsaf_read_dev(ddev,
+ DSAF_XOD_FIFO_STATUS_0_REG + port * 0x90);
+
+ /* dsaf voq registers */
+ for (i = 0; i < DSAF_VOQ_NUM / DSAF_COMM_CHN; i++) {
+ j = (i * DSAF_COMM_CHN + port) * 0x90;
+ p[409 + i] = dsaf_read_dev(ddev,
+ DSAF_VOQ_ECC_INVERT_EN_0_REG + j);
+ p[412 + i] = dsaf_read_dev(ddev,
+ DSAF_VOQ_SRAM_PKT_NUM_0_REG + j);
+ p[415 + i] = dsaf_read_dev(ddev, DSAF_VOQ_IN_PKT_NUM_0_REG + j);
+ p[418 + i] = dsaf_read_dev(ddev,
+ DSAF_VOQ_OUT_PKT_NUM_0_REG + j);
+ p[421 + i] = dsaf_read_dev(ddev,
+ DSAF_VOQ_ECC_ERR_ADDR_0_REG + j);
+ p[424 + i] = dsaf_read_dev(ddev, DSAF_VOQ_BP_STATUS_0_REG + j);
+ p[427 + i] = dsaf_read_dev(ddev, DSAF_VOQ_SPUP_IDLE_0_REG + j);
+ p[430 + i] = dsaf_read_dev(ddev,
+ DSAF_VOQ_XGE_XOD_REQ_0_0_REG + j);
+ p[433 + i] = dsaf_read_dev(ddev,
+ DSAF_VOQ_XGE_XOD_REQ_1_0_REG + j);
+ p[436 + i] = dsaf_read_dev(ddev,
+ DSAF_VOQ_PPE_XOD_REQ_0_REG + j);
+ p[439 + i] = dsaf_read_dev(ddev,
+ DSAF_VOQ_ROCEE_XOD_REQ_0_REG + j);
+ p[442 + i] = dsaf_read_dev(ddev,
+ DSAF_VOQ_BP_ALL_THRD_0_REG + j);
+ }
+
+ /* dsaf tbl registers */
+ p[445] = dsaf_read_dev(ddev, DSAF_TBL_CTRL_0_REG);
+ p[446] = dsaf_read_dev(ddev, DSAF_TBL_INT_MSK_0_REG);
+ p[447] = dsaf_read_dev(ddev, DSAF_TBL_INT_SRC_0_REG);
+ p[448] = dsaf_read_dev(ddev, DSAF_TBL_INT_STS_0_REG);
+ p[449] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_ADDR_0_REG);
+ p[450] = dsaf_read_dev(ddev, DSAF_TBL_LINE_ADDR_0_REG);
+ p[451] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_HIGH_0_REG);
+ p[452] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_LOW_0_REG);
+ p[453] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_4_0_REG);
+ p[454] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_3_0_REG);
+ p[455] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_2_0_REG);
+ p[456] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_1_0_REG);
+ p[457] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_0_0_REG);
+ p[458] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_UCAST_CFG_0_REG);
+ p[459] = dsaf_read_dev(ddev, DSAF_TBL_LIN_CFG_0_REG);
+ p[460] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RDATA_HIGH_0_REG);
+ p[461] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RDATA_LOW_0_REG);
+ p[462] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA4_0_REG);
+ p[463] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA3_0_REG);
+ p[464] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA2_0_REG);
+ p[465] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA1_0_REG);
+ p[466] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA0_0_REG);
+ p[467] = dsaf_read_dev(ddev, DSAF_TBL_LIN_RDATA_0_REG);
+
+ for (i = 0; i < DSAF_SW_PORT_NUM; i++) {
+ j = i * 0x8;
+ p[468 + 2 * i] = dsaf_read_dev(ddev,
+ DSAF_TBL_DA0_MIS_INFO1_0_REG + j);
+ p[469 + 2 * i] = dsaf_read_dev(ddev,
+ DSAF_TBL_DA0_MIS_INFO0_0_REG + j);
+ }
+
+ p[484] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO2_0_REG);
+ p[485] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO1_0_REG);
+ p[486] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO0_0_REG);
+ p[487] = dsaf_read_dev(ddev, DSAF_TBL_PUL_0_REG);
+ p[488] = dsaf_read_dev(ddev, DSAF_TBL_OLD_RSLT_0_REG);
+ p[489] = dsaf_read_dev(ddev, DSAF_TBL_OLD_SCAN_VAL_0_REG);
+ p[490] = dsaf_read_dev(ddev, DSAF_TBL_DFX_CTRL_0_REG);
+ p[491] = dsaf_read_dev(ddev, DSAF_TBL_DFX_STAT_0_REG);
+ p[492] = dsaf_read_dev(ddev, DSAF_TBL_DFX_STAT_2_0_REG);
+ p[493] = dsaf_read_dev(ddev, DSAF_TBL_LKUP_NUM_I_0_REG);
+ p[494] = dsaf_read_dev(ddev, DSAF_TBL_LKUP_NUM_O_0_REG);
+ p[495] = dsaf_read_dev(ddev, DSAF_TBL_UCAST_BCAST_MIS_INFO_0_0_REG);
+
+ /* dsaf other registers */
+ p[496] = dsaf_read_dev(ddev, DSAF_INODE_FIFO_WL_0_REG + port * 0x4);
+ p[497] = dsaf_read_dev(ddev, DSAF_ONODE_FIFO_WL_0_REG + port * 0x4);
+ p[498] = dsaf_read_dev(ddev, DSAF_XGE_GE_WORK_MODE_0_REG + port * 0x4);
+ p[499] = dsaf_read_dev(ddev,
+ DSAF_XGE_APP_RX_LINK_UP_0_REG + port * 0x4);
+ p[500] = dsaf_read_dev(ddev, DSAF_NETPORT_CTRL_SIG_0_REG + port * 0x4);
+ p[501] = dsaf_read_dev(ddev, DSAF_XGE_CTRL_SIG_CFG_0_REG + port * 0x4);
+
+ if (!is_ver1)
+ p[502] = dsaf_read_dev(ddev, DSAF_PAUSE_CFG_REG + port * 0x4);
+
+ /* mark end of dsaf regs */
+ p[503] = 0xdddddddd;
+}
+
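+/* fill the ethtool stat name strings for one dsaf node; returns the
+ * position just after the last string written
+ */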
+static char *hns_dsaf_get_node_stats_strings(char *data, int node,
+ struct dsaf_device *dsaf_dev)
+{
+ char *buff = data;
+ int i;
+ bool is_ver1 = AE_IS_VER1(dsaf_dev->dsaf_ver);
+
+ snprintf(buff, ETH_GSTRING_LEN, "innod%d_pad_drop_pkts", node);
+ buff += ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "innod%d_manage_pkts", node);
+ buff += ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "innod%d_rx_pkts", node);
+ buff += ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "innod%d_rx_pkt_id", node);
+ buff += ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "innod%d_rx_pause_frame", node);
+ buff += ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "innod%d_release_buf_num", node);
+ buff += ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "innod%d_sbm_drop_pkts", node);
+ buff += ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "innod%d_crc_false_pkts", node);
+ buff += ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "innod%d_bp_drop_pkts", node);
+ buff += ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "innod%d_lookup_rslt_drop_pkts", node);
+ buff += ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "innod%d_local_rslt_fail_pkts", node);
+ buff += ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "innod%d_vlan_drop_pkts", node);
+ buff += ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "innod%d_stp_drop_pkts", node);
+ buff += ETH_GSTRING_LEN;
+ if (node < DSAF_SERVICE_NW_NUM && !is_ver1) {
+ for (i = 0; i < DSAF_PRIO_NR; i++) {
+ snprintf(buff + 0 * ETH_GSTRING_LEN * DSAF_PRIO_NR,
+ ETH_GSTRING_LEN, "inod%d_pfc_prio%d_pkts",
+ node, i);
+ snprintf(buff + 1 * ETH_GSTRING_LEN * DSAF_PRIO_NR,
+ ETH_GSTRING_LEN, "onod%d_pfc_prio%d_pkts",
+ node, i);
+ buff += ETH_GSTRING_LEN;
+ }
+ buff += 1 * DSAF_PRIO_NR * ETH_GSTRING_LEN;
+ }
+ snprintf(buff, ETH_GSTRING_LEN, "onnod%d_tx_pkts", node);
+ buff += ETH_GSTRING_LEN;
+
+ return buff;
+}
+
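+/* copy the accumulated hw stats of one dsaf node into the ethtool data
+ * buffer; returns the position just after the last value written
+ */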
+static u64 *hns_dsaf_get_node_stats(struct dsaf_device *ddev, u64 *data,
+ int node_num)
+{
+ u64 *p = data;
+ int i;
+ struct dsaf_hw_stats *hw_stats = &ddev->hw_stats[node_num];
+ bool is_ver1 = AE_IS_VER1(ddev->dsaf_ver);
+
+ p[0] = hw_stats->pad_drop;
+ p[1] = hw_stats->man_pkts;
+ p[2] = hw_stats->rx_pkts;
+ p[3] = hw_stats->rx_pkt_id;
+ p[4] = hw_stats->rx_pause_frame;
+ p[5] = hw_stats->release_buf_num;
+ p[6] = hw_stats->sbm_drop;
+ p[7] = hw_stats->crc_false;
+ p[8] = hw_stats->bp_drop;
+ p[9] = hw_stats->rslt_drop;
+ p[10] = hw_stats->local_addr_false;
+ p[11] = hw_stats->vlan_drop;
+ p[12] = hw_stats->stp_drop;
+ if (node_num < DSAF_SERVICE_NW_NUM && !is_ver1) {
+ for (i = 0; i < DSAF_PRIO_NR; i++) {
+ p[13 + i + 0 * DSAF_PRIO_NR] = hw_stats->rx_pfc[i];
+ p[13 + i + 1 * DSAF_PRIO_NR] = hw_stats->tx_pfc[i];
+ }
+ p[29] = hw_stats->tx_pkts;
+ return &p[30];
+ }
+
+ p[13] = hw_stats->tx_pkts;
+ return &p[14];
+}
+
+/**
+ *hns_dsaf_get_stats - get dsaf statistics
+ *@ddev: dsaf device
+ *@data: buffer for the statistic values
+ *@port: port num
+ */
+void hns_dsaf_get_stats(struct dsaf_device *ddev, u64 *data, int port)
+{
+ u64 *p = data;
+ int node_num = port;
+
+ /* for ge/xge node info */
+ p = hns_dsaf_get_node_stats(ddev, p, node_num);
+
+ /* for ppe node info */
+ node_num = port + DSAF_PPE_INODE_BASE;
+ (void)hns_dsaf_get_node_stats(ddev, p, node_num);
+}
+
+/**
+ *hns_dsaf_get_sset_count - get dsaf string set count
+ *@dsaf_dev: dsaf device
+ *@stringset: type of values in data
+ *return dsaf string name count
+ */
+int hns_dsaf_get_sset_count(struct dsaf_device *dsaf_dev, int stringset)
+{
+ bool is_ver1 = AE_IS_VER1(dsaf_dev->dsaf_ver);
+
+ if (stringset == ETH_SS_STATS) {
+ if (is_ver1)
+ return DSAF_STATIC_NUM;
+ else
+ return DSAF_V2_STATIC_NUM;
+ }
+ return 0;
+}
+
+/**
+ *hns_dsaf_get_strings - get dsaf string set
+ *@stringset: string set index
+ *@data: buffer for the string names
+ *@port:port index
+ *@dsaf_dev: dsaf device
+ */
+void hns_dsaf_get_strings(int stringset, u8 *data, int port,
+ struct dsaf_device *dsaf_dev)
+{
+ char *buff = (char *)data;
+ int node = port;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ /* for ge/xge node info */
+ buff = hns_dsaf_get_node_stats_strings(buff, node, dsaf_dev);
+
+ /* for ppe node info */
+ node = port + DSAF_PPE_INODE_BASE;
+ (void)hns_dsaf_get_node_stats_strings(buff, node, dsaf_dev);
+}
+
+/**
+ *hns_dsaf_get_regs_count - get dsaf regs count
+ *return dsaf regs count
+ */
+int hns_dsaf_get_regs_count(void)
+{
+ return DSAF_DUMP_REGS_NUM;
+}
+
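+/* map a port number to its bit position in the multicast port mask;
+ * returns -EINVAL for a port outside the service and inner port ranges
+ */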
+static int hns_dsaf_get_port_id(u8 port)
+{
+ if (port < DSAF_SERVICE_NW_NUM)
+ return port;
+
+ if (port >= DSAF_BASE_INNER_PORT_NUM)
+ return port - DSAF_BASE_INNER_PORT_NUM + DSAF_SERVICE_NW_NUM;
+
+ return -EINVAL;
+}
+
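+/* install the reserved unicast and multicast vague-match tcam entries that
+ * implement promiscuous mode for @port; does nothing if they already exist
+ */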
+static void set_promisc_tcam_enable(struct dsaf_device *dsaf_dev, u32 port)
+{
+ struct dsaf_tbl_tcam_ucast_cfg tbl_tcam_ucast = {0, 1, 0, 0, 0x80};
+ struct dsaf_tbl_tcam_data tbl_tcam_data_mc = {0x01000000, port};
+ struct dsaf_tbl_tcam_data tbl_tcam_mask_uc = {0x01000000, 0xf};
+ struct dsaf_tbl_tcam_mcast_cfg tbl_tcam_mcast = {0, 0, {0} };
+ struct dsaf_drv_priv *priv = hns_dsaf_dev_priv(dsaf_dev);
+ struct dsaf_tbl_tcam_data tbl_tcam_data_uc = {0, port};
+ struct dsaf_drv_mac_single_dest_entry mask_entry;
+ struct dsaf_drv_tbl_tcam_key temp_key, mask_key;
+ struct dsaf_drv_soft_mac_tbl *soft_mac_entry;
+ u16 entry_index;
+ struct dsaf_drv_tbl_tcam_key mac_key;
+ struct hns_mac_cb *mac_cb;
+ u8 addr[ETH_ALEN] = {0};
+ u8 port_num;
+ int mskid;
+
+ /* promisc use vague table match with vlanid = 0 & macaddr = 0 */
+ hns_dsaf_set_mac_key(dsaf_dev, &mac_key, 0x00, port, addr);
+ entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
+ if (entry_index != DSAF_INVALID_ENTRY_IDX)
+ return;
+
+ /* put promisc tcam entry in the end. */
+ /* 1. set promisc unicast vague tcam entry. */
+ entry_index = hns_dsaf_find_empty_mac_entry_reverse(dsaf_dev);
+ if (entry_index == DSAF_INVALID_ENTRY_IDX) {
+ dev_err(dsaf_dev->dev,
+ "enable uc promisc failed (port:%#x)\n",
+ port);
+ return;
+ }
+
+ mac_cb = dsaf_dev->mac_cb[port];
+ (void)hns_mac_get_inner_port_num(mac_cb, 0, &port_num);
+ tbl_tcam_ucast.tbl_ucast_out_port = port_num;
+
+ /* config uc vague table */
+ hns_dsaf_tcam_uc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_uc,
+ &tbl_tcam_mask_uc, &tbl_tcam_ucast);
+
+ /* update software entry */
+ soft_mac_entry = priv->soft_mac_tbl;
+ soft_mac_entry += entry_index;
+ soft_mac_entry->index = entry_index;
+ soft_mac_entry->tcam_key.high.val = mac_key.high.val;
+ soft_mac_entry->tcam_key.low.val = mac_key.low.val;
+ /* step back to the START for mc. */
+ soft_mac_entry = priv->soft_mac_tbl;
+
+ /* 2. set promisc multicast vague tcam entry. */
+ entry_index = hns_dsaf_find_empty_mac_entry_reverse(dsaf_dev);
+ if (entry_index == DSAF_INVALID_ENTRY_IDX) {
+ dev_err(dsaf_dev->dev,
+ "enable mc promisc failed (port:%#x)\n",
+ port);
+ return;
+ }
+
+ memset(&mask_entry, 0x0, sizeof(mask_entry));
+ memset(&mask_key, 0x0, sizeof(mask_key));
+ memset(&temp_key, 0x0, sizeof(temp_key));
+ mask_entry.addr[0] = 0x01;
+ hns_dsaf_set_mac_key(dsaf_dev, &mask_key, mask_entry.in_vlan_id,
+ 0xf, mask_entry.addr);
+ tbl_tcam_mcast.tbl_mcast_item_vld = 1;
+ tbl_tcam_mcast.tbl_mcast_old_en = 0;
+
+ /* set MAC port to handle multicast */
+ mskid = hns_dsaf_get_port_id(port);
+ if (mskid == -EINVAL) {
+ dev_err(dsaf_dev->dev, "%s,pnum(%d)error,key(%#x:%#x)\n",
+ dsaf_dev->ae_dev.name, port,
+ mask_key.high.val, mask_key.low.val);
+ return;
+ }
+ dsaf_set_bit(tbl_tcam_mcast.tbl_mcast_port_msk[mskid / 32],
+ mskid % 32, 1);
+
+ /* set pool bit map to handle multicast */
+ mskid = hns_dsaf_get_port_id(port_num);
+ if (mskid == -EINVAL) {
+ dev_err(dsaf_dev->dev,
+ "%s, pool bit map pnum(%d)error,key(%#x:%#x)\n",
+ dsaf_dev->ae_dev.name, port_num,
+ mask_key.high.val, mask_key.low.val);
+ return;
+ }
+ dsaf_set_bit(tbl_tcam_mcast.tbl_mcast_port_msk[mskid / 32],
+ mskid % 32, 1);
+
+ memcpy(&temp_key, &mask_key, sizeof(mask_key));
+ hns_dsaf_tcam_mc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_mc,
+ (struct dsaf_tbl_tcam_data *)(&mask_key),
+ &tbl_tcam_mcast);
+
+ /* update software entry */
+ soft_mac_entry += entry_index;
+ soft_mac_entry->index = entry_index;
+ soft_mac_entry->tcam_key.high.val = temp_key.high.val;
+ soft_mac_entry->tcam_key.low.val = temp_key.low.val;
+}
+
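+/* tear down the unicast and multicast vague-match tcam entries installed
+ * by set_promisc_tcam_enable() for @port
+ */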
+static void set_promisc_tcam_disable(struct dsaf_device *dsaf_dev, u32 port)
+{
+ struct dsaf_tbl_tcam_data tbl_tcam_data_mc = {0x01000000, port};
+ struct dsaf_tbl_tcam_ucast_cfg tbl_tcam_ucast = {0, 0, 0, 0, 0};
+ struct dsaf_tbl_tcam_mcast_cfg tbl_tcam_mcast = {0, 0, {0} };
+ struct dsaf_drv_priv *priv = hns_dsaf_dev_priv(dsaf_dev);
+ struct dsaf_tbl_tcam_data tbl_tcam_data_uc = {0, 0};
+ struct dsaf_tbl_tcam_data tbl_tcam_mask = {0, 0};
+ struct dsaf_drv_soft_mac_tbl *soft_mac_entry;
+ u16 entry_index;
+ struct dsaf_drv_tbl_tcam_key mac_key;
+ u8 addr[ETH_ALEN] = {0};
+
+ /* 1. delete uc vague tcam entry. */
+ /* promisc use vague table match with vlanid = 0 & macaddr = 0 */
+ hns_dsaf_set_mac_key(dsaf_dev, &mac_key, 0x00, port, addr);
+ entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
+
+ if (entry_index == DSAF_INVALID_ENTRY_IDX)
+ return;
+
+ /* config uc vague table */
+ hns_dsaf_tcam_uc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_uc,
+ &tbl_tcam_mask, &tbl_tcam_ucast);
+ /* update soft management table. */
+ soft_mac_entry = priv->soft_mac_tbl;
+ soft_mac_entry += entry_index;
+ soft_mac_entry->index = DSAF_INVALID_ENTRY_IDX;
+ /* step back to the START for mc. */
+ soft_mac_entry = priv->soft_mac_tbl;
+
+ /* 2. delete mc vague tcam entry. */
+ addr[0] = 0x01;
+ memset(&mac_key, 0x0, sizeof(mac_key));
+ hns_dsaf_set_mac_key(dsaf_dev, &mac_key, 0x00, port, addr);
+ entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
+
+ if (entry_index == DSAF_INVALID_ENTRY_IDX)
+ return;
+
+ /* config mc vague table */
+ hns_dsaf_tcam_mc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_mc,
+ &tbl_tcam_mask, &tbl_tcam_mcast);
+ /* update soft management table. */
+ soft_mac_entry += entry_index;
+ soft_mac_entry->index = DSAF_INVALID_ENTRY_IDX;
+}
+
+/* Reserve the last TCAM entry for promisc support */
+void hns_dsaf_set_promisc_tcam(struct dsaf_device *dsaf_dev,
+ u32 port, bool enable)
+{
+ if (enable)
+ set_promisc_tcam_enable(dsaf_dev, port);
+ else
+ set_promisc_tcam_disable(dsaf_dev, port);
+}
+
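+/**
+ * hns_dsaf_wait_pkt_clean - wait until a service port's VOQ has drained
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @port: service port to wait for
+ *
+ * Polls until the VOQ in and out packet counters match, i.e. no packet is
+ * still in flight. Returns -EBUSY on timeout.
+ */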
+int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port)
+{
+ u32 val, val_tmp;
+ int wait_cnt;
+
+ if (port >= DSAF_SERVICE_NW_NUM)
+ return 0;
+
+ wait_cnt = 0;
+ while (wait_cnt++ < HNS_MAX_WAIT_CNT) {
+ val = dsaf_read_dev(dsaf_dev, DSAF_VOQ_IN_PKT_NUM_0_REG +
+ (port + DSAF_XGE_NUM) * 0x40);
+ val_tmp = dsaf_read_dev(dsaf_dev, DSAF_VOQ_OUT_PKT_NUM_0_REG +
+ (port + DSAF_XGE_NUM) * 0x40);
+ if (val == val_tmp)
+ break;
+
+ usleep_range(100, 200);
+ }
+
+ if (wait_cnt >= HNS_MAX_WAIT_CNT) {
+ dev_err(dsaf_dev->dev, "hns dsaf clean wait timeout(%u - %u).\n",
+ val, val_tmp);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+/**
+ * hns_dsaf_probe - probe dsaf dev
+ * @pdev: dsaf platform device
+ * return 0 - success, negative - fail
+ */
+static int hns_dsaf_probe(struct platform_device *pdev)
+{
+ struct dsaf_device *dsaf_dev;
+ int ret;
+
+ dsaf_dev = hns_dsaf_alloc_dev(&pdev->dev, sizeof(struct dsaf_drv_priv));
+ if (IS_ERR(dsaf_dev)) {
+ ret = PTR_ERR(dsaf_dev);
+ dev_err(&pdev->dev,
+ "dsaf_probe dsaf_alloc_dev failed, ret = %#x!\n", ret);
+ return ret;
+ }
+
+ ret = hns_dsaf_get_cfg(dsaf_dev);
+ if (ret)
+ goto free_dev;
+
+ ret = hns_dsaf_init(dsaf_dev);
+ if (ret)
+ goto free_dev;
+
+ ret = hns_mac_init(dsaf_dev);
+ if (ret)
+ goto uninit_dsaf;
+
+ ret = hns_ppe_init(dsaf_dev);
+ if (ret)
+ goto uninit_mac;
+
+ ret = hns_dsaf_ae_init(dsaf_dev);
+ if (ret)
+ goto uninit_ppe;
+
+ return 0;
+
+uninit_ppe:
+ hns_ppe_uninit(dsaf_dev);
+
+uninit_mac:
+ hns_mac_uninit(dsaf_dev);
+
+uninit_dsaf:
+ hns_dsaf_free(dsaf_dev);
+
+free_dev:
+ hns_dsaf_free_dev(dsaf_dev);
+
+ return ret;
+}
+
+/**
+ * hns_dsaf_remove - remove dsaf dev
+ * @pdev: dsaf platform device
+ */
+static int hns_dsaf_remove(struct platform_device *pdev)
+{
+ struct dsaf_device *dsaf_dev = dev_get_drvdata(&pdev->dev);
+
+ hns_dsaf_ae_uninit(dsaf_dev);
+
+ hns_ppe_uninit(dsaf_dev);
+
+ hns_mac_uninit(dsaf_dev);
+
+ hns_dsaf_free(dsaf_dev);
+
+ hns_dsaf_free_dev(dsaf_dev);
+
+ return 0;
+}
+
+static const struct of_device_id g_dsaf_match[] = {
+ {.compatible = "hisilicon,hns-dsaf-v1"},
+ {.compatible = "hisilicon,hns-dsaf-v2"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, g_dsaf_match);
+
+static struct platform_driver g_dsaf_driver = {
+ .probe = hns_dsaf_probe,
+ .remove = hns_dsaf_remove,
+ .driver = {
+ .name = DSAF_DRV_NAME,
+ .of_match_table = g_dsaf_match,
+ .acpi_match_table = hns_dsaf_acpi_match,
+ },
+};
+
+module_platform_driver(g_dsaf_driver);
+
+/**
+ * hns_dsaf_roce_reset - reset dsaf and roce
+ * @dsaf_fwnode: pointer to the firmware node of the dsaf
+ * @dereset: false - assert reset, true - de-assert reset
+ * return 0 - success, negative - fail
+ */
+int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset)
+{
+ struct dsaf_device *dsaf_dev;
+ struct platform_device *pdev;
+ u32 mp;
+ u32 sl;
+ u32 credit;
+ int i;
+ static const u32 port_map[DSAF_ROCE_CREDIT_CHN][DSAF_ROCE_CHAN_MODE_NUM] = {
+ {DSAF_ROCE_PORT_0, DSAF_ROCE_PORT_0, DSAF_ROCE_PORT_0},
+ {DSAF_ROCE_PORT_1, DSAF_ROCE_PORT_0, DSAF_ROCE_PORT_0},
+ {DSAF_ROCE_PORT_2, DSAF_ROCE_PORT_1, DSAF_ROCE_PORT_0},
+ {DSAF_ROCE_PORT_3, DSAF_ROCE_PORT_1, DSAF_ROCE_PORT_0},
+ {DSAF_ROCE_PORT_4, DSAF_ROCE_PORT_2, DSAF_ROCE_PORT_1},
+ {DSAF_ROCE_PORT_4, DSAF_ROCE_PORT_2, DSAF_ROCE_PORT_1},
+ {DSAF_ROCE_PORT_5, DSAF_ROCE_PORT_3, DSAF_ROCE_PORT_1},
+ {DSAF_ROCE_PORT_5, DSAF_ROCE_PORT_3, DSAF_ROCE_PORT_1},
+ };
+ static const u32 sl_map[DSAF_ROCE_CREDIT_CHN][DSAF_ROCE_CHAN_MODE_NUM] = {
+ {DSAF_ROCE_SL_0, DSAF_ROCE_SL_0, DSAF_ROCE_SL_0},
+ {DSAF_ROCE_SL_0, DSAF_ROCE_SL_1, DSAF_ROCE_SL_1},
+ {DSAF_ROCE_SL_0, DSAF_ROCE_SL_0, DSAF_ROCE_SL_2},
+ {DSAF_ROCE_SL_0, DSAF_ROCE_SL_1, DSAF_ROCE_SL_3},
+ {DSAF_ROCE_SL_0, DSAF_ROCE_SL_0, DSAF_ROCE_SL_0},
+ {DSAF_ROCE_SL_1, DSAF_ROCE_SL_1, DSAF_ROCE_SL_1},
+ {DSAF_ROCE_SL_0, DSAF_ROCE_SL_0, DSAF_ROCE_SL_2},
+ {DSAF_ROCE_SL_1, DSAF_ROCE_SL_1, DSAF_ROCE_SL_3},
+ };
+
+ /* find the platform device corresponding to fwnode */
+ if (is_of_node(dsaf_fwnode)) {
+ pdev = of_find_device_by_node(to_of_node(dsaf_fwnode));
+ } else if (is_acpi_device_node(dsaf_fwnode)) {
+ pdev = hns_dsaf_find_platform_device(dsaf_fwnode);
+ } else {
+ pr_err("fwnode is neither OF or ACPI type\n");
+ return -EINVAL;
+ }
+
+ /* check whether we managed to fetch the pdev */
+ if (!pdev) {
+ pr_err("couldn't find platform device for node\n");
+ return -ENODEV;
+ }
+
+ /* retrieve the dsaf_device from the driver data */
+ dsaf_dev = dev_get_drvdata(&pdev->dev);
+ if (!dsaf_dev) {
+ dev_err(&pdev->dev, "dsaf_dev is NULL\n");
+ put_device(&pdev->dev);
+ return -ENODEV;
+ }
+
+ /* now, make sure we are running on compatible SoC */
+ if (AE_IS_VER1(dsaf_dev->dsaf_ver)) {
+ dev_err(dsaf_dev->dev, "%s v1 chip doesn't support RoCE!\n",
+ dsaf_dev->ae_dev.name);
+ put_device(&pdev->dev);
+ return -ENODEV;
+ }
+
+ /* do reset or de-reset according to the flag */
+ if (!dereset) {
+ /* reset rocee-channels in dsaf and rocee */
+ dsaf_dev->misc_op->hns_dsaf_srst_chns(dsaf_dev, DSAF_CHNS_MASK,
+ false);
+ dsaf_dev->misc_op->hns_dsaf_roce_srst(dsaf_dev, false);
+ } else {
+ /* configure dsaf tx roce according to the port map and sl map */
+ mp = dsaf_read_dev(dsaf_dev, DSAF_ROCE_PORT_MAP_REG);
+ for (i = 0; i < DSAF_ROCE_CREDIT_CHN; i++)
+ dsaf_set_field(mp, 7 << i * 3, i * 3,
+ port_map[i][DSAF_ROCE_6PORT_MODE]);
+ dsaf_set_field(mp, 3 << i * 3, i * 3, 0);
+ dsaf_write_dev(dsaf_dev, DSAF_ROCE_PORT_MAP_REG, mp);
+
+ sl = dsaf_read_dev(dsaf_dev, DSAF_ROCE_SL_MAP_REG);
+ for (i = 0; i < DSAF_ROCE_CREDIT_CHN; i++)
+ dsaf_set_field(sl, 3 << i * 2, i * 2,
+ sl_map[i][DSAF_ROCE_6PORT_MODE]);
+ dsaf_write_dev(dsaf_dev, DSAF_ROCE_SL_MAP_REG, sl);
+
+ /* de-reset rocee-channels in dsaf and rocee */
+ dsaf_dev->misc_op->hns_dsaf_srst_chns(dsaf_dev, DSAF_CHNS_MASK,
+ true);
+ msleep(SRST_TIME_INTERVAL);
+ dsaf_dev->misc_op->hns_dsaf_roce_srst(dsaf_dev, true);
+
+ /* enable dsaf channel rocee credit */
+ credit = dsaf_read_dev(dsaf_dev, DSAF_SBM_ROCEE_CFG_REG_REG);
+ dsaf_set_bit(credit, DSAF_SBM_ROCEE_CFG_CRD_EN_B, 0);
+ dsaf_write_dev(dsaf_dev, DSAF_SBM_ROCEE_CFG_REG_REG, credit);
+
+ dsaf_set_bit(credit, DSAF_SBM_ROCEE_CFG_CRD_EN_B, 1);
+ dsaf_write_dev(dsaf_dev, DSAF_SBM_ROCEE_CFG_REG_REG, credit);
+ }
+
+ put_device(&pdev->dev);
+
+ return 0;
+}
+EXPORT_SYMBOL(hns_dsaf_roce_reset);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
+MODULE_DESCRIPTION("HNS DSAF driver");
+MODULE_VERSION(DSAF_MOD_VERSION);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
new file mode 100644
index 0000000000..5526a10caa
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
@@ -0,0 +1,468 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ */
+
+#ifndef __HNS_DSAF_MAIN_H
+#define __HNS_DSAF_MAIN_H
+#include "hnae.h"
+
+#include "hns_dsaf_reg.h"
+#include "hns_dsaf_mac.h"
+
+struct hns_mac_cb;
+
+#define DSAF_DRV_NAME "hns_dsaf"
+#define DSAF_MOD_VERSION "v1.0"
+#define DSAF_DEVICE_NAME "dsaf"
+
+#define HNS_DSAF_DEBUG_NW_REG_OFFSET 0x100000
+
+#define DSAF_BASE_INNER_PORT_NUM 127 /* mac tbl qid */
+
+#define DSAF_MAX_CHIP_NUM 2 /*max 2 chips */
+
+#define DSAF_DEFAUTL_QUEUE_NUM_PER_PPE 22
+
+#define HNS_DSAF_MAX_DESC_CNT 1024
+#define HNS_DSAF_MIN_DESC_CNT 16
+
+#define DSAF_INVALID_ENTRY_IDX 0xffff
+
+#define DSAF_CFG_READ_CNT 30
+
+#define DSAF_DUMP_REGS_NUM 504
+#define DSAF_STATIC_NUM 28
+#define DSAF_V2_STATIC_NUM 44
+#define DSAF_PRIO_NR 8
+#define DSAF_REG_PER_ZONE 3
+
+#define DSAF_ROCE_CREDIT_CHN 8
+#define DSAF_ROCE_CHAN_MODE 3
+
+#define HNS_MAX_WAIT_CNT 10000
+
+enum dsaf_roce_port_mode {
+ DSAF_ROCE_6PORT_MODE,
+ DSAF_ROCE_4PORT_MODE,
+ DSAF_ROCE_2PORT_MODE,
+ DSAF_ROCE_CHAN_MODE_NUM,
+};
+
+enum dsaf_roce_port_num {
+ DSAF_ROCE_PORT_0,
+ DSAF_ROCE_PORT_1,
+ DSAF_ROCE_PORT_2,
+ DSAF_ROCE_PORT_3,
+ DSAF_ROCE_PORT_4,
+ DSAF_ROCE_PORT_5,
+};
+
+enum dsaf_roce_qos_sl {
+ DSAF_ROCE_SL_0,
+ DSAF_ROCE_SL_1,
+ DSAF_ROCE_SL_2,
+ DSAF_ROCE_SL_3,
+};
+
+#define DSAF_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
+#define HNS_DSAF_IS_DEBUG(dev) ((dev)->dsaf_mode == DSAF_MODE_DISABLE_SP)
+
+enum hal_dsaf_mode {
+ HRD_DSAF_NO_DSAF_MODE = 0x0,
+ HRD_DSAF_MODE = 0x1,
+};
+
+enum hal_dsaf_tc_mode {
+ HRD_DSAF_4TC_MODE = 0X0,
+ HRD_DSAF_8TC_MODE = 0X1,
+};
+
+struct dsaf_vm_def_vlan {
+ u32 vm_def_vlan_id;
+ u32 vm_def_vlan_cfi;
+ u32 vm_def_vlan_pri;
+};
+
+struct dsaf_tbl_tcam_data {
+ u32 tbl_tcam_data_high;
+ u32 tbl_tcam_data_low;
+};
+
+#define DSAF_PORT_MSK_NUM \
+ ((DSAF_TOTAL_QUEUE_NUM + DSAF_SERVICE_NW_NUM - 1) / 32 + 1)
+struct dsaf_tbl_tcam_mcast_cfg {
+ u8 tbl_mcast_old_en;
+ u8 tbl_mcast_item_vld;
+ u32 tbl_mcast_port_msk[DSAF_PORT_MSK_NUM];
+};
+
+struct dsaf_tbl_tcam_ucast_cfg {
+ u32 tbl_ucast_old_en;
+ u32 tbl_ucast_item_vld;
+ u32 tbl_ucast_mac_discard;
+ u32 tbl_ucast_dvc;
+ u32 tbl_ucast_out_port;
+};
+
+struct dsaf_tbl_line_cfg {
+ u32 tbl_line_mac_discard;
+ u32 tbl_line_dvc;
+ u32 tbl_line_out_port;
+};
+
+enum dsaf_port_rate_mode {
+ DSAF_PORT_RATE_1000 = 0,
+ DSAF_PORT_RATE_2500,
+ DSAF_PORT_RATE_10000
+};
+
+enum dsaf_stp_port_type {
+ DSAF_STP_PORT_TYPE_DISCARD = 0,
+ DSAF_STP_PORT_TYPE_BLOCK = 1,
+ DSAF_STP_PORT_TYPE_LISTEN = 2,
+ DSAF_STP_PORT_TYPE_LEARN = 3,
+ DSAF_STP_PORT_TYPE_FORWARD = 4
+};
+
+enum dsaf_sw_port_type {
+ DSAF_SW_PORT_TYPE_NON_VLAN = 0,
+ DSAF_SW_PORT_TYPE_ACCESS = 1,
+ DSAF_SW_PORT_TYPE_TRUNK = 2,
+};
+
+#define DSAF_SUB_BASE_SIZE (0x10000)
+
+/* dsaf mode define */
+enum dsaf_mode {
+ DSAF_MODE_INVALID = 0, /**< Invalid dsaf mode */
+ DSAF_MODE_ENABLE_FIX, /**< en DSAF-mode, fixed to queue*/
+ DSAF_MODE_ENABLE_0VM, /**< en DSAF-mode, support 0 VM */
+ DSAF_MODE_ENABLE_8VM, /**< en DSAF-mode, support 8 VM */
+ DSAF_MODE_ENABLE_16VM, /**< en DSAF-mode, support 16 VM */
+ DSAF_MODE_ENABLE_32VM, /**< en DSAF-mode, support 32 VM */
+ DSAF_MODE_ENABLE_128VM, /**< en DSAF-mode, support 128 VM */
+ DSAF_MODE_ENABLE, /**< all modes above enable DSAF mode */
+ DSAF_MODE_DISABLE_SP, /**< non-dsaf, single port mode */
+ DSAF_MODE_DISABLE_FIX, /**< non-dsaf, fixed to queue */
+ DSAF_MODE_DISABLE_2PORT_8VM, /**< non-dsaf, 2port 8VM */
+ DSAF_MODE_DISABLE_2PORT_16VM, /**< non-dsaf, 2port 16VM */
+ DSAF_MODE_DISABLE_2PORT_64VM, /**< non-dsaf, 2port 64VM */
+ DSAF_MODE_DISABLE_6PORT_0VM, /**< non-dsaf, 6port 0VM */
+ DSAF_MODE_DISABLE_6PORT_2VM, /**< non-dsaf, 6port 2VM */
+ DSAF_MODE_DISABLE_6PORT_4VM, /**< non-dsaf, 6port 4VM */
+ DSAF_MODE_DISABLE_6PORT_16VM, /**< non-dsaf, 6port 16VM */
+ DSAF_MODE_MAX /**< the last one, use as the num */
+};
+
+#define DSAF_DEST_PORT_NUM 256 /* DSAF max port num */
+#define DSAF_WORD_BIT_CNT 32 /* the num bit of word */
+
+/*mac entry, mc or uc entry*/
+struct dsaf_drv_mac_single_dest_entry {
+ /* mac addr, match the entry*/
+ u8 addr[ETH_ALEN];
+ u16 in_vlan_id; /* value of VlanId */
+
+ /* the valid input port num: fixed to 0 in dsaf mode,
+ * in non-dsaf mode the port for which this entry is valid
+ */
+ u8 in_port_num;
+
+ u8 port_num; /*output port num*/
+ u8 rsv[6];
+};
+
+/*only mc entry*/
+struct dsaf_drv_mac_multi_dest_entry {
+ /* mac addr, match the entry*/
+ u8 addr[ETH_ALEN];
+ u16 in_vlan_id;
+ /* output port mask of this mac addr:
+ * bit0-bit5 correspond to Port0-Port5 (1 bit per port)
+ */
+ u32 port_mask[DSAF_DEST_PORT_NUM / DSAF_WORD_BIT_CNT];
+
+ /* the valid input port num: fixed to 0 in dsaf mode,
+ * in non-dsaf mode the port for which this entry is valid
+ */
+ u8 in_port_num;
+ u8 rsv[7];
+};
+
+struct dsaf_hw_stats {
+ u64 pad_drop;
+ u64 man_pkts;
+ u64 rx_pkts;
+ u64 rx_pkt_id;
+ u64 rx_pause_frame;
+ u64 release_buf_num;
+ u64 sbm_drop;
+ u64 crc_false;
+ u64 bp_drop;
+ u64 rslt_drop;
+ u64 local_addr_false;
+ u64 vlan_drop;
+ u64 stp_drop;
+ u64 rx_pfc[DSAF_PRIO_NR];
+ u64 tx_pfc[DSAF_PRIO_NR];
+ u64 tx_pkts;
+};
+
+struct hnae_vf_cb {
+ u8 port_index;
+ struct hns_mac_cb *mac_cb;
+ struct dsaf_device *dsaf_dev;
+ struct hnae_handle ae_handle; /* must be the last member */
+};
+
+struct dsaf_int_xge_src {
+ u32 xid_xge_ecc_err_int_src;
+ u32 xid_xge_fsm_timout_int_src;
+ u32 sbm_xge_lnk_fsm_timout_int_src;
+ u32 sbm_xge_lnk_ecc_2bit_int_src;
+ u32 sbm_xge_mib_req_failed_int_src;
+ u32 sbm_xge_mib_req_fsm_timout_int_src;
+ u32 sbm_xge_mib_rels_fsm_timout_int_src;
+ u32 sbm_xge_sram_ecc_2bit_int_src;
+ u32 sbm_xge_mib_buf_sum_err_int_src;
+ u32 sbm_xge_mib_req_extra_int_src;
+ u32 sbm_xge_mib_rels_extra_int_src;
+ u32 voq_xge_start_to_over_0_int_src;
+ u32 voq_xge_start_to_over_1_int_src;
+ u32 voq_xge_ecc_err_int_src;
+};
+
+struct dsaf_int_ppe_src {
+ u32 xid_ppe_fsm_timout_int_src;
+ u32 sbm_ppe_lnk_fsm_timout_int_src;
+ u32 sbm_ppe_lnk_ecc_2bit_int_src;
+ u32 sbm_ppe_mib_req_failed_int_src;
+ u32 sbm_ppe_mib_req_fsm_timout_int_src;
+ u32 sbm_ppe_mib_rels_fsm_timout_int_src;
+ u32 sbm_ppe_sram_ecc_2bit_int_src;
+ u32 sbm_ppe_mib_buf_sum_err_int_src;
+ u32 sbm_ppe_mib_req_extra_int_src;
+ u32 sbm_ppe_mib_rels_extra_int_src;
+ u32 voq_ppe_start_to_over_0_int_src;
+ u32 voq_ppe_ecc_err_int_src;
+ u32 xod_ppe_fifo_rd_empty_int_src;
+ u32 xod_ppe_fifo_wr_full_int_src;
+};
+
+struct dsaf_int_rocee_src {
+ u32 xid_rocee_fsm_timout_int_src;
+ u32 sbm_rocee_lnk_fsm_timout_int_src;
+ u32 sbm_rocee_lnk_ecc_2bit_int_src;
+ u32 sbm_rocee_mib_req_failed_int_src;
+ u32 sbm_rocee_mib_req_fsm_timout_int_src;
+ u32 sbm_rocee_mib_rels_fsm_timout_int_src;
+ u32 sbm_rocee_sram_ecc_2bit_int_src;
+ u32 sbm_rocee_mib_buf_sum_err_int_src;
+ u32 sbm_rocee_mib_req_extra_int_src;
+ u32 sbm_rocee_mib_rels_extra_int_src;
+ u32 voq_rocee_start_to_over_0_int_src;
+ u32 voq_rocee_ecc_err_int_src;
+};
+
+struct dsaf_int_tbl_src {
+ u32 tbl_da0_mis_src;
+ u32 tbl_da1_mis_src;
+ u32 tbl_da2_mis_src;
+ u32 tbl_da3_mis_src;
+ u32 tbl_da4_mis_src;
+ u32 tbl_da5_mis_src;
+ u32 tbl_da6_mis_src;
+ u32 tbl_da7_mis_src;
+ u32 tbl_sa_mis_src;
+ u32 tbl_old_sech_end_src;
+ u32 lram_ecc_err1_src;
+ u32 lram_ecc_err2_src;
+ u32 tram_ecc_err1_src;
+ u32 tram_ecc_err2_src;
+ u32 tbl_ucast_bcast_xge0_src;
+ u32 tbl_ucast_bcast_xge1_src;
+ u32 tbl_ucast_bcast_xge2_src;
+ u32 tbl_ucast_bcast_xge3_src;
+ u32 tbl_ucast_bcast_xge4_src;
+ u32 tbl_ucast_bcast_xge5_src;
+ u32 tbl_ucast_bcast_ppe_src;
+ u32 tbl_ucast_bcast_rocee_src;
+};
+
+struct dsaf_int_stat {
+ struct dsaf_int_xge_src dsaf_int_xge_stat[DSAF_COMM_CHN];
+ struct dsaf_int_ppe_src dsaf_int_ppe_stat[DSAF_COMM_CHN];
+ struct dsaf_int_rocee_src dsaf_int_rocee_stat[DSAF_COMM_CHN];
+ struct dsaf_int_tbl_src dsaf_int_tbl_stat[1];
+
+};
+
+struct dsaf_misc_op {
+ void (*cpld_set_led)(struct hns_mac_cb *mac_cb, int link_status,
+ u16 speed, int data);
+ void (*cpld_reset_led)(struct hns_mac_cb *mac_cb);
+ int (*cpld_set_led_id)(struct hns_mac_cb *mac_cb,
+ enum hnae_led_state status);
+ /* reset function series: the block is held in reset while dereset is false */
+ void (*dsaf_reset)(struct dsaf_device *dsaf_dev, bool dereset);
+ void (*xge_srst)(struct dsaf_device *dsaf_dev, u32 port, bool dereset);
+ void (*ge_srst)(struct dsaf_device *dsaf_dev, u32 port, bool dereset);
+ void (*ppe_srst)(struct dsaf_device *dsaf_dev, u32 port, bool dereset);
+ void (*ppe_comm_srst)(struct dsaf_device *dsaf_dev, bool dereset);
+ void (*hns_dsaf_srst_chns)(struct dsaf_device *dsaf_dev, u32 msk,
+ bool dereset);
+ void (*hns_dsaf_roce_srst)(struct dsaf_device *dsaf_dev, bool dereset);
+
+ phy_interface_t (*get_phy_if)(struct hns_mac_cb *mac_cb);
+ int (*get_sfp_prsnt)(struct hns_mac_cb *mac_cb, int *sfp_prsnt);
+
+ int (*cfg_serdes_loopback)(struct hns_mac_cb *mac_cb, bool en);
+};
+
+/* Dsaf device struct define ,and mac -> dsaf */
+struct dsaf_device {
+ struct device *dev;
+ struct hnae_ae_dev ae_dev;
+
+ u8 __iomem *sc_base;
+ u8 __iomem *sds_base;
+ u8 __iomem *ppe_base;
+ u8 __iomem *io_base;
+ struct regmap *sub_ctrl;
+ phys_addr_t ppe_paddr;
+
+ u32 desc_num; /* desc num per queue*/
+ u32 buf_size; /* ring buffer size */
+ u32 reset_offset; /* reset field offset in sub sysctrl */
+ int buf_size_type; /* ring buffer size-type */
+ enum dsaf_mode dsaf_mode; /* dsaf mode */
+ enum hal_dsaf_mode dsaf_en;
+ enum hal_dsaf_tc_mode dsaf_tc_mode;
+ u32 dsaf_ver;
+ u16 tcam_max_num; /* max TCAM entry for user except promisc */
+
+ struct ppe_common_cb *ppe_common[DSAF_COMM_DEV_NUM];
+ struct rcb_common_cb *rcb_common[DSAF_COMM_DEV_NUM];
+ struct hns_mac_cb *mac_cb[DSAF_MAX_PORT_NUM];
+ struct dsaf_misc_op *misc_op;
+
+ struct dsaf_hw_stats hw_stats[DSAF_NODE_NUM];
+ struct dsaf_int_stat int_stat;
+ /* spinlock protecting tcam table configuration */
+ spinlock_t tcam_lock;
+};
+
+static inline void *hns_dsaf_dev_priv(const struct dsaf_device *dsaf_dev)
+{
+ return (void *)((u8 *)dsaf_dev + sizeof(*dsaf_dev));
+}
+
+#define DSAF_TBL_TCAM_KEY_PORT_S 0
+#define DSAF_TBL_TCAM_KEY_PORT_M (((1ULL << 4) - 1) << 0)
+#define DSAF_TBL_TCAM_KEY_VLAN_S 4
+#define DSAF_TBL_TCAM_KEY_VLAN_M (((1ULL << 12) - 1) << 4)
+
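+/* tcam key layout: the high word holds mac address bytes 0-3, the low word
+ * holds the port/vlan field and mac address bytes 4-5
+ */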
+struct dsaf_drv_tbl_tcam_key {
+ union {
+ struct {
+ u8 mac_3;
+ u8 mac_2;
+ u8 mac_1;
+ u8 mac_0;
+ } bits;
+
+ u32 val;
+ } high;
+ union {
+ struct {
+ u16 port_vlan;
+ u8 mac_5;
+ u8 mac_4;
+ } bits;
+
+ u32 val;
+ } low;
+};
+
+struct dsaf_drv_soft_mac_tbl {
+ struct dsaf_drv_tbl_tcam_key tcam_key;
+ u16 index; /*the entry's index in tcam tab*/
+};
+
+struct dsaf_drv_priv {
+ /* soft tab Mac key, for hardware tab*/
+ struct dsaf_drv_soft_mac_tbl *soft_mac_tbl;
+};
+
+static inline void hns_dsaf_tbl_tcam_addr_cfg(struct dsaf_device *dsaf_dev,
+ u32 tab_tcam_addr)
+{
+ dsaf_set_dev_field(dsaf_dev, DSAF_TBL_TCAM_ADDR_0_REG,
+ DSAF_TBL_TCAM_ADDR_M, DSAF_TBL_TCAM_ADDR_S,
+ tab_tcam_addr);
+}
+
+static inline void hns_dsaf_tbl_tcam_load_pul(struct dsaf_device *dsaf_dev)
+{
+ u32 o_tbl_pul;
+
+ o_tbl_pul = dsaf_read_dev(dsaf_dev, DSAF_TBL_PUL_0_REG);
+ dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_TCAM_LOAD_S, 1);
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_PUL_0_REG, o_tbl_pul);
+ dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_TCAM_LOAD_S, 0);
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_PUL_0_REG, o_tbl_pul);
+}
+
+static inline void hns_dsaf_tbl_line_addr_cfg(struct dsaf_device *dsaf_dev,
+ u32 tab_line_addr)
+{
+ dsaf_set_dev_field(dsaf_dev, DSAF_TBL_LINE_ADDR_0_REG,
+ DSAF_TBL_LINE_ADDR_M, DSAF_TBL_LINE_ADDR_S,
+ tab_line_addr);
+}
+
+static inline struct hnae_vf_cb *hns_ae_get_vf_cb(
+ struct hnae_handle *handle)
+{
+ return container_of(handle, struct hnae_vf_cb, ae_handle);
+}
+
+int hns_dsaf_set_mac_uc_entry(struct dsaf_device *dsaf_dev,
+ struct dsaf_drv_mac_single_dest_entry *mac_entry);
+int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev,
+ struct dsaf_drv_mac_single_dest_entry *mac_entry);
+int hns_dsaf_del_mac_entry(struct dsaf_device *dsaf_dev, u16 vlan_id,
+ u8 in_port_num, u8 *addr);
+int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev,
+ struct dsaf_drv_mac_single_dest_entry *mac_entry);
+void hns_dsaf_fix_mac_mode(struct hns_mac_cb *mac_cb);
+
+int hns_dsaf_ae_init(struct dsaf_device *dsaf_dev);
+void hns_dsaf_ae_uninit(struct dsaf_device *dsaf_dev);
+
+void hns_dsaf_update_stats(struct dsaf_device *dsaf_dev, u32 inode_num);
+
+int hns_dsaf_get_sset_count(struct dsaf_device *dsaf_dev, int stringset);
+void hns_dsaf_get_stats(struct dsaf_device *ddev, u64 *data, int port);
+void hns_dsaf_get_strings(int stringset, u8 *data, int port,
+ struct dsaf_device *dsaf_dev);
+
+void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data);
+int hns_dsaf_get_regs_count(void);
+void hns_dsaf_set_promisc_mode(struct dsaf_device *dsaf_dev, u32 en);
+void hns_dsaf_set_promisc_tcam(struct dsaf_device *dsaf_dev,
+ u32 port, bool enable);
+
+void hns_dsaf_get_rx_mac_pause_en(struct dsaf_device *dsaf_dev, int mac_id,
+ u32 *en);
+int hns_dsaf_set_rx_mac_pause_en(struct dsaf_device *dsaf_dev, int mac_id,
+ u32 en);
+int hns_dsaf_rm_mac_addr(
+ struct dsaf_device *dsaf_dev,
+ struct dsaf_drv_mac_single_dest_entry *mac_entry);
+
+int hns_dsaf_clr_mac_mc_port(struct dsaf_device *dsaf_dev,
+ u8 mac_id, u8 port_num);
+int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port);
+
+int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset);
+
+#endif /* __HNS_DSAF_MAIN_H__ */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
new file mode 100644
index 0000000000..5df19c604d
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
@@ -0,0 +1,771 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ */
+
+#include "hns_dsaf_mac.h"
+#include "hns_dsaf_misc.h"
+#include "hns_dsaf_ppe.h"
+#include "hns_dsaf_reg.h"
+
+enum _dsm_op_index {
+ HNS_OP_RESET_FUNC = 0x1,
+ HNS_OP_SERDES_LP_FUNC = 0x2,
+ HNS_OP_LED_SET_FUNC = 0x3,
+ HNS_OP_GET_PORT_TYPE_FUNC = 0x4,
+ HNS_OP_GET_SFP_STAT_FUNC = 0x5,
+ HNS_OP_LOCATE_LED_SET_FUNC = 0x6,
+};
+
+enum _dsm_rst_type {
+ HNS_DSAF_RESET_FUNC = 0x1,
+ HNS_PPE_RESET_FUNC = 0x2,
+ HNS_XGE_RESET_FUNC = 0x4,
+ HNS_GE_RESET_FUNC = 0x5,
+ HNS_DSAF_CHN_RESET_FUNC = 0x6,
+ HNS_ROCE_RESET_FUNC = 0x7,
+};
+
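+/* ACPI _DSM UUID evaluated for the HNS hardware; the function indices it
+ * accepts are listed in enum _dsm_op_index above.
+ */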
+static const guid_t hns_dsaf_acpi_dsm_guid =
+ GUID_INIT(0x1A85AA1A, 0xE293, 0x415E,
+ 0x8E, 0x28, 0x8D, 0x69, 0x0A, 0x0F, 0x82, 0x0A);
+
+static void dsaf_write_sub(struct dsaf_device *dsaf_dev, u32 reg, u32 val)
+{
+ if (dsaf_dev->sub_ctrl)
+ dsaf_write_syscon(dsaf_dev->sub_ctrl, reg, val);
+ else
+ dsaf_write_reg(dsaf_dev->sc_base, reg, val);
+}
+
+static u32 dsaf_read_sub(struct dsaf_device *dsaf_dev, u32 reg)
+{
+ u32 ret = 0;
+ int err;
+
+ if (dsaf_dev->sub_ctrl) {
+ err = dsaf_read_syscon(dsaf_dev->sub_ctrl, reg, &ret);
+ if (err)
+ dev_err(dsaf_dev->dev, "dsaf_read_syscon error %d!\n",
+ err);
+ } else {
+ ret = dsaf_read_reg(dsaf_dev->sc_base, reg);
+ }
+
+ return ret;
+}
+
+static void hns_dsaf_acpi_ledctrl_by_port(struct hns_mac_cb *mac_cb, u8 op_type,
+ u32 link, u32 port, u32 act)
+{
+ union acpi_object *obj;
+ union acpi_object obj_args[3], argv4;
+
+ obj_args[0].integer.type = ACPI_TYPE_INTEGER;
+ obj_args[0].integer.value = link;
+ obj_args[1].integer.type = ACPI_TYPE_INTEGER;
+ obj_args[1].integer.value = port;
+ obj_args[2].integer.type = ACPI_TYPE_INTEGER;
+ obj_args[2].integer.value = act;
+
+ argv4.type = ACPI_TYPE_PACKAGE;
+ argv4.package.count = 3;
+ argv4.package.elements = obj_args;
+
+ obj = acpi_evaluate_dsm(ACPI_HANDLE(mac_cb->dev),
+ &hns_dsaf_acpi_dsm_guid, 0, op_type, &argv4);
+ if (!obj) {
+ dev_warn(mac_cb->dev, "ledctrl fail, link:%d port:%d act:%d!\n",
+ link, port, act);
+ return;
+ }
+
+ ACPI_FREE(obj);
+}
+
+static void hns_dsaf_acpi_locate_ledctrl_by_port(struct hns_mac_cb *mac_cb,
+ u8 op_type, u32 locate,
+ u32 port)
+{
+ union acpi_object obj_args[2], argv4;
+ union acpi_object *obj;
+
+ obj_args[0].integer.type = ACPI_TYPE_INTEGER;
+ obj_args[0].integer.value = locate;
+ obj_args[1].integer.type = ACPI_TYPE_INTEGER;
+ obj_args[1].integer.value = port;
+
+ argv4.type = ACPI_TYPE_PACKAGE;
+ argv4.package.count = 2;
+ argv4.package.elements = obj_args;
+
+ obj = acpi_evaluate_dsm(ACPI_HANDLE(mac_cb->dev),
+ &hns_dsaf_acpi_dsm_guid, 0, op_type, &argv4);
+ if (!obj) {
+ dev_err(mac_cb->dev, "ledctrl fail, locate:%d port:%d!\n",
+ locate, port);
+ return;
+ }
+
+ ACPI_FREE(obj);
+}
+
+static void hns_cpld_set_led(struct hns_mac_cb *mac_cb, int link_status,
+ u16 speed, int data)
+{
+ int speed_reg = 0;
+ u8 value;
+
+ if (!mac_cb) {
+ pr_err("sfp_led_opt mac_dev is null!\n");
+ return;
+ }
+ if (!mac_cb->cpld_ctrl) {
+ dev_err(mac_cb->dev, "mac_id=%d, cpld syscon is null !\n",
+ mac_cb->mac_id);
+ return;
+ }
+
+ if (speed == MAC_SPEED_10000)
+ speed_reg = 1;
+
+ value = mac_cb->cpld_led_value;
+
+ if (link_status) {
+ dsaf_set_bit(value, DSAF_LED_LINK_B, link_status);
+ dsaf_set_field(value, DSAF_LED_SPEED_M,
+ DSAF_LED_SPEED_S, speed_reg);
+ dsaf_set_bit(value, DSAF_LED_DATA_B, data);
+
+ if (value != mac_cb->cpld_led_value) {
+ dsaf_write_syscon(mac_cb->cpld_ctrl,
+ mac_cb->cpld_ctrl_reg, value);
+ mac_cb->cpld_led_value = value;
+ }
+ } else {
+ value = (mac_cb->cpld_led_value) & (0x1 << DSAF_LED_ANCHOR_B);
+ dsaf_write_syscon(mac_cb->cpld_ctrl,
+ mac_cb->cpld_ctrl_reg, value);
+ mac_cb->cpld_led_value = value;
+ }
+}
+
+static void hns_cpld_set_led_acpi(struct hns_mac_cb *mac_cb, int link_status,
+ u16 speed, int data)
+{
+ if (!mac_cb) {
+ pr_err("cpld_led_set mac_cb is null!\n");
+ return;
+ }
+
+ hns_dsaf_acpi_ledctrl_by_port(mac_cb, HNS_OP_LED_SET_FUNC,
+ link_status, mac_cb->mac_id, data);
+}
+
+static void cpld_led_reset(struct hns_mac_cb *mac_cb)
+{
+ if (!mac_cb || !mac_cb->cpld_ctrl)
+ return;
+
+ dsaf_write_syscon(mac_cb->cpld_ctrl, mac_cb->cpld_ctrl_reg,
+ CPLD_LED_DEFAULT_VALUE);
+ mac_cb->cpld_led_value = CPLD_LED_DEFAULT_VALUE;
+}
+
+static void cpld_led_reset_acpi(struct hns_mac_cb *mac_cb)
+{
+ if (!mac_cb) {
+ pr_err("cpld_led_reset mac_cb is null!\n");
+ return;
+ }
+
+ if (mac_cb->media_type != HNAE_MEDIA_TYPE_FIBER)
+ return;
+
+ hns_dsaf_acpi_ledctrl_by_port(mac_cb, HNS_OP_LED_SET_FUNC,
+ 0, mac_cb->mac_id, 0);
+}
+
+static int cpld_set_led_id(struct hns_mac_cb *mac_cb,
+ enum hnae_led_state status)
+{
+ u32 val = 0;
+ int ret;
+
+ if (!mac_cb->cpld_ctrl)
+ return 0;
+
+ switch (status) {
+ case HNAE_LED_ACTIVE:
+ ret = dsaf_read_syscon(mac_cb->cpld_ctrl, mac_cb->cpld_ctrl_reg,
+ &val);
+ if (ret)
+ return ret;
+
+ dsaf_set_bit(val, DSAF_LED_ANCHOR_B, CPLD_LED_ON_VALUE);
+ dsaf_write_syscon(mac_cb->cpld_ctrl, mac_cb->cpld_ctrl_reg,
+ val);
+ mac_cb->cpld_led_value = val;
+ break;
+ case HNAE_LED_INACTIVE:
+ dsaf_set_bit(mac_cb->cpld_led_value, DSAF_LED_ANCHOR_B,
+ CPLD_LED_DEFAULT_VALUE);
+ dsaf_write_syscon(mac_cb->cpld_ctrl, mac_cb->cpld_ctrl_reg,
+ mac_cb->cpld_led_value);
+ break;
+ default:
+ dev_err(mac_cb->dev, "invalid led state: %d!", status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int cpld_set_led_id_acpi(struct hns_mac_cb *mac_cb,
+ enum hnae_led_state status)
+{
+ switch (status) {
+ case HNAE_LED_ACTIVE:
+ hns_dsaf_acpi_locate_ledctrl_by_port(mac_cb,
+ HNS_OP_LOCATE_LED_SET_FUNC,
+ CPLD_LED_ON_VALUE,
+ mac_cb->mac_id);
+ break;
+ case HNAE_LED_INACTIVE:
+ hns_dsaf_acpi_locate_ledctrl_by_port(mac_cb,
+ HNS_OP_LOCATE_LED_SET_FUNC,
+ CPLD_LED_DEFAULT_VALUE,
+ mac_cb->mac_id);
+ break;
+ default:
+ dev_err(mac_cb->dev, "invalid led state: %d!", status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+#define RESET_REQ_OR_DREQ 1
+
+static void hns_dsaf_acpi_srst_by_port(struct dsaf_device *dsaf_dev, u8 op_type,
+ u32 port_type, u32 port, u32 val)
+{
+ union acpi_object *obj;
+ union acpi_object obj_args[3], argv4;
+
+ obj_args[0].integer.type = ACPI_TYPE_INTEGER;
+ obj_args[0].integer.value = port_type;
+ obj_args[1].integer.type = ACPI_TYPE_INTEGER;
+ obj_args[1].integer.value = port;
+ obj_args[2].integer.type = ACPI_TYPE_INTEGER;
+ obj_args[2].integer.value = val;
+
+ argv4.type = ACPI_TYPE_PACKAGE;
+ argv4.package.count = 3;
+ argv4.package.elements = obj_args;
+
+ obj = acpi_evaluate_dsm(ACPI_HANDLE(dsaf_dev->dev),
+ &hns_dsaf_acpi_dsm_guid, 0, op_type, &argv4);
+ if (!obj) {
+ dev_warn(dsaf_dev->dev, "reset port_type%d port%d fail!",
+ port_type, port);
+ return;
+ }
+
+ ACPI_FREE(obj);
+}
+
+static void hns_dsaf_rst(struct dsaf_device *dsaf_dev, bool dereset)
+{
+ u32 xbar_reg_addr;
+ u32 nt_reg_addr;
+
+ if (!dereset) {
+ xbar_reg_addr = DSAF_SUB_SC_XBAR_RESET_REQ_REG;
+ nt_reg_addr = DSAF_SUB_SC_NT_RESET_REQ_REG;
+ } else {
+ xbar_reg_addr = DSAF_SUB_SC_XBAR_RESET_DREQ_REG;
+ nt_reg_addr = DSAF_SUB_SC_NT_RESET_DREQ_REG;
+ }
+
+ dsaf_write_sub(dsaf_dev, xbar_reg_addr, RESET_REQ_OR_DREQ);
+ dsaf_write_sub(dsaf_dev, nt_reg_addr, RESET_REQ_OR_DREQ);
+}
+
+static void hns_dsaf_rst_acpi(struct dsaf_device *dsaf_dev, bool dereset)
+{
+ hns_dsaf_acpi_srst_by_port(dsaf_dev, HNS_OP_RESET_FUNC,
+ HNS_DSAF_RESET_FUNC,
+ 0, dereset);
+}
+
+static void hns_dsaf_xge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port,
+ bool dereset)
+{
+ u32 reg_val = 0;
+ u32 reg_addr;
+
+ if (port >= DSAF_XGE_NUM)
+ return;
+
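+	/* 0x2082082 is the same per-port reset bit pattern that the V2 GE
+	 * reset path below uses, shifted to this port's reset offset.
+	 */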
+ reg_val |= RESET_REQ_OR_DREQ;
+ reg_val |= 0x2082082 << dsaf_dev->mac_cb[port]->port_rst_off;
+
+ if (!dereset)
+ reg_addr = DSAF_SUB_SC_XGE_RESET_REQ_REG;
+ else
+ reg_addr = DSAF_SUB_SC_XGE_RESET_DREQ_REG;
+
+ dsaf_write_sub(dsaf_dev, reg_addr, reg_val);
+}
+
+static void hns_dsaf_xge_srst_by_port_acpi(struct dsaf_device *dsaf_dev,
+ u32 port, bool dereset)
+{
+ hns_dsaf_acpi_srst_by_port(dsaf_dev, HNS_OP_RESET_FUNC,
+ HNS_XGE_RESET_FUNC, port, dereset);
+}
+
+/**
+ * hns_dsaf_srst_chns - reset dsaf channels
+ * @dsaf_dev: dsaf device struct pointer
+ * @msk: xbar channels mask value:
+ * @dereset: false - request reset, true - drop reset
+ *
+ * bit0-5 for xge0-5
+ * bit6-11 for ppe0-5
+ * bit12-17 for roce0-5
+ * bit18-19 for com/dfx
+ */
+static void
+hns_dsaf_srst_chns(struct dsaf_device *dsaf_dev, u32 msk, bool dereset)
+{
+ u32 reg_addr;
+
+ if (!dereset)
+ reg_addr = DSAF_SUB_SC_DSAF_RESET_REQ_REG;
+ else
+ reg_addr = DSAF_SUB_SC_DSAF_RESET_DREQ_REG;
+
+ dsaf_write_sub(dsaf_dev, reg_addr, msk);
+}
+
+/**
+ * hns_dsaf_srst_chns_acpi - reset dsaf channels
+ * @dsaf_dev: dsaf device struct pointer
+ * @msk: xbar channels mask value:
+ * @dereset: false - request reset, true - drop reset
+ *
+ * bit0-5 for xge0-5
+ * bit6-11 for ppe0-5
+ * bit12-17 for roce0-5
+ * bit18-19 for com/dfx
+ */
+static void
+hns_dsaf_srst_chns_acpi(struct dsaf_device *dsaf_dev, u32 msk, bool dereset)
+{
+ hns_dsaf_acpi_srst_by_port(dsaf_dev, HNS_OP_RESET_FUNC,
+ HNS_DSAF_CHN_RESET_FUNC,
+ msk, dereset);
+}
+
+static void hns_dsaf_roce_srst(struct dsaf_device *dsaf_dev, bool dereset)
+{
+ if (!dereset) {
+ dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_ROCEE_RESET_REQ_REG, 1);
+ } else {
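+		/* De-assert: gate the RoCE clock, release the reset, give the
+		 * block 20ms to settle, then re-enable the clock.
+		 */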
+ dsaf_write_sub(dsaf_dev,
+ DSAF_SUB_SC_ROCEE_CLK_DIS_REG, 1);
+ dsaf_write_sub(dsaf_dev,
+ DSAF_SUB_SC_ROCEE_RESET_DREQ_REG, 1);
+ msleep(20);
+ dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_ROCEE_CLK_EN_REG, 1);
+ }
+}
+
+static void hns_dsaf_roce_srst_acpi(struct dsaf_device *dsaf_dev, bool dereset)
+{
+ hns_dsaf_acpi_srst_by_port(dsaf_dev, HNS_OP_RESET_FUNC,
+ HNS_ROCE_RESET_FUNC, 0, dereset);
+}
+
+static void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port,
+ bool dereset)
+{
+ u32 reg_val_1;
+ u32 reg_val_2;
+ u32 port_rst_off;
+
+ if (port >= DSAF_GE_NUM)
+ return;
+
+ if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) {
+		/* DSAF_MAX_PORT_NUM is 6, but DSAF_GE_NUM is 8.
+		 * We need a check to prevent array overflow.
+		 */
+ if (port >= DSAF_MAX_PORT_NUM)
+ return;
+ reg_val_1 = 0x1 << port;
+ port_rst_off = dsaf_dev->mac_cb[port]->port_rst_off;
+ /* there is difference between V1 and V2 in register.*/
+ reg_val_2 = AE_IS_VER1(dsaf_dev->dsaf_ver) ?
+ 0x1041041 : 0x2082082;
+ reg_val_2 <<= port_rst_off;
+
+ if (!dereset) {
+ dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_GE_RESET_REQ1_REG,
+ reg_val_1);
+
+ dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_GE_RESET_REQ0_REG,
+ reg_val_2);
+ } else {
+ dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_GE_RESET_DREQ0_REG,
+ reg_val_2);
+
+ dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_GE_RESET_DREQ1_REG,
+ reg_val_1);
+ }
+ } else {
+ reg_val_1 = 0x15540;
+ reg_val_2 = AE_IS_VER1(dsaf_dev->dsaf_ver) ? 0x100 : 0x40;
+
+ reg_val_1 <<= dsaf_dev->reset_offset;
+ reg_val_2 <<= dsaf_dev->reset_offset;
+
+ if (!dereset) {
+ dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_GE_RESET_REQ1_REG,
+ reg_val_1);
+
+ dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_PPE_RESET_REQ_REG,
+ reg_val_2);
+ } else {
+ dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_GE_RESET_DREQ1_REG,
+ reg_val_1);
+
+ dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_PPE_RESET_DREQ_REG,
+ reg_val_2);
+ }
+ }
+}
+
+static void hns_dsaf_ge_srst_by_port_acpi(struct dsaf_device *dsaf_dev,
+ u32 port, bool dereset)
+{
+ hns_dsaf_acpi_srst_by_port(dsaf_dev, HNS_OP_RESET_FUNC,
+ HNS_GE_RESET_FUNC, port, dereset);
+}
+
+static void hns_ppe_srst_by_port(struct dsaf_device *dsaf_dev, u32 port,
+ bool dereset)
+{
+ u32 reg_val = 0;
+ u32 reg_addr;
+
+ reg_val |= RESET_REQ_OR_DREQ << dsaf_dev->mac_cb[port]->port_rst_off;
+
+ if (!dereset)
+ reg_addr = DSAF_SUB_SC_PPE_RESET_REQ_REG;
+ else
+ reg_addr = DSAF_SUB_SC_PPE_RESET_DREQ_REG;
+
+ dsaf_write_sub(dsaf_dev, reg_addr, reg_val);
+}
+
+static void
+hns_ppe_srst_by_port_acpi(struct dsaf_device *dsaf_dev, u32 port, bool dereset)
+{
+ hns_dsaf_acpi_srst_by_port(dsaf_dev, HNS_OP_RESET_FUNC,
+ HNS_PPE_RESET_FUNC, port, dereset);
+}
+
+static void hns_ppe_com_srst(struct dsaf_device *dsaf_dev, bool dereset)
+{
+ u32 reg_val;
+ u32 reg_addr;
+
+ if (!(dev_of_node(dsaf_dev->dev)))
+ return;
+
+ if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) {
+ reg_val = RESET_REQ_OR_DREQ;
+ if (!dereset)
+ reg_addr = DSAF_SUB_SC_RCB_PPE_COM_RESET_REQ_REG;
+ else
+ reg_addr = DSAF_SUB_SC_RCB_PPE_COM_RESET_DREQ_REG;
+
+ } else {
+ reg_val = 0x100 << dsaf_dev->reset_offset;
+
+ if (!dereset)
+ reg_addr = DSAF_SUB_SC_PPE_RESET_REQ_REG;
+ else
+ reg_addr = DSAF_SUB_SC_PPE_RESET_DREQ_REG;
+ }
+
+ dsaf_write_sub(dsaf_dev, reg_addr, reg_val);
+}
+
+/**
+ * hns_mac_get_phy_if - get phy interface from serdes mode
+ * @mac_cb: mac control block
+ * return phy interface
+ */
+static phy_interface_t hns_mac_get_phy_if(struct hns_mac_cb *mac_cb)
+{
+ u32 mode;
+ u32 reg;
+ bool is_ver1 = AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver);
+ int mac_id = mac_cb->mac_id;
+ phy_interface_t phy_if;
+
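+	/* The port mode bit is read from the hilink4 register for MACs 0-3
+	 * and from hilink3 otherwise; V2 silicon uses its own register
+	 * addresses for the same split.
+	 */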
+ if (is_ver1) {
+ if (HNS_DSAF_IS_DEBUG(mac_cb->dsaf_dev))
+ return PHY_INTERFACE_MODE_SGMII;
+
+ if (mac_id >= 0 && mac_id <= 3)
+ reg = HNS_MAC_HILINK4_REG;
+ else
+ reg = HNS_MAC_HILINK3_REG;
+ } else {
+ if (!HNS_DSAF_IS_DEBUG(mac_cb->dsaf_dev) && mac_id <= 3)
+ reg = HNS_MAC_HILINK4V2_REG;
+ else
+ reg = HNS_MAC_HILINK3V2_REG;
+ }
+
+ mode = dsaf_read_sub(mac_cb->dsaf_dev, reg);
+ if (dsaf_get_bit(mode, mac_cb->port_mode_off))
+ phy_if = PHY_INTERFACE_MODE_XGMII;
+ else
+ phy_if = PHY_INTERFACE_MODE_SGMII;
+
+ return phy_if;
+}
+
+static phy_interface_t hns_mac_get_phy_if_acpi(struct hns_mac_cb *mac_cb)
+{
+ phy_interface_t phy_if = PHY_INTERFACE_MODE_NA;
+ union acpi_object *obj;
+ union acpi_object obj_args, argv4;
+
+ obj_args.integer.type = ACPI_TYPE_INTEGER;
+ obj_args.integer.value = mac_cb->mac_id;
+
+ argv4.type = ACPI_TYPE_PACKAGE;
+ argv4.package.count = 1;
+ argv4.package.elements = &obj_args;
+
+ obj = acpi_evaluate_dsm_typed(ACPI_HANDLE(mac_cb->dev),
+ &hns_dsaf_acpi_dsm_guid, 0,
+ HNS_OP_GET_PORT_TYPE_FUNC, &argv4,
+ ACPI_TYPE_INTEGER);
+ if (!obj)
+ return phy_if;
+
+ phy_if = obj->integer.value ?
+ PHY_INTERFACE_MODE_XGMII : PHY_INTERFACE_MODE_SGMII;
+
+ dev_dbg(mac_cb->dev, "mac_id=%d, phy_if=%d\n", mac_cb->mac_id, phy_if);
+
+ ACPI_FREE(obj);
+
+ return phy_if;
+}
+
+static int hns_mac_get_sfp_prsnt(struct hns_mac_cb *mac_cb, int *sfp_prsnt)
+{
+ u32 val = 0;
+ int ret;
+
+ if (!mac_cb->cpld_ctrl)
+ return -ENODEV;
+
+ ret = dsaf_read_syscon(mac_cb->cpld_ctrl,
+ mac_cb->cpld_ctrl_reg + MAC_SFP_PORT_OFFSET,
+ &val);
+ if (ret)
+ return ret;
+
+ *sfp_prsnt = !val;
+ return 0;
+}
+
+static int hns_mac_get_sfp_prsnt_acpi(struct hns_mac_cb *mac_cb, int *sfp_prsnt)
+{
+ union acpi_object *obj;
+ union acpi_object obj_args, argv4;
+
+ obj_args.integer.type = ACPI_TYPE_INTEGER;
+ obj_args.integer.value = mac_cb->mac_id;
+
+ argv4.type = ACPI_TYPE_PACKAGE;
+ argv4.package.count = 1;
+ argv4.package.elements = &obj_args;
+
+ obj = acpi_evaluate_dsm_typed(ACPI_HANDLE(mac_cb->dev),
+ &hns_dsaf_acpi_dsm_guid, 0,
+ HNS_OP_GET_SFP_STAT_FUNC, &argv4,
+ ACPI_TYPE_INTEGER);
+ if (!obj)
+ return -ENODEV;
+
+ *sfp_prsnt = obj->integer.value;
+
+ ACPI_FREE(obj);
+
+ return 0;
+}
+
+/**
+ * hns_mac_config_sds_loopback - set loopback for serdes
+ * @mac_cb: mac control block
+ * @en: enable or disable
+ * return 0 on success
+ */
+static int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, bool en)
+{
+ const u8 lane_id[] = {
+ 0, /* mac 0 -> lane 0 */
+ 1, /* mac 1 -> lane 1 */
+ 2, /* mac 2 -> lane 2 */
+ 3, /* mac 3 -> lane 3 */
+ 2, /* mac 4 -> lane 2 */
+ 3, /* mac 5 -> lane 3 */
+ 0, /* mac 6 -> lane 0 */
+ 1 /* mac 7 -> lane 1 */
+ };
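+	/* Byte offset of the per-lane serdes RX CSR; bit 10 of CSR 0 is the
+	 * loopback enable toggled below.
+	 */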
+#define RX_CSR(lane, reg) ((0x4080 + (reg) * 0x0002 + (lane) * 0x0200) * 2)
+ u64 reg_offset = RX_CSR(lane_id[mac_cb->mac_id], 0);
+
+ int sfp_prsnt = 0;
+ int ret = hns_mac_get_sfp_prsnt(mac_cb, &sfp_prsnt);
+
+ if (!mac_cb->phy_dev) {
+ if (ret)
+ pr_info("please confirm sfp is present or not\n");
+ else
+ if (!sfp_prsnt)
+ pr_info("no sfp in this eth\n");
+ }
+
+ if (mac_cb->serdes_ctrl) {
+ u32 origin = 0;
+
+ if (!AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver)) {
+#define HILINK_ACCESS_SEL_CFG 0x40008
+ /* hilink4 & hilink3 use the same xge training and
+ * xge u adaptor. There is a hilink access sel cfg
+			 * register to select which one to configure
+ */
+ if ((!HNS_DSAF_IS_DEBUG(mac_cb->dsaf_dev)) &&
+ (mac_cb->mac_id <= 3))
+ dsaf_write_syscon(mac_cb->serdes_ctrl,
+ HILINK_ACCESS_SEL_CFG, 0);
+ else
+ dsaf_write_syscon(mac_cb->serdes_ctrl,
+ HILINK_ACCESS_SEL_CFG, 3);
+ }
+
+ ret = dsaf_read_syscon(mac_cb->serdes_ctrl, reg_offset,
+ &origin);
+ if (ret)
+ return ret;
+
+ dsaf_set_field(origin, 1ull << 10, 10, en);
+ dsaf_write_syscon(mac_cb->serdes_ctrl, reg_offset, origin);
+ } else {
+ u8 __iomem *base_addr = mac_cb->serdes_vaddr +
+ (mac_cb->mac_id <= 3 ? 0x00280000 : 0x00200000);
+ dsaf_set_reg_field(base_addr, reg_offset, 1ull << 10, 10, en);
+ }
+
+ return 0;
+}
+
+static int
+hns_mac_config_sds_loopback_acpi(struct hns_mac_cb *mac_cb, bool en)
+{
+ union acpi_object *obj;
+ union acpi_object obj_args[3], argv4;
+
+ obj_args[0].integer.type = ACPI_TYPE_INTEGER;
+ obj_args[0].integer.value = mac_cb->mac_id;
+ obj_args[1].integer.type = ACPI_TYPE_INTEGER;
+ obj_args[1].integer.value = en;
+
+ argv4.type = ACPI_TYPE_PACKAGE;
+ argv4.package.count = 2;
+ argv4.package.elements = obj_args;
+
+ obj = acpi_evaluate_dsm(ACPI_HANDLE(mac_cb->dsaf_dev->dev),
+ &hns_dsaf_acpi_dsm_guid, 0,
+ HNS_OP_SERDES_LP_FUNC, &argv4);
+ if (!obj) {
+ dev_warn(mac_cb->dsaf_dev->dev, "set port%d serdes lp fail!",
+ mac_cb->mac_id);
+
+ return -ENOTSUPP;
+ }
+
+ ACPI_FREE(obj);
+
+ return 0;
+}
+
+struct dsaf_misc_op *hns_misc_op_get(struct dsaf_device *dsaf_dev)
+{
+ struct dsaf_misc_op *misc_op;
+
+ misc_op = devm_kzalloc(dsaf_dev->dev, sizeof(*misc_op), GFP_KERNEL);
+ if (!misc_op)
+ return NULL;
+
+ if (dev_of_node(dsaf_dev->dev)) {
+ misc_op->cpld_set_led = hns_cpld_set_led;
+ misc_op->cpld_reset_led = cpld_led_reset;
+ misc_op->cpld_set_led_id = cpld_set_led_id;
+
+ misc_op->dsaf_reset = hns_dsaf_rst;
+ misc_op->xge_srst = hns_dsaf_xge_srst_by_port;
+ misc_op->ge_srst = hns_dsaf_ge_srst_by_port;
+ misc_op->ppe_srst = hns_ppe_srst_by_port;
+ misc_op->ppe_comm_srst = hns_ppe_com_srst;
+ misc_op->hns_dsaf_srst_chns = hns_dsaf_srst_chns;
+ misc_op->hns_dsaf_roce_srst = hns_dsaf_roce_srst;
+
+ misc_op->get_phy_if = hns_mac_get_phy_if;
+ misc_op->get_sfp_prsnt = hns_mac_get_sfp_prsnt;
+
+ misc_op->cfg_serdes_loopback = hns_mac_config_sds_loopback;
+ } else if (is_acpi_node(dsaf_dev->dev->fwnode)) {
+ misc_op->cpld_set_led = hns_cpld_set_led_acpi;
+ misc_op->cpld_reset_led = cpld_led_reset_acpi;
+ misc_op->cpld_set_led_id = cpld_set_led_id_acpi;
+
+ misc_op->dsaf_reset = hns_dsaf_rst_acpi;
+ misc_op->xge_srst = hns_dsaf_xge_srst_by_port_acpi;
+ misc_op->ge_srst = hns_dsaf_ge_srst_by_port_acpi;
+ misc_op->ppe_srst = hns_ppe_srst_by_port_acpi;
+ misc_op->ppe_comm_srst = hns_ppe_com_srst;
+ misc_op->hns_dsaf_srst_chns = hns_dsaf_srst_chns_acpi;
+ misc_op->hns_dsaf_roce_srst = hns_dsaf_roce_srst_acpi;
+
+ misc_op->get_phy_if = hns_mac_get_phy_if_acpi;
+ misc_op->get_sfp_prsnt = hns_mac_get_sfp_prsnt_acpi;
+
+ misc_op->cfg_serdes_loopback = hns_mac_config_sds_loopback_acpi;
+ } else {
+ devm_kfree(dsaf_dev->dev, (void *)misc_op);
+ misc_op = NULL;
+ }
+
+ return (void *)misc_op;
+}
+
+struct
+platform_device *hns_dsaf_find_platform_device(struct fwnode_handle *fwnode)
+{
+ struct device *dev;
+
+ dev = bus_find_device_by_fwnode(&platform_bus_type, fwnode);
+ return dev ? to_platform_device(dev) : NULL;
+}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.h
new file mode 100644
index 0000000000..f64c6667dd
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ */
+
+#ifndef _HNS_DSAF_MISC_H
+#define _HNS_DSAF_MISC_H
+
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+
+#include "hns_dsaf_mac.h"
+
+#define CPLD_ADDR_PORT_OFFSET 0x4
+
+#define HS_LED_ON 0xE
+#define HS_LED_OFF 0xF
+
+#define CPLD_LED_ON_VALUE 1
+#define CPLD_LED_DEFAULT_VALUE 0
+
+#define MAC_SFP_PORT_OFFSET 0x2
+
+#define DSAF_LED_SPEED_S 0
+#define DSAF_LED_SPEED_M (0x3 << DSAF_LED_SPEED_S)
+
+#define DSAF_LED_LINK_B 2
+#define DSAF_LED_DATA_B 4
+#define DSAF_LED_ANCHOR_B 5
+
+struct dsaf_misc_op *hns_misc_op_get(struct dsaf_device *dsaf_dev);
+struct
+platform_device *hns_dsaf_find_platform_device(struct fwnode_handle *fwnode);
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
new file mode 100644
index 0000000000..a08d1f0a5a
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
@@ -0,0 +1,630 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+
+#include "hns_dsaf_ppe.h"
+
+void hns_ppe_set_tso_enable(struct hns_ppe_cb *ppe_cb, u32 value)
+{
+ dsaf_set_dev_bit(ppe_cb, PPEV2_CFG_TSO_EN_REG, 0, !!value);
+}
+
+void hns_ppe_set_rss_key(struct hns_ppe_cb *ppe_cb,
+ const u32 rss_key[HNS_PPEV2_RSS_KEY_NUM])
+{
+ u32 key_item;
+
+ for (key_item = 0; key_item < HNS_PPEV2_RSS_KEY_NUM; key_item++)
+ dsaf_write_dev(ppe_cb, PPEV2_RSS_KEY_REG + key_item * 0x4,
+ rss_key[key_item]);
+}
+
+void hns_ppe_set_indir_table(struct hns_ppe_cb *ppe_cb,
+ const u32 rss_tab[HNS_PPEV2_RSS_IND_TBL_SIZE])
+{
+ int i;
+ int reg_value;
+
+ for (i = 0; i < (HNS_PPEV2_RSS_IND_TBL_SIZE / 4); i++) {
+ reg_value = dsaf_read_dev(ppe_cb,
+ PPEV2_INDRECTION_TBL_REG + i * 0x4);
+
+ dsaf_set_field(reg_value, PPEV2_CFG_RSS_TBL_4N0_M,
+ PPEV2_CFG_RSS_TBL_4N0_S,
+ rss_tab[i * 4 + 0] & 0x1F);
+ dsaf_set_field(reg_value, PPEV2_CFG_RSS_TBL_4N1_M,
+ PPEV2_CFG_RSS_TBL_4N1_S,
+ rss_tab[i * 4 + 1] & 0x1F);
+ dsaf_set_field(reg_value, PPEV2_CFG_RSS_TBL_4N2_M,
+ PPEV2_CFG_RSS_TBL_4N2_S,
+ rss_tab[i * 4 + 2] & 0x1F);
+ dsaf_set_field(reg_value, PPEV2_CFG_RSS_TBL_4N3_M,
+ PPEV2_CFG_RSS_TBL_4N3_S,
+ rss_tab[i * 4 + 3] & 0x1F);
+ dsaf_write_dev(
+ ppe_cb, PPEV2_INDRECTION_TBL_REG + i * 0x4, reg_value);
+ }
+}
+
+static u8 __iomem *
+hns_ppe_common_get_ioaddr(struct ppe_common_cb *ppe_common)
+{
+ return ppe_common->dsaf_dev->ppe_base + PPE_COMMON_REG_OFFSET;
+}
+
+/**
+ * hns_ppe_common_get_cfg - get ppe common config
+ * @dsaf_dev: dsaf device
+ * @comm_index: common index
+ * return 0 - success, negative - fail
+ */
+static int hns_ppe_common_get_cfg(struct dsaf_device *dsaf_dev, int comm_index)
+{
+ struct ppe_common_cb *ppe_common;
+ int ppe_num;
+
+ if (!HNS_DSAF_IS_DEBUG(dsaf_dev))
+ ppe_num = HNS_PPE_SERVICE_NW_ENGINE_NUM;
+ else
+ ppe_num = HNS_PPE_DEBUG_NW_ENGINE_NUM;
+
+ ppe_common = devm_kzalloc(dsaf_dev->dev,
+ struct_size(ppe_common, ppe_cb, ppe_num),
+ GFP_KERNEL);
+ if (!ppe_common)
+ return -ENOMEM;
+
+ ppe_common->ppe_num = ppe_num;
+ ppe_common->dsaf_dev = dsaf_dev;
+ ppe_common->comm_index = comm_index;
+ if (!HNS_DSAF_IS_DEBUG(dsaf_dev))
+ ppe_common->ppe_mode = PPE_COMMON_MODE_SERVICE;
+ else
+ ppe_common->ppe_mode = PPE_COMMON_MODE_DEBUG;
+ ppe_common->dev = dsaf_dev->dev;
+
+ ppe_common->io_base = hns_ppe_common_get_ioaddr(ppe_common);
+
+ dsaf_dev->ppe_common[comm_index] = ppe_common;
+
+ return 0;
+}
+
+static void
+hns_ppe_common_free_cfg(struct dsaf_device *dsaf_dev, u32 comm_index)
+{
+ dsaf_dev->ppe_common[comm_index] = NULL;
+}
+
+static u8 __iomem *hns_ppe_get_iobase(struct ppe_common_cb *ppe_common,
+ int ppe_idx)
+{
+ return ppe_common->dsaf_dev->ppe_base + ppe_idx * PPE_REG_OFFSET;
+}
+
+static void hns_ppe_get_cfg(struct ppe_common_cb *ppe_common)
+{
+ u32 i;
+ struct hns_ppe_cb *ppe_cb;
+ u32 ppe_num = ppe_common->ppe_num;
+
+ for (i = 0; i < ppe_num; i++) {
+ ppe_cb = &ppe_common->ppe_cb[i];
+ ppe_cb->dev = ppe_common->dev;
+ ppe_cb->next = NULL;
+ ppe_cb->ppe_common_cb = ppe_common;
+ ppe_cb->index = i;
+ ppe_cb->io_base = hns_ppe_get_iobase(ppe_common, i);
+ ppe_cb->virq = 0;
+ }
+}
+
+static void hns_ppe_cnt_clr_ce(struct hns_ppe_cb *ppe_cb)
+{
+ dsaf_set_dev_bit(ppe_cb, PPE_TNL_0_5_CNT_CLR_CE_REG,
+ PPE_CNT_CLR_CE_B, 1);
+}
+
+static void hns_ppe_set_vlan_strip(struct hns_ppe_cb *ppe_cb, int en)
+{
+ dsaf_write_dev(ppe_cb, PPEV2_VLAN_STRIP_EN_REG, en);
+}
+
+/**
+ * hns_ppe_checksum_hw - set ppe checksum calculation
+ * @ppe_cb: ppe device
+ * @value: value
+ */
+static void hns_ppe_checksum_hw(struct hns_ppe_cb *ppe_cb, u32 value)
+{
+ dsaf_set_dev_field(ppe_cb, PPE_CFG_PRO_CHECK_EN_REG,
+ 0xfffffff, 0, value);
+}
+
+static void hns_ppe_set_qid_mode(struct ppe_common_cb *ppe_common,
+				 enum ppe_qid_mode qid_mode)
+{
+	dsaf_set_dev_field(ppe_common, PPE_COM_CFG_QID_MODE_REG,
+			   PPE_CFG_QID_MODE_CF_QID_MODE_M,
+			   PPE_CFG_QID_MODE_CF_QID_MODE_S, qid_mode);
+}
+
+/**
+ * hns_ppe_set_qid - set ppe qid
+ * @ppe_common: ppe common device
+ * @qid: queue id
+ */
+static void hns_ppe_set_qid(struct ppe_common_cb *ppe_common, u32 qid)
+{
+ u32 qid_mod = dsaf_read_dev(ppe_common, PPE_COM_CFG_QID_MODE_REG);
+
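+	/* Only program the default qid if it has not been configured yet. */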
+ if (!dsaf_get_field(qid_mod, PPE_CFG_QID_MODE_DEF_QID_M,
+ PPE_CFG_QID_MODE_DEF_QID_S)) {
+ dsaf_set_field(qid_mod, PPE_CFG_QID_MODE_DEF_QID_M,
+ PPE_CFG_QID_MODE_DEF_QID_S, qid);
+ dsaf_write_dev(ppe_common, PPE_COM_CFG_QID_MODE_REG, qid_mod);
+ }
+}
+
+/**
+ * hns_ppe_set_port_mode - set port mode
+ * @ppe_cb: ppe device
+ * @mode: port mode
+ */
+static void hns_ppe_set_port_mode(struct hns_ppe_cb *ppe_cb,
+ enum ppe_port_mode mode)
+{
+ dsaf_write_dev(ppe_cb, PPE_CFG_XGE_MODE_REG, mode);
+}
+
+/**
+ * hns_ppe_common_init_hw - init ppe common device
+ * @ppe_common: ppe common device
+ *
+ * Return 0 on success, negative on failure
+ */
+static int hns_ppe_common_init_hw(struct ppe_common_cb *ppe_common)
+{
+ enum ppe_qid_mode qid_mode;
+ struct dsaf_device *dsaf_dev = ppe_common->dsaf_dev;
+ enum dsaf_mode dsaf_mode = dsaf_dev->dsaf_mode;
+
+ dsaf_dev->misc_op->ppe_comm_srst(dsaf_dev, 0);
+ msleep(100);
+ dsaf_dev->misc_op->ppe_comm_srst(dsaf_dev, 1);
+ msleep(100);
+
+ if (ppe_common->ppe_mode == PPE_COMMON_MODE_SERVICE) {
+ switch (dsaf_mode) {
+ case DSAF_MODE_ENABLE_FIX:
+ case DSAF_MODE_DISABLE_FIX:
+ qid_mode = PPE_QID_MODE0;
+ hns_ppe_set_qid(ppe_common, 0);
+ break;
+ case DSAF_MODE_ENABLE_0VM:
+ case DSAF_MODE_DISABLE_2PORT_64VM:
+ qid_mode = PPE_QID_MODE3;
+ break;
+ case DSAF_MODE_ENABLE_8VM:
+ case DSAF_MODE_DISABLE_2PORT_16VM:
+ qid_mode = PPE_QID_MODE4;
+ break;
+ case DSAF_MODE_ENABLE_16VM:
+ case DSAF_MODE_DISABLE_6PORT_0VM:
+ qid_mode = PPE_QID_MODE5;
+ break;
+ case DSAF_MODE_ENABLE_32VM:
+ case DSAF_MODE_DISABLE_6PORT_16VM:
+ qid_mode = PPE_QID_MODE2;
+ break;
+ case DSAF_MODE_ENABLE_128VM:
+ case DSAF_MODE_DISABLE_6PORT_4VM:
+ qid_mode = PPE_QID_MODE1;
+ break;
+ case DSAF_MODE_DISABLE_2PORT_8VM:
+ qid_mode = PPE_QID_MODE7;
+ break;
+ case DSAF_MODE_DISABLE_6PORT_2VM:
+ qid_mode = PPE_QID_MODE6;
+ break;
+ default:
+ dev_err(ppe_common->dev,
+ "get ppe queue mode failed! dsaf_mode=%d\n",
+ dsaf_mode);
+ return -EINVAL;
+ }
+ hns_ppe_set_qid_mode(ppe_common, qid_mode);
+ }
+
+ dsaf_set_dev_bit(ppe_common, PPE_COM_COMMON_CNT_CLR_CE_REG,
+ PPE_COMMON_CNT_CLR_CE_B, 1);
+
+ return 0;
+}
+
+/*clr ppe exception irq*/
+static void hns_ppe_exc_irq_en(struct hns_ppe_cb *ppe_cb, int en)
+{
+ u32 clr_vlue = 0xfffffffful;
+ u32 msk_vlue = en ? 0xfffffffful : 0; /*1 is en, 0 is dis*/
+ u32 vld_msk = 0;
+
+ /*only care bit 0,1,7*/
+ dsaf_set_bit(vld_msk, 0, 1);
+ dsaf_set_bit(vld_msk, 1, 1);
+ dsaf_set_bit(vld_msk, 7, 1);
+
+	/* clr sts */
+ dsaf_write_dev(ppe_cb, PPE_RINT_REG, clr_vlue);
+
+	/* only the valid bits are enabled; reserved bits are written as 0 */
+ dsaf_write_dev(ppe_cb, PPE_INTEN_REG, msk_vlue & vld_msk);
+}
+
+int hns_ppe_wait_tx_fifo_clean(struct hns_ppe_cb *ppe_cb)
+{
+ int wait_cnt;
+ u32 val;
+
+ wait_cnt = 0;
+ while (wait_cnt++ < HNS_MAX_WAIT_CNT) {
+ val = dsaf_read_dev(ppe_cb, PPE_CURR_TX_FIFO0_REG) & 0x3ffU;
+ if (!val)
+ break;
+
+ usleep_range(100, 200);
+ }
+
+ if (wait_cnt >= HNS_MAX_WAIT_CNT) {
+ dev_err(ppe_cb->dev, "hns ppe tx fifo clean wait timeout, still has %u pkt.\n",
+ val);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+/**
+ * hns_ppe_init_hw - init ppe
+ * @ppe_cb: ppe device
+ */
+static void hns_ppe_init_hw(struct hns_ppe_cb *ppe_cb)
+{
+ struct ppe_common_cb *ppe_common_cb = ppe_cb->ppe_common_cb;
+ u32 port = ppe_cb->index;
+ struct dsaf_device *dsaf_dev = ppe_common_cb->dsaf_dev;
+ int i;
+
+ /* get default RSS key */
+ netdev_rss_key_fill(ppe_cb->rss_key, HNS_PPEV2_RSS_KEY_SIZE);
+
+ dsaf_dev->misc_op->ppe_srst(dsaf_dev, port, 0);
+ mdelay(10);
+ dsaf_dev->misc_op->ppe_srst(dsaf_dev, port, 1);
+
+ /* clr and msk except irq*/
+ hns_ppe_exc_irq_en(ppe_cb, 0);
+
+ if (ppe_common_cb->ppe_mode == PPE_COMMON_MODE_DEBUG) {
+ hns_ppe_set_port_mode(ppe_cb, PPE_MODE_GE);
+ dsaf_write_dev(ppe_cb, PPE_CFG_PAUSE_IDLE_CNT_REG, 0);
+ } else {
+ hns_ppe_set_port_mode(ppe_cb, PPE_MODE_XGE);
+ }
+
+ hns_ppe_checksum_hw(ppe_cb, 0xffffffff);
+ hns_ppe_cnt_clr_ce(ppe_cb);
+
+ if (!AE_IS_VER1(dsaf_dev->dsaf_ver)) {
+ hns_ppe_set_vlan_strip(ppe_cb, 0);
+
+ dsaf_write_dev(ppe_cb, PPE_CFG_MAX_FRAME_LEN_REG,
+ HNS_PPEV2_MAX_FRAME_LEN);
+
+ /* set default RSS key in h/w */
+ hns_ppe_set_rss_key(ppe_cb, ppe_cb->rss_key);
+
+		/* Set default indirection table in h/w */
+ for (i = 0; i < HNS_PPEV2_RSS_IND_TBL_SIZE; i++)
+ ppe_cb->rss_indir_table[i] = i;
+ hns_ppe_set_indir_table(ppe_cb, ppe_cb->rss_indir_table);
+ }
+}
+
+/**
+ * hns_ppe_uninit_hw - uninit ppe
+ * @ppe_cb: ppe device
+ */
+static void hns_ppe_uninit_hw(struct hns_ppe_cb *ppe_cb)
+{
+ u32 port;
+
+ if (ppe_cb->ppe_common_cb) {
+ struct dsaf_device *dsaf_dev = ppe_cb->ppe_common_cb->dsaf_dev;
+
+ port = ppe_cb->index;
+ dsaf_dev->misc_op->ppe_srst(dsaf_dev, port, 0);
+ }
+}
+
+static void hns_ppe_uninit_ex(struct ppe_common_cb *ppe_common)
+{
+ u32 i;
+
+ for (i = 0; i < ppe_common->ppe_num; i++) {
+ if (ppe_common->dsaf_dev->mac_cb[i])
+ hns_ppe_uninit_hw(&ppe_common->ppe_cb[i]);
+ memset(&ppe_common->ppe_cb[i], 0, sizeof(struct hns_ppe_cb));
+ }
+}
+
+void hns_ppe_uninit(struct dsaf_device *dsaf_dev)
+{
+ u32 i;
+
+ for (i = 0; i < HNS_PPE_COM_NUM; i++) {
+ if (dsaf_dev->ppe_common[i])
+ hns_ppe_uninit_ex(dsaf_dev->ppe_common[i]);
+ hns_rcb_common_free_cfg(dsaf_dev, i);
+ hns_ppe_common_free_cfg(dsaf_dev, i);
+ }
+}
+
+/**
+ * hns_ppe_reset_common - reinit ppe/rcb hw
+ * @dsaf_dev: dsaf device
+ * @ppe_common_index: the index
+ * return void
+ */
+void hns_ppe_reset_common(struct dsaf_device *dsaf_dev, u8 ppe_common_index)
+{
+ u32 i;
+ int ret;
+ struct ppe_common_cb *ppe_common;
+
+ ppe_common = dsaf_dev->ppe_common[ppe_common_index];
+ ret = hns_ppe_common_init_hw(ppe_common);
+ if (ret)
+ return;
+
+ for (i = 0; i < ppe_common->ppe_num; i++) {
+ /* We only need to initiate ppe when the port exists */
+ if (dsaf_dev->mac_cb[i])
+ hns_ppe_init_hw(&ppe_common->ppe_cb[i]);
+ }
+
+ ret = hns_rcb_common_init_hw(dsaf_dev->rcb_common[ppe_common_index]);
+ if (ret)
+ return;
+
+ hns_rcb_common_init_commit_hw(dsaf_dev->rcb_common[ppe_common_index]);
+}
+
+void hns_ppe_update_stats(struct hns_ppe_cb *ppe_cb)
+{
+ struct hns_ppe_hw_stats *hw_stats = &ppe_cb->hw_stats;
+
+ hw_stats->rx_pkts_from_sw
+ += dsaf_read_dev(ppe_cb, PPE_HIS_RX_SW_PKT_CNT_REG);
+ hw_stats->rx_pkts
+ += dsaf_read_dev(ppe_cb, PPE_HIS_RX_WR_BD_OK_PKT_CNT_REG);
+ hw_stats->rx_drop_no_bd
+ += dsaf_read_dev(ppe_cb, PPE_HIS_RX_PKT_NO_BUF_CNT_REG);
+ hw_stats->rx_alloc_buf_fail
+ += dsaf_read_dev(ppe_cb, PPE_HIS_RX_APP_BUF_FAIL_CNT_REG);
+ hw_stats->rx_alloc_buf_wait
+ += dsaf_read_dev(ppe_cb, PPE_HIS_RX_APP_BUF_WAIT_CNT_REG);
+ hw_stats->rx_drop_no_buf
+ += dsaf_read_dev(ppe_cb, PPE_HIS_RX_PKT_DROP_FUL_CNT_REG);
+ hw_stats->rx_err_fifo_full
+ += dsaf_read_dev(ppe_cb, PPE_HIS_RX_PKT_DROP_PRT_CNT_REG);
+
+ hw_stats->tx_bd_form_rcb
+ += dsaf_read_dev(ppe_cb, PPE_HIS_TX_BD_CNT_REG);
+ hw_stats->tx_pkts_from_rcb
+ += dsaf_read_dev(ppe_cb, PPE_HIS_TX_PKT_CNT_REG);
+ hw_stats->tx_pkts
+ += dsaf_read_dev(ppe_cb, PPE_HIS_TX_PKT_OK_CNT_REG);
+ hw_stats->tx_err_fifo_empty
+ += dsaf_read_dev(ppe_cb, PPE_HIS_TX_PKT_EPT_CNT_REG);
+ hw_stats->tx_err_checksum
+ += dsaf_read_dev(ppe_cb, PPE_HIS_TX_PKT_CS_FAIL_CNT_REG);
+}
+
+int hns_ppe_get_sset_count(int stringset)
+{
+ if (stringset == ETH_SS_STATS)
+ return ETH_PPE_STATIC_NUM;
+ return 0;
+}
+
+int hns_ppe_get_regs_count(void)
+{
+ return ETH_PPE_DUMP_NUM;
+}
+
+/**
+ * hns_ppe_get_strings - get ppe strings
+ * @ppe_cb: ppe device
+ * @stringset: string set type
+ * @data: output string
+ */
+void hns_ppe_get_strings(struct hns_ppe_cb *ppe_cb, int stringset, u8 *data)
+{
+ int index = ppe_cb->index;
+ u8 *buff = data;
+
+ ethtool_sprintf(&buff, "ppe%d_rx_sw_pkt", index);
+ ethtool_sprintf(&buff, "ppe%d_rx_pkt_ok", index);
+ ethtool_sprintf(&buff, "ppe%d_rx_drop_pkt_no_bd", index);
+ ethtool_sprintf(&buff, "ppe%d_rx_alloc_buf_fail", index);
+ ethtool_sprintf(&buff, "ppe%d_rx_alloc_buf_wait", index);
+ ethtool_sprintf(&buff, "ppe%d_rx_pkt_drop_no_buf", index);
+ ethtool_sprintf(&buff, "ppe%d_rx_pkt_err_fifo_full", index);
+
+ ethtool_sprintf(&buff, "ppe%d_tx_bd", index);
+ ethtool_sprintf(&buff, "ppe%d_tx_pkt", index);
+ ethtool_sprintf(&buff, "ppe%d_tx_pkt_ok", index);
+ ethtool_sprintf(&buff, "ppe%d_tx_pkt_err_fifo_empty", index);
+ ethtool_sprintf(&buff, "ppe%d_tx_pkt_err_csum_fail", index);
+}
+
+void hns_ppe_get_stats(struct hns_ppe_cb *ppe_cb, u64 *data)
+{
+ u64 *regs_buff = data;
+ struct hns_ppe_hw_stats *hw_stats = &ppe_cb->hw_stats;
+
+ regs_buff[0] = hw_stats->rx_pkts_from_sw;
+ regs_buff[1] = hw_stats->rx_pkts;
+ regs_buff[2] = hw_stats->rx_drop_no_bd;
+ regs_buff[3] = hw_stats->rx_alloc_buf_fail;
+ regs_buff[4] = hw_stats->rx_alloc_buf_wait;
+ regs_buff[5] = hw_stats->rx_drop_no_buf;
+ regs_buff[6] = hw_stats->rx_err_fifo_full;
+
+ regs_buff[7] = hw_stats->tx_bd_form_rcb;
+ regs_buff[8] = hw_stats->tx_pkts_from_rcb;
+ regs_buff[9] = hw_stats->tx_pkts;
+ regs_buff[10] = hw_stats->tx_err_fifo_empty;
+ regs_buff[11] = hw_stats->tx_err_checksum;
+}
+
+/**
+ * hns_ppe_init - init ppe device
+ * @dsaf_dev: dsaf device
+ * return 0 - success, negative - fail
+ */
+int hns_ppe_init(struct dsaf_device *dsaf_dev)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < HNS_PPE_COM_NUM; i++) {
+ ret = hns_ppe_common_get_cfg(dsaf_dev, i);
+ if (ret)
+ goto get_cfg_fail;
+
+ ret = hns_rcb_common_get_cfg(dsaf_dev, i);
+ if (ret)
+ goto get_cfg_fail;
+
+ hns_ppe_get_cfg(dsaf_dev->ppe_common[i]);
+
+ ret = hns_rcb_get_cfg(dsaf_dev->rcb_common[i]);
+ if (ret)
+ goto get_cfg_fail;
+ }
+
+ for (i = 0; i < HNS_PPE_COM_NUM; i++)
+ hns_ppe_reset_common(dsaf_dev, i);
+
+ return 0;
+
+get_cfg_fail:
+ for (i = 0; i < HNS_PPE_COM_NUM; i++) {
+ hns_rcb_common_free_cfg(dsaf_dev, i);
+ hns_ppe_common_free_cfg(dsaf_dev, i);
+ }
+
+ return ret;
+}
+
+void hns_ppe_get_regs(struct hns_ppe_cb *ppe_cb, void *data)
+{
+ struct ppe_common_cb *ppe_common = ppe_cb->ppe_common_cb;
+ u32 *regs = data;
+ u32 i;
+ u32 offset;
+
+ /* ppe common registers */
+ regs[0] = dsaf_read_dev(ppe_common, PPE_COM_CFG_QID_MODE_REG);
+ regs[1] = dsaf_read_dev(ppe_common, PPE_COM_INTEN_REG);
+ regs[2] = dsaf_read_dev(ppe_common, PPE_COM_RINT_REG);
+ regs[3] = dsaf_read_dev(ppe_common, PPE_COM_INTSTS_REG);
+ regs[4] = dsaf_read_dev(ppe_common, PPE_COM_COMMON_CNT_CLR_CE_REG);
+
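+	/* Four per-queue counter groups follow, each DSAF_TOTAL_QUEUE_NUM
+	 * entries long, starting at regs[5].
+	 */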
+ for (i = 0; i < DSAF_TOTAL_QUEUE_NUM; i++) {
+ offset = PPE_COM_HIS_RX_PKT_QID_DROP_CNT_REG + 0x4 * i;
+ regs[5 + i] = dsaf_read_dev(ppe_common, offset);
+ offset = PPE_COM_HIS_RX_PKT_QID_OK_CNT_REG + 0x4 * i;
+ regs[5 + i + DSAF_TOTAL_QUEUE_NUM]
+ = dsaf_read_dev(ppe_common, offset);
+ offset = PPE_COM_HIS_TX_PKT_QID_ERR_CNT_REG + 0x4 * i;
+ regs[5 + i + DSAF_TOTAL_QUEUE_NUM * 2]
+ = dsaf_read_dev(ppe_common, offset);
+ offset = PPE_COM_HIS_TX_PKT_QID_OK_CNT_REG + 0x4 * i;
+ regs[5 + i + DSAF_TOTAL_QUEUE_NUM * 3]
+ = dsaf_read_dev(ppe_common, offset);
+ }
+
+ /* mark end of ppe regs */
+ for (i = 521; i < 524; i++)
+ regs[i] = 0xeeeeeeee;
+
+ /* ppe channel registers */
+ regs[525] = dsaf_read_dev(ppe_cb, PPE_CFG_TX_FIFO_THRSLD_REG);
+ regs[526] = dsaf_read_dev(ppe_cb, PPE_CFG_RX_FIFO_THRSLD_REG);
+ regs[527] = dsaf_read_dev(ppe_cb, PPE_CFG_RX_FIFO_PAUSE_THRSLD_REG);
+ regs[528] = dsaf_read_dev(ppe_cb, PPE_CFG_RX_FIFO_SW_BP_THRSLD_REG);
+ regs[529] = dsaf_read_dev(ppe_cb, PPE_CFG_PAUSE_IDLE_CNT_REG);
+ regs[530] = dsaf_read_dev(ppe_cb, PPE_CFG_BUS_CTRL_REG);
+ regs[531] = dsaf_read_dev(ppe_cb, PPE_CFG_TNL_TO_BE_RST_REG);
+ regs[532] = dsaf_read_dev(ppe_cb, PPE_CURR_TNL_CAN_RST_REG);
+
+ regs[533] = dsaf_read_dev(ppe_cb, PPE_CFG_XGE_MODE_REG);
+ regs[534] = dsaf_read_dev(ppe_cb, PPE_CFG_MAX_FRAME_LEN_REG);
+ regs[535] = dsaf_read_dev(ppe_cb, PPE_CFG_RX_PKT_MODE_REG);
+ regs[536] = dsaf_read_dev(ppe_cb, PPE_CFG_RX_VLAN_TAG_REG);
+ regs[537] = dsaf_read_dev(ppe_cb, PPE_CFG_TAG_GEN_REG);
+ regs[538] = dsaf_read_dev(ppe_cb, PPE_CFG_PARSE_TAG_REG);
+ regs[539] = dsaf_read_dev(ppe_cb, PPE_CFG_PRO_CHECK_EN_REG);
+
+ regs[540] = dsaf_read_dev(ppe_cb, PPE_INTEN_REG);
+ regs[541] = dsaf_read_dev(ppe_cb, PPE_RINT_REG);
+ regs[542] = dsaf_read_dev(ppe_cb, PPE_INTSTS_REG);
+ regs[543] = dsaf_read_dev(ppe_cb, PPE_CFG_RX_PKT_INT_REG);
+
+ regs[544] = dsaf_read_dev(ppe_cb, PPE_CFG_HEAT_DECT_TIME0_REG);
+ regs[545] = dsaf_read_dev(ppe_cb, PPE_CFG_HEAT_DECT_TIME1_REG);
+
+ /* ppe static */
+ regs[546] = dsaf_read_dev(ppe_cb, PPE_HIS_RX_SW_PKT_CNT_REG);
+ regs[547] = dsaf_read_dev(ppe_cb, PPE_HIS_RX_WR_BD_OK_PKT_CNT_REG);
+ regs[548] = dsaf_read_dev(ppe_cb, PPE_HIS_RX_PKT_NO_BUF_CNT_REG);
+ regs[549] = dsaf_read_dev(ppe_cb, PPE_HIS_TX_BD_CNT_REG);
+ regs[550] = dsaf_read_dev(ppe_cb, PPE_HIS_TX_PKT_CNT_REG);
+ regs[551] = dsaf_read_dev(ppe_cb, PPE_HIS_TX_PKT_OK_CNT_REG);
+ regs[552] = dsaf_read_dev(ppe_cb, PPE_HIS_TX_PKT_EPT_CNT_REG);
+ regs[553] = dsaf_read_dev(ppe_cb, PPE_HIS_TX_PKT_CS_FAIL_CNT_REG);
+ regs[554] = dsaf_read_dev(ppe_cb, PPE_HIS_RX_APP_BUF_FAIL_CNT_REG);
+ regs[555] = dsaf_read_dev(ppe_cb, PPE_HIS_RX_APP_BUF_WAIT_CNT_REG);
+ regs[556] = dsaf_read_dev(ppe_cb, PPE_HIS_RX_PKT_DROP_FUL_CNT_REG);
+ regs[557] = dsaf_read_dev(ppe_cb, PPE_HIS_RX_PKT_DROP_PRT_CNT_REG);
+
+ regs[558] = dsaf_read_dev(ppe_cb, PPE_TNL_0_5_CNT_CLR_CE_REG);
+ regs[559] = dsaf_read_dev(ppe_cb, PPE_CFG_AXI_DBG_REG);
+ regs[560] = dsaf_read_dev(ppe_cb, PPE_HIS_PRO_ERR_REG);
+ regs[561] = dsaf_read_dev(ppe_cb, PPE_HIS_TNL_FIFO_ERR_REG);
+ regs[562] = dsaf_read_dev(ppe_cb, PPE_CURR_CFF_DATA_NUM_REG);
+ regs[563] = dsaf_read_dev(ppe_cb, PPE_CURR_RX_ST_REG);
+ regs[564] = dsaf_read_dev(ppe_cb, PPE_CURR_TX_ST_REG);
+ regs[565] = dsaf_read_dev(ppe_cb, PPE_CURR_RX_FIFO0_REG);
+ regs[566] = dsaf_read_dev(ppe_cb, PPE_CURR_RX_FIFO1_REG);
+ regs[567] = dsaf_read_dev(ppe_cb, PPE_CURR_TX_FIFO0_REG);
+ regs[568] = dsaf_read_dev(ppe_cb, PPE_CURR_TX_FIFO1_REG);
+ regs[569] = dsaf_read_dev(ppe_cb, PPE_ECO0_REG);
+ regs[570] = dsaf_read_dev(ppe_cb, PPE_ECO1_REG);
+ regs[571] = dsaf_read_dev(ppe_cb, PPE_ECO2_REG);
+
+ /* mark end of ppe regs */
+ for (i = 572; i < 576; i++)
+ regs[i] = 0xeeeeeeee;
+}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
new file mode 100644
index 0000000000..0f0e16f9af
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
@@ -0,0 +1,119 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ */
+
+#ifndef _HNS_DSAF_PPE_H
+#define _HNS_DSAF_PPE_H
+
+#include <linux/platform_device.h>
+
+#include "hns_dsaf_main.h"
+#include "hns_dsaf_mac.h"
+#include "hns_dsaf_rcb.h"
+
+#define HNS_PPE_SERVICE_NW_ENGINE_NUM DSAF_COMM_CHN
+#define HNS_PPE_DEBUG_NW_ENGINE_NUM 1
+#define HNS_PPE_COM_NUM DSAF_COMM_DEV_NUM
+
+#define PPE_COMMON_REG_OFFSET 0x70000
+#define PPE_REG_OFFSET 0x10000
+
+#define ETH_PPE_DUMP_NUM 576
+#define ETH_PPE_STATIC_NUM 12
+
+#define HNS_PPEV2_RSS_IND_TBL_SIZE 256
+#define HNS_PPEV2_RSS_KEY_SIZE 40 /* in bytes or 320 bits */
+#define HNS_PPEV2_RSS_KEY_NUM (HNS_PPEV2_RSS_KEY_SIZE / sizeof(u32))
+
+#define HNS_PPEV2_MAX_FRAME_LEN 0X980
+
+enum ppe_qid_mode {
+ PPE_QID_MODE0 = 0, /* fixed queue id mode */
+ PPE_QID_MODE1, /* switch:128VM non switch:6Port/4VM/4TC */
+ PPE_QID_MODE2, /* switch:32VM/4TC non switch:6Port/16VM */
+ PPE_QID_MODE3, /* switch:4TC/8RSS non switch:2Port/64VM */
+ PPE_QID_MODE4, /* switch:8VM/16RSS non switch:2Port/16VM/4TC */
+ PPE_QID_MODE5, /* switch:16VM/8TC non switch:6Port/16RSS */
+ PPE_QID_MODE6, /* switch:32VM/4RSS non switch:6Port/2VM/8TC */
+ PPE_QID_MODE7, /* switch:32RSS non switch:2Port/8VM/8TC */
+ PPE_QID_MODE8, /* switch:6VM/4TC/4RSS non switch:2Port/16VM/4RSS */
+ PPE_QID_MODE9, /* non switch:2Port/32VM/2RSS */
+ PPE_QID_MODE10, /* non switch:2Port/32RSS */
+ PPE_QID_MODE11, /* non switch:2Port/4TC/16RSS */
+};
+
+enum ppe_port_mode {
+ PPE_MODE_GE = 0,
+ PPE_MODE_XGE,
+};
+
+enum ppe_common_mode {
+ PPE_COMMON_MODE_DEBUG = 0,
+ PPE_COMMON_MODE_SERVICE,
+ PPE_COMMON_MODE_MAX
+};
+
+struct hns_ppe_hw_stats {
+ u64 rx_pkts_from_sw;
+ u64 rx_pkts;
+ u64 rx_drop_no_bd;
+ u64 rx_alloc_buf_fail;
+ u64 rx_alloc_buf_wait;
+ u64 rx_drop_no_buf;
+ u64 rx_err_fifo_full;
+ u64 tx_bd_form_rcb;
+ u64 tx_pkts_from_rcb;
+ u64 tx_pkts;
+ u64 tx_err_fifo_empty;
+ u64 tx_err_checksum;
+};
+
+struct hns_ppe_cb {
+ struct device *dev;
+ struct hns_ppe_cb *next; /* pointer to next ppe device */
+ struct ppe_common_cb *ppe_common_cb; /* belong to */
+ struct hns_ppe_hw_stats hw_stats;
+
+ u8 index; /* index in a ppe common device */
+ u8 __iomem *io_base;
+ int virq;
+ u32 rss_indir_table[HNS_PPEV2_RSS_IND_TBL_SIZE]; /*shadow indir tab */
+ u32 rss_key[HNS_PPEV2_RSS_KEY_NUM]; /* rss hash key */
+};
+
+struct ppe_common_cb {
+ struct device *dev;
+ struct dsaf_device *dsaf_dev;
+ u8 __iomem *io_base;
+
+ enum ppe_common_mode ppe_mode;
+
+ u8 comm_index; /*ppe_common index*/
+
+ u32 ppe_num;
+ struct hns_ppe_cb ppe_cb[];
+
+};
+
+int hns_ppe_wait_tx_fifo_clean(struct hns_ppe_cb *ppe_cb);
+int hns_ppe_init(struct dsaf_device *dsaf_dev);
+
+void hns_ppe_uninit(struct dsaf_device *dsaf_dev);
+
+void hns_ppe_reset_common(struct dsaf_device *dsaf_dev, u8 ppe_common_index);
+
+void hns_ppe_update_stats(struct hns_ppe_cb *ppe_cb);
+
+int hns_ppe_get_sset_count(int stringset);
+int hns_ppe_get_regs_count(void);
+void hns_ppe_get_regs(struct hns_ppe_cb *ppe_cb, void *data);
+
+void hns_ppe_get_strings(struct hns_ppe_cb *ppe_cb, int stringset, u8 *data);
+void hns_ppe_get_stats(struct hns_ppe_cb *ppe_cb, u64 *data);
+void hns_ppe_set_tso_enable(struct hns_ppe_cb *ppe_cb, u32 value);
+void hns_ppe_set_rss_key(struct hns_ppe_cb *ppe_cb,
+ const u32 rss_key[HNS_PPEV2_RSS_KEY_NUM]);
+void hns_ppe_set_indir_table(struct hns_ppe_cb *ppe_cb,
+ const u32 rss_tab[HNS_PPEV2_RSS_IND_TBL_SIZE]);
+#endif /* _HNS_DSAF_PPE_H */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
new file mode 100644
index 0000000000..93344563a2
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
@@ -0,0 +1,1088 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ */
+
+#include <linux/cdev.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <asm/cacheflush.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+
+#include "hns_dsaf_main.h"
+#include "hns_dsaf_ppe.h"
+#include "hns_dsaf_rcb.h"
+
+#define RCB_COMMON_REG_OFFSET 0x80000
+#define TX_RING 0
+#define RX_RING 1
+
+#define RCB_RESET_WAIT_TIMES 30
+#define RCB_RESET_TRY_TIMES 10
+
+/* Because the default MTU is 1500, an rcb buffer size of 2048 is enough */
+#define RCB_DEFAULT_BUFFER_SIZE 2048
+
+/**
+ *hns_rcb_wait_fbd_clean - wait for the fbd to be cleaned
+ *@qs: ring struct pointer array
+ *@q_num: number of queues in the array
+ *@flag: tx or rx flag
+ */
+void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag)
+{
+ int i, wait_cnt;
+ u32 fbd_num;
+
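+	/* Move on to the next queue only once the requested TX/RX FBD
+	 * counters have drained; give up after 10000 polls.
+	 */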
+ for (wait_cnt = i = 0; i < q_num; wait_cnt++) {
+ usleep_range(200, 300);
+ fbd_num = 0;
+ if (flag & RCB_INT_FLAG_TX)
+ fbd_num += dsaf_read_dev(qs[i],
+ RCB_RING_TX_RING_FBDNUM_REG);
+ if (flag & RCB_INT_FLAG_RX)
+ fbd_num += dsaf_read_dev(qs[i],
+ RCB_RING_RX_RING_FBDNUM_REG);
+ if (!fbd_num)
+ i++;
+ if (wait_cnt >= 10000)
+ break;
+ }
+
+ if (i < q_num)
+ dev_err(qs[i]->handle->owner_dev,
+ "queue(%d) wait fbd(%d) clean fail!!\n", i, fbd_num);
+}
+
+int hns_rcb_wait_tx_ring_clean(struct hnae_queue *qs)
+{
+ u32 head, tail;
+ int wait_cnt;
+
+ tail = dsaf_read_dev(&qs->tx_ring, RCB_REG_TAIL);
+ wait_cnt = 0;
+ while (wait_cnt++ < HNS_MAX_WAIT_CNT) {
+ head = dsaf_read_dev(&qs->tx_ring, RCB_REG_HEAD);
+ if (tail == head)
+ break;
+
+ usleep_range(100, 200);
+ }
+
+ if (wait_cnt >= HNS_MAX_WAIT_CNT) {
+ dev_err(qs->dev->dev, "rcb wait timeout, head not equal to tail.\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+/**
+ *hns_rcb_reset_ring_hw - ring reset
+ *@q: ring struct pointer
+ */
+void hns_rcb_reset_ring_hw(struct hnae_queue *q)
+{
+ u32 wait_cnt;
+ u32 try_cnt = 0;
+ u32 could_ret;
+
+ u32 tx_fbd_num;
+
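+	/* Wait for the TX FBDs to drain, then pulse T0_BE_RST until the
+	 * hardware reports the ring may be reset (RCB_RING_COULD_BE_RST).
+	 */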
+ while (try_cnt++ < RCB_RESET_TRY_TIMES) {
+ usleep_range(100, 200);
+ tx_fbd_num = dsaf_read_dev(q, RCB_RING_TX_RING_FBDNUM_REG);
+ if (tx_fbd_num)
+ continue;
+
+ dsaf_write_dev(q, RCB_RING_PREFETCH_EN_REG, 0);
+
+ dsaf_write_dev(q, RCB_RING_T0_BE_RST, 1);
+
+ msleep(20);
+ could_ret = dsaf_read_dev(q, RCB_RING_COULD_BE_RST);
+
+ wait_cnt = 0;
+ while (!could_ret && (wait_cnt < RCB_RESET_WAIT_TIMES)) {
+ dsaf_write_dev(q, RCB_RING_T0_BE_RST, 0);
+
+ dsaf_write_dev(q, RCB_RING_T0_BE_RST, 1);
+
+ msleep(20);
+ could_ret = dsaf_read_dev(q, RCB_RING_COULD_BE_RST);
+
+ wait_cnt++;
+ }
+
+ dsaf_write_dev(q, RCB_RING_T0_BE_RST, 0);
+
+ if (could_ret)
+ break;
+ }
+
+ if (try_cnt >= RCB_RESET_TRY_TIMES)
+ dev_err(q->dev->dev, "port%d reset ring fail\n",
+ hns_ae_get_vf_cb(q->handle)->port_index);
+}
+
+/**
+ *hns_rcb_int_ctrl_hw - rcb irq enable control
+ *@q: hnae queue struct pointer
+ *@flag:ring flag tx or rx
+ *@mask:mask
+ */
+void hns_rcb_int_ctrl_hw(struct hnae_queue *q, u32 flag, u32 mask)
+{
+ u32 int_mask_en = !!mask;
+
+ if (flag & RCB_INT_FLAG_TX) {
+ dsaf_write_dev(q, RCB_RING_INTMSK_TXWL_REG, int_mask_en);
+ dsaf_write_dev(q, RCB_RING_INTMSK_TX_OVERTIME_REG,
+ int_mask_en);
+ }
+
+ if (flag & RCB_INT_FLAG_RX) {
+ dsaf_write_dev(q, RCB_RING_INTMSK_RXWL_REG, int_mask_en);
+ dsaf_write_dev(q, RCB_RING_INTMSK_RX_OVERTIME_REG,
+ int_mask_en);
+ }
+}
+
+void hns_rcb_int_clr_hw(struct hnae_queue *q, u32 flag)
+{
+ if (flag & RCB_INT_FLAG_TX) {
+ dsaf_write_dev(q, RCB_RING_INTSTS_TX_RING_REG, 1);
+ dsaf_write_dev(q, RCB_RING_INTSTS_TX_OVERTIME_REG, 1);
+ }
+
+ if (flag & RCB_INT_FLAG_RX) {
+ dsaf_write_dev(q, RCB_RING_INTSTS_RX_RING_REG, 1);
+ dsaf_write_dev(q, RCB_RING_INTSTS_RX_OVERTIME_REG, 1);
+ }
+}
+
+void hns_rcbv2_int_ctrl_hw(struct hnae_queue *q, u32 flag, u32 mask)
+{
+ u32 int_mask_en = !!mask;
+
+ if (flag & RCB_INT_FLAG_TX)
+ dsaf_write_dev(q, RCB_RING_INTMSK_TXWL_REG, int_mask_en);
+
+ if (flag & RCB_INT_FLAG_RX)
+ dsaf_write_dev(q, RCB_RING_INTMSK_RXWL_REG, int_mask_en);
+}
+
+void hns_rcbv2_int_clr_hw(struct hnae_queue *q, u32 flag)
+{
+ if (flag & RCB_INT_FLAG_TX)
+ dsaf_write_dev(q, RCBV2_TX_RING_INT_STS_REG, 1);
+
+ if (flag & RCB_INT_FLAG_RX)
+ dsaf_write_dev(q, RCBV2_RX_RING_INT_STS_REG, 1);
+}
+
+/**
+ *hns_rcb_ring_enable_hw - enable ring
+ *@q: rcb ring
+ *@val: value to write
+ */
+void hns_rcb_ring_enable_hw(struct hnae_queue *q, u32 val)
+{
+ dsaf_write_dev(q, RCB_RING_PREFETCH_EN_REG, !!val);
+}
+
+void hns_rcb_start(struct hnae_queue *q, u32 val)
+{
+ hns_rcb_ring_enable_hw(q, val);
+}
+
+/**
+ *hns_rcb_common_init_commit_hw - signal that rcb common init has completed
+ *@rcb_common: rcb common device
+ */
+void hns_rcb_common_init_commit_hw(struct rcb_common_cb *rcb_common)
+{
+ wmb(); /* Sync point before breakpoint */
+ dsaf_write_dev(rcb_common, RCB_COM_CFG_SYS_FSH_REG, 1);
+ wmb(); /* Sync point after breakpoint */
+}
+
+/* hns_rcb_set_tx_ring_bs - init rcb ring buf size register
+ *@q: hnae_queue
+ *@buf_size: buffer size set to hw
+ */
+void hns_rcb_set_tx_ring_bs(struct hnae_queue *q, u32 buf_size)
+{
+ u32 bd_size_type = hns_rcb_buf_size2type(buf_size);
+
+ dsaf_write_dev(q, RCB_RING_TX_RING_BD_LEN_REG,
+ bd_size_type);
+}
+
+/* hns_rcb_set_rx_ring_bs - init rcb ring buf size register
+ *@q: hnae_queue
+ *@buf_size: buffer size set to hw
+ */
+void hns_rcb_set_rx_ring_bs(struct hnae_queue *q, u32 buf_size)
+{
+ u32 bd_size_type = hns_rcb_buf_size2type(buf_size);
+
+ dsaf_write_dev(q, RCB_RING_RX_RING_BD_LEN_REG,
+ bd_size_type);
+}
+
+/**
+ *hns_rcb_ring_init - init rcb ring
+ *@ring_pair: ring pair control block
+ *@ring_type: ring type, RX_RING or TX_RING
+ */
+static void hns_rcb_ring_init(struct ring_pair_cb *ring_pair, int ring_type)
+{
+ struct hnae_queue *q = &ring_pair->q;
+ struct hnae_ring *ring =
+ (ring_type == RX_RING) ? &q->rx_ring : &q->tx_ring;
+ dma_addr_t dma = ring->desc_dma_addr;
+
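+	/* The 64-bit descriptor base is split into low/high 32-bit writes;
+	 * (dma >> 31) >> 1 avoids an undefined 32-bit shift when dma_addr_t
+	 * is only 32 bits wide.
+	 */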
+ if (ring_type == RX_RING) {
+ dsaf_write_dev(q, RCB_RING_RX_RING_BASEADDR_L_REG,
+ (u32)dma);
+ dsaf_write_dev(q, RCB_RING_RX_RING_BASEADDR_H_REG,
+ (u32)((dma >> 31) >> 1));
+
+ hns_rcb_set_rx_ring_bs(q, ring->buf_size);
+
+ dsaf_write_dev(q, RCB_RING_RX_RING_BD_NUM_REG,
+ ring_pair->port_id_in_comm);
+ dsaf_write_dev(q, RCB_RING_RX_RING_PKTLINE_REG,
+ ring_pair->port_id_in_comm);
+ } else {
+ dsaf_write_dev(q, RCB_RING_TX_RING_BASEADDR_L_REG,
+ (u32)dma);
+ dsaf_write_dev(q, RCB_RING_TX_RING_BASEADDR_H_REG,
+ (u32)((dma >> 31) >> 1));
+
+ hns_rcb_set_tx_ring_bs(q, ring->buf_size);
+
+ dsaf_write_dev(q, RCB_RING_TX_RING_BD_NUM_REG,
+ ring_pair->port_id_in_comm);
+ dsaf_write_dev(q, RCB_RING_TX_RING_PKTLINE_REG,
+ ring_pair->port_id_in_comm + HNS_RCB_TX_PKTLINE_OFFSET);
+ }
+}
+
+/**
+ *hns_rcb_init_hw - init rcb hardware
+ *@ring: rcb ring
+ */
+void hns_rcb_init_hw(struct ring_pair_cb *ring)
+{
+ hns_rcb_ring_init(ring, RX_RING);
+ hns_rcb_ring_init(ring, TX_RING);
+}
+
+/**
+ *hns_rcb_set_port_desc_cnt - set rcb port descriptor count
+ *@rcb_common: rcb_common device
+ *@port_idx:port index
+ *@desc_cnt:BD num
+ */
+static void hns_rcb_set_port_desc_cnt(struct rcb_common_cb *rcb_common,
+ u32 port_idx, u32 desc_cnt)
+{
+ dsaf_write_dev(rcb_common, RCB_CFG_BD_NUM_REG + port_idx * 4,
+ desc_cnt);
+}
+
+static void hns_rcb_set_port_timeout(
+ struct rcb_common_cb *rcb_common, u32 port_idx, u32 timeout)
+{
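+	/* V1 counts the timeout in clock cycles, so convert from usecs; V2
+	 * service ports also program a per-port interrupt gap time, clamped
+	 * to HNS_RCB_DEF_GAP_TIME_USECS.
+	 */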
+ if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver)) {
+ dsaf_write_dev(rcb_common, RCB_CFG_OVERTIME_REG,
+ timeout * HNS_RCB_CLK_FREQ_MHZ);
+ } else if (!HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev)) {
+ if (timeout > HNS_RCB_DEF_GAP_TIME_USECS)
+ dsaf_write_dev(rcb_common,
+ RCB_PORT_INT_GAPTIME_REG + port_idx * 4,
+ HNS_RCB_DEF_GAP_TIME_USECS);
+ else
+ dsaf_write_dev(rcb_common,
+ RCB_PORT_INT_GAPTIME_REG + port_idx * 4,
+ timeout);
+
+ dsaf_write_dev(rcb_common,
+ RCB_PORT_CFG_OVERTIME_REG + port_idx * 4,
+ timeout);
+ } else {
+ dsaf_write_dev(rcb_common,
+ RCB_PORT_CFG_OVERTIME_REG + port_idx * 4,
+ timeout);
+ }
+}
+
+static int hns_rcb_common_get_port_num(struct rcb_common_cb *rcb_common)
+{
+ if (!HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev))
+ return HNS_RCB_SERVICE_NW_ENGINE_NUM;
+ else
+ return HNS_RCB_DEBUG_NW_ENGINE_NUM;
+}
+
+/* clr rcb comm exception irq */
+static void hns_rcb_comm_exc_irq_en(
+ struct rcb_common_cb *rcb_common, int en)
+{
+ u32 clr_vlue = 0xfffffffful;
+ u32 msk_vlue = en ? 0 : 0xfffffffful;
+
+ /* clr int*/
+ dsaf_write_dev(rcb_common, RCB_COM_INTSTS_ECC_ERR_REG, clr_vlue);
+
+ dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_RING_STS, clr_vlue);
+
+ dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_BD_RINT_STS, clr_vlue);
+
+ dsaf_write_dev(rcb_common, RCB_COM_RINT_TX_PKT_REG, clr_vlue);
+ dsaf_write_dev(rcb_common, RCB_COM_AXI_ERR_STS, clr_vlue);
+
+ /*en msk*/
+ dsaf_write_dev(rcb_common, RCB_COM_INTMASK_ECC_ERR_REG, msk_vlue);
+
+ dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_INTMASK_RING, msk_vlue);
+
+	/* tx bds need no cacheline, so mask sf_txring_fbd_intmask (bit 1) */
+ dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_INTMASK_BD, msk_vlue | 2);
+
+ dsaf_write_dev(rcb_common, RCB_COM_INTMSK_TX_PKT_REG, msk_vlue);
+ dsaf_write_dev(rcb_common, RCB_COM_AXI_WR_ERR_INTMASK, msk_vlue);
+}
+
+/**
+ *hns_rcb_common_init_hw - init rcb common hardware
+ *@rcb_common: rcb_common device
+ *return 0 - success, negative - fail
+ */
+int hns_rcb_common_init_hw(struct rcb_common_cb *rcb_common)
+{
+ u32 reg_val;
+ int i;
+ int port_num = hns_rcb_common_get_port_num(rcb_common);
+
+ hns_rcb_comm_exc_irq_en(rcb_common, 0);
+
+ reg_val = dsaf_read_dev(rcb_common, RCB_COM_CFG_INIT_FLAG_REG);
+ if (0x1 != (reg_val & 0x1)) {
+ dev_err(rcb_common->dsaf_dev->dev,
+ "RCB_COM_CFG_INIT_FLAG_REG reg = 0x%x\n", reg_val);
+ return -EBUSY;
+ }
+
+ for (i = 0; i < port_num; i++) {
+ hns_rcb_set_port_desc_cnt(rcb_common, i, rcb_common->desc_num);
+ hns_rcb_set_rx_coalesced_frames(
+ rcb_common, i, HNS_RCB_DEF_RX_COALESCED_FRAMES);
+ if (!AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver) &&
+ !HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev))
+ hns_rcb_set_tx_coalesced_frames(
+ rcb_common, i, HNS_RCB_DEF_TX_COALESCED_FRAMES);
+ hns_rcb_set_port_timeout(
+ rcb_common, i, HNS_RCB_DEF_COALESCED_USECS);
+ }
+
+ dsaf_write_dev(rcb_common, RCB_COM_CFG_ENDIAN_REG,
+ HNS_RCB_COMMON_ENDIAN);
+
+ if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver)) {
+ dsaf_write_dev(rcb_common, RCB_COM_CFG_FNA_REG, 0x0);
+ dsaf_write_dev(rcb_common, RCB_COM_CFG_FA_REG, 0x1);
+ } else {
+ dsaf_set_dev_bit(rcb_common, RCBV2_COM_CFG_USER_REG,
+ RCB_COM_CFG_FNA_B, false);
+ dsaf_set_dev_bit(rcb_common, RCBV2_COM_CFG_USER_REG,
+ RCB_COM_CFG_FA_B, true);
+ dsaf_set_dev_bit(rcb_common, RCBV2_COM_CFG_TSO_MODE_REG,
+ RCB_COM_TSO_MODE_B, HNS_TSO_MODE_8BD_32K);
+ }
+
+ return 0;
+}
+
+int hns_rcb_buf_size2type(u32 buf_size)
+{
+ int bd_size_type;
+
+ switch (buf_size) {
+ case 512:
+ bd_size_type = HNS_BD_SIZE_512_TYPE;
+ break;
+ case 1024:
+ bd_size_type = HNS_BD_SIZE_1024_TYPE;
+ break;
+ case 2048:
+ bd_size_type = HNS_BD_SIZE_2048_TYPE;
+ break;
+ case 4096:
+ bd_size_type = HNS_BD_SIZE_4096_TYPE;
+ break;
+ default:
+ bd_size_type = -EINVAL;
+ }
+
+ return bd_size_type;
+}
+
+static void hns_rcb_ring_get_cfg(struct hnae_queue *q, int ring_type)
+{
+ struct hnae_ring *ring;
+ struct rcb_common_cb *rcb_common;
+ struct ring_pair_cb *ring_pair_cb;
+ u16 desc_num, mdnum_ppkt;
+ bool irq_idx, is_ver1;
+
+ ring_pair_cb = container_of(q, struct ring_pair_cb, q);
+ is_ver1 = AE_IS_VER1(ring_pair_cb->rcb_common->dsaf_dev->dsaf_ver);
+ if (ring_type == RX_RING) {
+ ring = &q->rx_ring;
+ ring->io_base = ring_pair_cb->q.io_base;
+ irq_idx = HNS_RCB_IRQ_IDX_RX;
+ mdnum_ppkt = HNS_RCB_RING_MAX_BD_PER_PKT;
+ } else {
+ ring = &q->tx_ring;
+ ring->io_base = ring_pair_cb->q.io_base +
+ HNS_RCB_TX_REG_OFFSET;
+ irq_idx = HNS_RCB_IRQ_IDX_TX;
+ mdnum_ppkt = is_ver1 ? HNS_RCB_RING_MAX_TXBD_PER_PKT :
+ HNS_RCBV2_RING_MAX_TXBD_PER_PKT;
+ }
+
+ rcb_common = ring_pair_cb->rcb_common;
+ desc_num = rcb_common->dsaf_dev->desc_num;
+
+ ring->desc = NULL;
+ ring->desc_cb = NULL;
+
+ ring->irq = ring_pair_cb->virq[irq_idx];
+ ring->desc_dma_addr = 0;
+
+ ring->buf_size = RCB_DEFAULT_BUFFER_SIZE;
+ ring->desc_num = desc_num;
+ ring->max_desc_num_per_pkt = mdnum_ppkt;
+ ring->max_raw_data_sz_per_desc = HNS_RCB_MAX_PKT_SIZE;
+ ring->max_pkt_size = HNS_RCB_MAX_PKT_SIZE;
+ ring->next_to_use = 0;
+ ring->next_to_clean = 0;
+}
+
+static void hns_rcb_ring_pair_get_cfg(struct ring_pair_cb *ring_pair_cb)
+{
+ ring_pair_cb->q.handle = NULL;
+
+ hns_rcb_ring_get_cfg(&ring_pair_cb->q, RX_RING);
+ hns_rcb_ring_get_cfg(&ring_pair_cb->q, TX_RING);
+}
+
+static int hns_rcb_get_port_in_comm(
+ struct rcb_common_cb *rcb_common, int ring_idx)
+{
+ return ring_idx / (rcb_common->max_q_per_vf * rcb_common->max_vfn);
+}
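+
+/*
+ * Worked example (a sketch based on hns_rcb_get_queue_mode() below): in
+ * DSAF_MODE_DISABLE_6PORT_0VM, max_vfn = 1 and max_q_per_vf = 16, so each
+ * port owns 16 consecutive ring pairs:
+ *
+ *   hns_rcb_get_port_in_comm(rcb_common, 0)  == 0
+ *   hns_rcb_get_port_in_comm(rcb_common, 15) == 0
+ *   hns_rcb_get_port_in_comm(rcb_common, 16) == 1
+ */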
+
+#define SERVICE_RING_IRQ_IDX(v1) \
+ ((v1) ? HNS_SERVICE_RING_IRQ_IDX : HNSV2_SERVICE_RING_IRQ_IDX)
+static int hns_rcb_get_base_irq_idx(struct rcb_common_cb *rcb_common)
+{
+ bool is_ver1 = AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver);
+
+ if (!HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev))
+ return SERVICE_RING_IRQ_IDX(is_ver1);
+ else
+ return HNS_DEBUG_RING_IRQ_IDX;
+}
+
+#define RCB_COMM_BASE_TO_RING_BASE(base, ringid)\
+ ((base) + 0x10000 + HNS_RCB_REG_OFFSET * (ringid))
+/**
+ * hns_rcb_get_cfg - get rcb config
+ * @rcb_common: rcb common device
+ *
+ * Return: 0 on success, -EPROBE_DEFER if a ring IRQ is not yet available
+ */
+int hns_rcb_get_cfg(struct rcb_common_cb *rcb_common)
+{
+ struct ring_pair_cb *ring_pair_cb;
+ u32 i;
+ u32 ring_num = rcb_common->ring_num;
+ int base_irq_idx = hns_rcb_get_base_irq_idx(rcb_common);
+ struct platform_device *pdev =
+ to_platform_device(rcb_common->dsaf_dev->dev);
+ bool is_ver1 = AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver);
+
+ for (i = 0; i < ring_num; i++) {
+ ring_pair_cb = &rcb_common->ring_pair_cb[i];
+ ring_pair_cb->rcb_common = rcb_common;
+ ring_pair_cb->dev = rcb_common->dsaf_dev->dev;
+ ring_pair_cb->index = i;
+ ring_pair_cb->q.io_base =
+ RCB_COMM_BASE_TO_RING_BASE(rcb_common->io_base, i);
+ ring_pair_cb->port_id_in_comm =
+ hns_rcb_get_port_in_comm(rcb_common, i);
+ ring_pair_cb->virq[HNS_RCB_IRQ_IDX_TX] =
+ is_ver1 ? platform_get_irq(pdev, base_irq_idx + i * 2) :
+ platform_get_irq(pdev, base_irq_idx + i * 3 + 1);
+ ring_pair_cb->virq[HNS_RCB_IRQ_IDX_RX] =
+ is_ver1 ? platform_get_irq(pdev, base_irq_idx + i * 2 + 1) :
+ platform_get_irq(pdev, base_irq_idx + i * 3);
+ if ((ring_pair_cb->virq[HNS_RCB_IRQ_IDX_TX] == -EPROBE_DEFER) ||
+ (ring_pair_cb->virq[HNS_RCB_IRQ_IDX_RX] == -EPROBE_DEFER))
+ return -EPROBE_DEFER;
+
+ ring_pair_cb->q.phy_base =
+ RCB_COMM_BASE_TO_RING_BASE(rcb_common->phy_base, i);
+ hns_rcb_ring_pair_get_cfg(ring_pair_cb);
+ }
+
+ return 0;
+}
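+
+/*
+ * IRQ layout sketch (derived from the indexing above and the
+ * HNS_SERVICE_RING_IRQ_IDX/HNSV2_SERVICE_RING_IRQ_IDX values in
+ * hns_dsaf_reg.h): for service rings, a v1 chip uses two platform IRQs per
+ * ring pair starting at index 59 (ring i: tx = 59 + 2 * i,
+ * rx = 59 + 2 * i + 1), while a v2 chip spaces them three apart starting at
+ * index 25 (ring i: rx = 25 + 3 * i, tx = 25 + 3 * i + 1). Debug rings
+ * start at HNS_DEBUG_RING_IRQ_IDX instead.
+ */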
+
+/**
+ * hns_rcb_get_rx_coalesced_frames - get rcb port rx coalesced frames
+ * @rcb_common: rcb_common device
+ * @port_idx: port id in comm
+ *
+ * Return: coalesced_frames
+ */
+u32 hns_rcb_get_rx_coalesced_frames(
+ struct rcb_common_cb *rcb_common, u32 port_idx)
+{
+ return dsaf_read_dev(rcb_common, RCB_CFG_PKTLINE_REG + port_idx * 4);
+}
+
+/**
+ * hns_rcb_get_tx_coalesced_frames - get rcb port tx coalesced frames
+ * @rcb_common: rcb_common device
+ * @port_idx: port id in comm
+ *
+ * Return: coalesced_frames
+ */
+u32 hns_rcb_get_tx_coalesced_frames(
+ struct rcb_common_cb *rcb_common, u32 port_idx)
+{
+ u64 reg;
+
+ reg = RCB_CFG_PKTLINE_REG + (port_idx + HNS_RCB_TX_PKTLINE_OFFSET) * 4;
+ return dsaf_read_dev(rcb_common, reg);
+}
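+
+/*
+ * Register layout note (a sketch using RCB_CFG_PKTLINE_REG and
+ * HNS_RCB_TX_PKTLINE_OFFSET from the headers): the tx pktline registers sit
+ * HNS_RCB_TX_PKTLINE_OFFSET (8) entries after the rx ones, one 32-bit word
+ * per port, e.g. with RCB_CFG_PKTLINE_REG = 0x9050:
+ *
+ *   port 0 rx: 0x9050 + 0 * 4       = 0x9050
+ *   port 0 tx: 0x9050 + (0 + 8) * 4 = 0x9070
+ *   port 2 tx: 0x9050 + (2 + 8) * 4 = 0x9078
+ */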
+
+/**
+ * hns_rcb_get_coalesce_usecs - get rcb port coalesced time_out
+ * @rcb_common: rcb_common device
+ * @port_idx: port id in comm
+ *
+ * Return: time_out
+ */
+u32 hns_rcb_get_coalesce_usecs(
+ struct rcb_common_cb *rcb_common, u32 port_idx)
+{
+ if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver))
+ return dsaf_read_dev(rcb_common, RCB_CFG_OVERTIME_REG) /
+ HNS_RCB_CLK_FREQ_MHZ;
+ else
+ return dsaf_read_dev(rcb_common,
+ RCB_PORT_CFG_OVERTIME_REG + port_idx * 4);
+}
+
+/**
+ * hns_rcb_set_coalesce_usecs - set rcb port coalesced time_out
+ * @rcb_common: rcb_common device
+ * @port_idx: port id in comm
+ * @timeout: tx/rx time for coalesced time_out
+ *
+ * Returns:
+ * Zero for success, or an error code in case of failure
+ */
+int hns_rcb_set_coalesce_usecs(
+ struct rcb_common_cb *rcb_common, u32 port_idx, u32 timeout)
+{
+ u32 old_timeout = hns_rcb_get_coalesce_usecs(rcb_common, port_idx);
+
+ if (timeout == old_timeout)
+ return 0;
+
+ if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver)) {
+ if (!HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev)) {
+ dev_err(rcb_common->dsaf_dev->dev,
+ "error: not support coalesce_usecs setting!\n");
+ return -EINVAL;
+ }
+ }
+ if (timeout > HNS_RCB_MAX_COALESCED_USECS || timeout == 0) {
+ dev_err(rcb_common->dsaf_dev->dev,
+ "error: coalesce_usecs setting supports 1~1023us\n");
+ return -EINVAL;
+ }
+ hns_rcb_set_port_timeout(rcb_common, port_idx, timeout);
+ return 0;
+}
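+
+/*
+ * Usage sketch (illustrative caller, not part of the driver flow): on v2
+ * hardware the per-port timeout may be set anywhere in 1..1023 us, v1
+ * service ports reject any change, and an unchanged value is a no-op:
+ *
+ *   hns_rcb_set_coalesce_usecs(rcb_common, 0, 50);   returns 0
+ *   hns_rcb_set_coalesce_usecs(rcb_common, 0, 2000); returns -EINVAL
+ */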
+
+/**
+ * hns_rcb_set_tx_coalesced_frames - set rcb tx coalesced frames
+ * @rcb_common: rcb_common device
+ * @port_idx: port id in comm
+ * @coalesced_frames: tx BD num for coalesced frames
+ *
+ * Returns:
+ * Zero for success, or an error code in case of failure
+ */
+int hns_rcb_set_tx_coalesced_frames(
+ struct rcb_common_cb *rcb_common, u32 port_idx, u32 coalesced_frames)
+{
+ u32 old_waterline =
+ hns_rcb_get_tx_coalesced_frames(rcb_common, port_idx);
+ u64 reg;
+
+ if (coalesced_frames == old_waterline)
+ return 0;
+
+ if (coalesced_frames != 1) {
+ dev_err(rcb_common->dsaf_dev->dev,
+ "error: not support tx coalesce_frames setting!\n");
+ return -EINVAL;
+ }
+
+ reg = RCB_CFG_PKTLINE_REG + (port_idx + HNS_RCB_TX_PKTLINE_OFFSET) * 4;
+ dsaf_write_dev(rcb_common, reg, coalesced_frames);
+ return 0;
+}
+
+/**
+ * hns_rcb_set_rx_coalesced_frames - set rcb rx coalesced frames
+ * @rcb_common: rcb_common device
+ * @port_idx: port id in comm
+ * @coalesced_frames: rx BD num for coalesced frames
+ *
+ * Returns:
+ * Zero for success, or an error code in case of failure
+ */
+int hns_rcb_set_rx_coalesced_frames(
+ struct rcb_common_cb *rcb_common, u32 port_idx, u32 coalesced_frames)
+{
+ u32 old_waterline =
+ hns_rcb_get_rx_coalesced_frames(rcb_common, port_idx);
+
+ if (coalesced_frames == old_waterline)
+ return 0;
+
+ if (coalesced_frames >= rcb_common->desc_num ||
+ coalesced_frames > HNS_RCB_MAX_COALESCED_FRAMES ||
+ coalesced_frames < HNS_RCB_MIN_COALESCED_FRAMES) {
+ dev_err(rcb_common->dsaf_dev->dev,
+ "error: not support coalesce_frames setting!\n");
+ return -EINVAL;
+ }
+
+ dsaf_write_dev(rcb_common, RCB_CFG_PKTLINE_REG + port_idx * 4,
+ coalesced_frames);
+ return 0;
+}
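+
+/*
+ * Bounds sketch (based on the checks above and the limits in
+ * hns_dsaf_rcb.h): an rx coalesced-frames value is accepted only when
+ * HNS_RCB_MIN_COALESCED_FRAMES <= value <= HNS_RCB_MAX_COALESCED_FRAMES
+ * (1..1023) and value < rcb_common->desc_num, e.g.
+ *
+ *   hns_rcb_set_rx_coalesced_frames(rcb_common, 0, 50);   returns 0
+ *   hns_rcb_set_rx_coalesced_frames(rcb_common, 0, 4096); returns -EINVAL
+ */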
+
+/**
+ * hns_rcb_get_queue_mode - get max VM number and max ring number per VM
+ * according to dsaf mode
+ * @dsaf_mode: dsaf mode
+ * @max_vfn: max vfn number
+ * @max_q_per_vf: max ring number per vm
+ */
+void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode, u16 *max_vfn,
+ u16 *max_q_per_vf)
+{
+ switch (dsaf_mode) {
+ case DSAF_MODE_DISABLE_6PORT_0VM:
+ *max_vfn = 1;
+ *max_q_per_vf = 16;
+ break;
+ case DSAF_MODE_DISABLE_FIX:
+ case DSAF_MODE_DISABLE_SP:
+ *max_vfn = 1;
+ *max_q_per_vf = 1;
+ break;
+ case DSAF_MODE_DISABLE_2PORT_64VM:
+ *max_vfn = 64;
+ *max_q_per_vf = 1;
+ break;
+ case DSAF_MODE_DISABLE_6PORT_16VM:
+ *max_vfn = 16;
+ *max_q_per_vf = 1;
+ break;
+ default:
+ *max_vfn = 1;
+ *max_q_per_vf = 16;
+ break;
+ }
+}
+
+static int hns_rcb_get_ring_num(struct dsaf_device *dsaf_dev)
+{
+ switch (dsaf_dev->dsaf_mode) {
+ case DSAF_MODE_ENABLE_FIX:
+ case DSAF_MODE_DISABLE_SP:
+ return 1;
+
+ case DSAF_MODE_DISABLE_FIX:
+ return 6;
+
+ case DSAF_MODE_ENABLE_0VM:
+ return 32;
+
+ case DSAF_MODE_DISABLE_6PORT_0VM:
+ case DSAF_MODE_ENABLE_16VM:
+ case DSAF_MODE_DISABLE_6PORT_2VM:
+ case DSAF_MODE_DISABLE_6PORT_16VM:
+ case DSAF_MODE_DISABLE_6PORT_4VM:
+ case DSAF_MODE_ENABLE_8VM:
+ return 96;
+
+ case DSAF_MODE_DISABLE_2PORT_16VM:
+ case DSAF_MODE_DISABLE_2PORT_8VM:
+ case DSAF_MODE_ENABLE_32VM:
+ case DSAF_MODE_DISABLE_2PORT_64VM:
+ case DSAF_MODE_ENABLE_128VM:
+ return 128;
+
+ default:
+ dev_warn(dsaf_dev->dev,
+ "get ring num fail,use default!dsaf_mode=%d\n",
+ dsaf_dev->dsaf_mode);
+ return 128;
+ }
+}
+
+static u8 __iomem *hns_rcb_common_get_vaddr(struct rcb_common_cb *rcb_common)
+{
+ struct dsaf_device *dsaf_dev = rcb_common->dsaf_dev;
+
+ return dsaf_dev->ppe_base + RCB_COMMON_REG_OFFSET;
+}
+
+static phys_addr_t hns_rcb_common_get_paddr(struct rcb_common_cb *rcb_common)
+{
+ struct dsaf_device *dsaf_dev = rcb_common->dsaf_dev;
+
+ return dsaf_dev->ppe_paddr + RCB_COMMON_REG_OFFSET;
+}
+
+int hns_rcb_common_get_cfg(struct dsaf_device *dsaf_dev,
+ int comm_index)
+{
+ struct rcb_common_cb *rcb_common;
+ enum dsaf_mode dsaf_mode = dsaf_dev->dsaf_mode;
+ u16 max_vfn;
+ u16 max_q_per_vf;
+ int ring_num = hns_rcb_get_ring_num(dsaf_dev);
+
+ rcb_common =
+ devm_kzalloc(dsaf_dev->dev,
+ struct_size(rcb_common, ring_pair_cb, ring_num),
+ GFP_KERNEL);
+ if (!rcb_common) {
+ dev_err(dsaf_dev->dev, "rcb common devm_kzalloc fail!\n");
+ return -ENOMEM;
+ }
+ rcb_common->comm_index = comm_index;
+ rcb_common->ring_num = ring_num;
+ rcb_common->dsaf_dev = dsaf_dev;
+
+ rcb_common->desc_num = dsaf_dev->desc_num;
+
+ hns_rcb_get_queue_mode(dsaf_mode, &max_vfn, &max_q_per_vf);
+ rcb_common->max_vfn = max_vfn;
+ rcb_common->max_q_per_vf = max_q_per_vf;
+
+ rcb_common->io_base = hns_rcb_common_get_vaddr(rcb_common);
+ rcb_common->phy_base = hns_rcb_common_get_paddr(rcb_common);
+
+ dsaf_dev->rcb_common[comm_index] = rcb_common;
+ return 0;
+}
+
+void hns_rcb_common_free_cfg(struct dsaf_device *dsaf_dev,
+ u32 comm_index)
+{
+ dsaf_dev->rcb_common[comm_index] = NULL;
+}
+
+void hns_rcb_update_stats(struct hnae_queue *queue)
+{
+ struct ring_pair_cb *ring =
+ container_of(queue, struct ring_pair_cb, q);
+ struct dsaf_device *dsaf_dev = ring->rcb_common->dsaf_dev;
+ struct ppe_common_cb *ppe_common
+ = dsaf_dev->ppe_common[ring->rcb_common->comm_index];
+ struct hns_ring_hw_stats *hw_stats = &ring->hw_stats;
+
+ hw_stats->rx_pkts += dsaf_read_dev(queue,
+ RCB_RING_RX_RING_PKTNUM_RECORD_REG);
+ dsaf_write_dev(queue, RCB_RING_RX_RING_PKTNUM_RECORD_REG, 0x1);
+
+ hw_stats->ppe_rx_ok_pkts += dsaf_read_dev(ppe_common,
+ PPE_COM_HIS_RX_PKT_QID_OK_CNT_REG + 4 * ring->index);
+ hw_stats->ppe_rx_drop_pkts += dsaf_read_dev(ppe_common,
+ PPE_COM_HIS_RX_PKT_QID_DROP_CNT_REG + 4 * ring->index);
+
+ hw_stats->tx_pkts += dsaf_read_dev(queue,
+ RCB_RING_TX_RING_PKTNUM_RECORD_REG);
+ dsaf_write_dev(queue, RCB_RING_TX_RING_PKTNUM_RECORD_REG, 0x1);
+
+ hw_stats->ppe_tx_ok_pkts += dsaf_read_dev(ppe_common,
+ PPE_COM_HIS_TX_PKT_QID_OK_CNT_REG + 4 * ring->index);
+ hw_stats->ppe_tx_drop_pkts += dsaf_read_dev(ppe_common,
+ PPE_COM_HIS_TX_PKT_QID_ERR_CNT_REG + 4 * ring->index);
+}
+
+/**
+ * hns_rcb_get_stats - get rcb statistics
+ * @queue: rcb ring
+ * @data: buffer for the statistic values
+ */
+void hns_rcb_get_stats(struct hnae_queue *queue, u64 *data)
+{
+ u64 *regs_buff = data;
+ struct ring_pair_cb *ring =
+ container_of(queue, struct ring_pair_cb, q);
+ struct hns_ring_hw_stats *hw_stats = &ring->hw_stats;
+
+ regs_buff[0] = hw_stats->tx_pkts;
+ regs_buff[1] = hw_stats->ppe_tx_ok_pkts;
+ regs_buff[2] = hw_stats->ppe_tx_drop_pkts;
+ regs_buff[3] =
+ dsaf_read_dev(queue, RCB_RING_TX_RING_FBDNUM_REG);
+
+ regs_buff[4] = queue->tx_ring.stats.tx_pkts;
+ regs_buff[5] = queue->tx_ring.stats.tx_bytes;
+ regs_buff[6] = queue->tx_ring.stats.tx_err_cnt;
+ regs_buff[7] = queue->tx_ring.stats.io_err_cnt;
+ regs_buff[8] = queue->tx_ring.stats.sw_err_cnt;
+ regs_buff[9] = queue->tx_ring.stats.seg_pkt_cnt;
+ regs_buff[10] = queue->tx_ring.stats.restart_queue;
+ regs_buff[11] = queue->tx_ring.stats.tx_busy;
+
+ regs_buff[12] = hw_stats->rx_pkts;
+ regs_buff[13] = hw_stats->ppe_rx_ok_pkts;
+ regs_buff[14] = hw_stats->ppe_rx_drop_pkts;
+ regs_buff[15] =
+ dsaf_read_dev(queue, RCB_RING_RX_RING_FBDNUM_REG);
+
+ regs_buff[16] = queue->rx_ring.stats.rx_pkts;
+ regs_buff[17] = queue->rx_ring.stats.rx_bytes;
+ regs_buff[18] = queue->rx_ring.stats.rx_err_cnt;
+ regs_buff[19] = queue->rx_ring.stats.io_err_cnt;
+ regs_buff[20] = queue->rx_ring.stats.sw_err_cnt;
+ regs_buff[21] = queue->rx_ring.stats.seg_pkt_cnt;
+ regs_buff[22] = queue->rx_ring.stats.reuse_pg_cnt;
+ regs_buff[23] = queue->rx_ring.stats.err_pkt_len;
+ regs_buff[24] = queue->rx_ring.stats.non_vld_descs;
+ regs_buff[25] = queue->rx_ring.stats.err_bd_num;
+ regs_buff[26] = queue->rx_ring.stats.l2_err;
+ regs_buff[27] = queue->rx_ring.stats.l3l4_csum_err;
+}
+
+/**
+ * hns_rcb_get_ring_sset_count - rcb string set count
+ * @stringset: ethtool string set id
+ *
+ * Return: rcb ring string set count
+ */
+int hns_rcb_get_ring_sset_count(int stringset)
+{
+ if (stringset == ETH_SS_STATS)
+ return HNS_RING_STATIC_REG_NUM;
+
+ return 0;
+}
+
+/**
+ * hns_rcb_get_common_regs_count - rcb common regs count
+ *
+ * Return: regs count
+ */
+int hns_rcb_get_common_regs_count(void)
+{
+ return HNS_RCB_COMMON_DUMP_REG_NUM;
+}
+
+/**
+ * hns_rcb_get_ring_regs_count - rcb ring regs count
+ *
+ * Return: regs count
+ */
+int hns_rcb_get_ring_regs_count(void)
+{
+ return HNS_RCB_RING_DUMP_REG_NUM;
+}
+
+/**
+ * hns_rcb_get_strings - get rcb string set
+ * @stringset: string set index
+ * @data: buffer for the string names
+ * @index: queue index
+ */
+void hns_rcb_get_strings(int stringset, u8 *data, int index)
+{
+ u8 *buff = data;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ ethtool_sprintf(&buff, "tx_ring%d_rcb_pkt_num", index);
+ ethtool_sprintf(&buff, "tx_ring%d_ppe_tx_pkt_num", index);
+ ethtool_sprintf(&buff, "tx_ring%d_ppe_drop_pkt_num", index);
+ ethtool_sprintf(&buff, "tx_ring%d_fbd_num", index);
+
+ ethtool_sprintf(&buff, "tx_ring%d_pkt_num", index);
+ ethtool_sprintf(&buff, "tx_ring%d_bytes", index);
+ ethtool_sprintf(&buff, "tx_ring%d_err_cnt", index);
+ ethtool_sprintf(&buff, "tx_ring%d_io_err", index);
+ ethtool_sprintf(&buff, "tx_ring%d_sw_err", index);
+ ethtool_sprintf(&buff, "tx_ring%d_seg_pkt", index);
+ ethtool_sprintf(&buff, "tx_ring%d_restart_queue", index);
+ ethtool_sprintf(&buff, "tx_ring%d_tx_busy", index);
+
+ ethtool_sprintf(&buff, "rx_ring%d_rcb_pkt_num", index);
+ ethtool_sprintf(&buff, "rx_ring%d_ppe_pkt_num", index);
+ ethtool_sprintf(&buff, "rx_ring%d_ppe_drop_pkt_num", index);
+ ethtool_sprintf(&buff, "rx_ring%d_fbd_num", index);
+
+ ethtool_sprintf(&buff, "rx_ring%d_pkt_num", index);
+ ethtool_sprintf(&buff, "rx_ring%d_bytes", index);
+ ethtool_sprintf(&buff, "rx_ring%d_err_cnt", index);
+ ethtool_sprintf(&buff, "rx_ring%d_io_err", index);
+ ethtool_sprintf(&buff, "rx_ring%d_sw_err", index);
+ ethtool_sprintf(&buff, "rx_ring%d_seg_pkt", index);
+ ethtool_sprintf(&buff, "rx_ring%d_reuse_pg", index);
+ ethtool_sprintf(&buff, "rx_ring%d_len_err", index);
+ ethtool_sprintf(&buff, "rx_ring%d_non_vld_desc_err", index);
+ ethtool_sprintf(&buff, "rx_ring%d_bd_num_err", index);
+ ethtool_sprintf(&buff, "rx_ring%d_l2_err", index);
+ ethtool_sprintf(&buff, "rx_ring%d_l3l4csum_err", index);
+}
+
+void hns_rcb_get_common_regs(struct rcb_common_cb *rcb_com, void *data)
+{
+ u32 *regs = data;
+ bool is_ver1 = AE_IS_VER1(rcb_com->dsaf_dev->dsaf_ver);
+ bool is_dbg = HNS_DSAF_IS_DEBUG(rcb_com->dsaf_dev);
+ u32 reg_tmp;
+ u32 reg_num_tmp;
+ u32 i;
+
+ /*rcb common registers */
+ regs[0] = dsaf_read_dev(rcb_com, RCB_COM_CFG_ENDIAN_REG);
+ regs[1] = dsaf_read_dev(rcb_com, RCB_COM_CFG_SYS_FSH_REG);
+ regs[2] = dsaf_read_dev(rcb_com, RCB_COM_CFG_INIT_FLAG_REG);
+
+ regs[3] = dsaf_read_dev(rcb_com, RCB_COM_CFG_PKT_REG);
+ regs[4] = dsaf_read_dev(rcb_com, RCB_COM_CFG_RINVLD_REG);
+ regs[5] = dsaf_read_dev(rcb_com, RCB_COM_CFG_FNA_REG);
+ regs[6] = dsaf_read_dev(rcb_com, RCB_COM_CFG_FA_REG);
+ regs[7] = dsaf_read_dev(rcb_com, RCB_COM_CFG_PKT_TC_BP_REG);
+ regs[8] = dsaf_read_dev(rcb_com, RCB_COM_CFG_PPE_TNL_CLKEN_REG);
+
+ regs[9] = dsaf_read_dev(rcb_com, RCB_COM_INTMSK_TX_PKT_REG);
+ regs[10] = dsaf_read_dev(rcb_com, RCB_COM_RINT_TX_PKT_REG);
+ regs[11] = dsaf_read_dev(rcb_com, RCB_COM_INTMASK_ECC_ERR_REG);
+ regs[12] = dsaf_read_dev(rcb_com, RCB_COM_INTSTS_ECC_ERR_REG);
+ regs[13] = dsaf_read_dev(rcb_com, RCB_COM_EBD_SRAM_ERR_REG);
+ regs[14] = dsaf_read_dev(rcb_com, RCB_COM_RXRING_ERR_REG);
+ regs[15] = dsaf_read_dev(rcb_com, RCB_COM_TXRING_ERR_REG);
+ regs[16] = dsaf_read_dev(rcb_com, RCB_COM_TX_FBD_ERR_REG);
+ regs[17] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK_EN_REG);
+ regs[18] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK0_REG);
+ regs[19] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK1_REG);
+ regs[20] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK2_REG);
+ regs[21] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK3_REG);
+ regs[22] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK4_REG);
+ regs[23] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK5_REG);
+ regs[24] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR0_REG);
+ regs[25] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR3_REG);
+ regs[26] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR4_REG);
+ regs[27] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR5_REG);
+
+ regs[28] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_INTMASK_RING);
+ regs[29] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_RING_STS);
+ regs[30] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_RING);
+ regs[31] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_INTMASK_BD);
+ regs[32] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_BD_RINT_STS);
+ regs[33] = dsaf_read_dev(rcb_com, RCB_COM_RCB_RD_BD_BUSY);
+ regs[34] = dsaf_read_dev(rcb_com, RCB_COM_RCB_FBD_CRT_EN);
+ regs[35] = dsaf_read_dev(rcb_com, RCB_COM_AXI_WR_ERR_INTMASK);
+ regs[36] = dsaf_read_dev(rcb_com, RCB_COM_AXI_ERR_STS);
+ regs[37] = dsaf_read_dev(rcb_com, RCB_COM_CHK_TX_FBD_NUM_REG);
+
+ /* rcb common entry registers */
+ for (i = 0; i < 16; i++) { /* total 16 model registers */
+ regs[38 + i]
+ = dsaf_read_dev(rcb_com, RCB_CFG_BD_NUM_REG + 4 * i);
+ regs[54 + i]
+ = dsaf_read_dev(rcb_com, RCB_CFG_PKTLINE_REG + 4 * i);
+ }
+
+ reg_tmp = is_ver1 ? RCB_CFG_OVERTIME_REG : RCB_PORT_CFG_OVERTIME_REG;
+ reg_num_tmp = (is_ver1 || is_dbg) ? 1 : 6;
+ for (i = 0; i < reg_num_tmp; i++)
+ regs[70 + i] = dsaf_read_dev(rcb_com, reg_tmp);
+
+ regs[76] = dsaf_read_dev(rcb_com, RCB_CFG_PKTLINE_INT_NUM_REG);
+ regs[77] = dsaf_read_dev(rcb_com, RCB_CFG_OVERTIME_INT_NUM_REG);
+
+ /* mark end of rcb common regs */
+ for (i = 78; i < 80; i++)
+ regs[i] = 0xcccccccc;
+}
+
+void hns_rcb_get_ring_regs(struct hnae_queue *queue, void *data)
+{
+ u32 *regs = data;
+ struct ring_pair_cb *ring_pair
+ = container_of(queue, struct ring_pair_cb, q);
+ u32 i;
+
+ /*rcb ring registers */
+ regs[0] = dsaf_read_dev(queue, RCB_RING_RX_RING_BASEADDR_L_REG);
+ regs[1] = dsaf_read_dev(queue, RCB_RING_RX_RING_BASEADDR_H_REG);
+ regs[2] = dsaf_read_dev(queue, RCB_RING_RX_RING_BD_NUM_REG);
+ regs[3] = dsaf_read_dev(queue, RCB_RING_RX_RING_BD_LEN_REG);
+ regs[4] = dsaf_read_dev(queue, RCB_RING_RX_RING_PKTLINE_REG);
+ regs[5] = dsaf_read_dev(queue, RCB_RING_RX_RING_TAIL_REG);
+ regs[6] = dsaf_read_dev(queue, RCB_RING_RX_RING_HEAD_REG);
+ regs[7] = dsaf_read_dev(queue, RCB_RING_RX_RING_FBDNUM_REG);
+ regs[8] = dsaf_read_dev(queue, RCB_RING_RX_RING_PKTNUM_RECORD_REG);
+
+ regs[9] = dsaf_read_dev(queue, RCB_RING_TX_RING_BASEADDR_L_REG);
+ regs[10] = dsaf_read_dev(queue, RCB_RING_TX_RING_BASEADDR_H_REG);
+ regs[11] = dsaf_read_dev(queue, RCB_RING_TX_RING_BD_NUM_REG);
+ regs[12] = dsaf_read_dev(queue, RCB_RING_TX_RING_BD_LEN_REG);
+ regs[13] = dsaf_read_dev(queue, RCB_RING_TX_RING_PKTLINE_REG);
+ regs[15] = dsaf_read_dev(queue, RCB_RING_TX_RING_TAIL_REG);
+ regs[16] = dsaf_read_dev(queue, RCB_RING_TX_RING_HEAD_REG);
+ regs[17] = dsaf_read_dev(queue, RCB_RING_TX_RING_FBDNUM_REG);
+ regs[18] = dsaf_read_dev(queue, RCB_RING_TX_RING_OFFSET_REG);
+ regs[19] = dsaf_read_dev(queue, RCB_RING_TX_RING_PKTNUM_RECORD_REG);
+
+ regs[20] = dsaf_read_dev(queue, RCB_RING_PREFETCH_EN_REG);
+ regs[21] = dsaf_read_dev(queue, RCB_RING_CFG_VF_NUM_REG);
+ regs[22] = dsaf_read_dev(queue, RCB_RING_ASID_REG);
+ regs[23] = dsaf_read_dev(queue, RCB_RING_RX_VM_REG);
+ regs[24] = dsaf_read_dev(queue, RCB_RING_T0_BE_RST);
+ regs[25] = dsaf_read_dev(queue, RCB_RING_COULD_BE_RST);
+ regs[26] = dsaf_read_dev(queue, RCB_RING_WRR_WEIGHT_REG);
+
+ regs[27] = dsaf_read_dev(queue, RCB_RING_INTMSK_RXWL_REG);
+ regs[28] = dsaf_read_dev(queue, RCB_RING_INTSTS_RX_RING_REG);
+ regs[29] = dsaf_read_dev(queue, RCB_RING_INTMSK_TXWL_REG);
+ regs[30] = dsaf_read_dev(queue, RCB_RING_INTSTS_TX_RING_REG);
+ regs[31] = dsaf_read_dev(queue, RCB_RING_INTMSK_RX_OVERTIME_REG);
+ regs[32] = dsaf_read_dev(queue, RCB_RING_INTSTS_RX_OVERTIME_REG);
+ regs[33] = dsaf_read_dev(queue, RCB_RING_INTMSK_TX_OVERTIME_REG);
+ regs[34] = dsaf_read_dev(queue, RCB_RING_INTSTS_TX_OVERTIME_REG);
+
+ /* mark end of ring regs */
+ for (i = 35; i < 40; i++)
+ regs[i] = 0xcccccc00 + ring_pair->index;
+}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
new file mode 100644
index 0000000000..a9f8059256
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
@@ -0,0 +1,164 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ */
+
+#ifndef _HNS_DSAF_RCB_H
+#define _HNS_DSAF_RCB_H
+
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+
+#include "hnae.h"
+#include "hns_dsaf_main.h"
+
+struct rcb_common_cb;
+
+#define HNS_RCB_IRQ_NUM_PER_QUEUE 2
+#define HNS_RCB_IRQ_IDX_TX 0
+#define HNS_RCB_IRQ_IDX_RX 1
+#define HNS_RCB_TX_REG_OFFSET 0x40
+
+#define HNS_RCB_SERVICE_NW_ENGINE_NUM DSAF_COMM_CHN
+#define HNS_RCB_DEBUG_NW_ENGINE_NUM 1
+#define HNS_RCB_RING_MAX_BD_PER_PKT 3
+#define HNS_RCB_RING_MAX_TXBD_PER_PKT 3
+#define HNS_RCBV2_RING_MAX_TXBD_PER_PKT 8
+#define HNS_RCB_MAX_PKT_SIZE MAC_MAX_MTU
+
+#define HNS_RCB_RING_MAX_PENDING_BD 1024
+#define HNS_RCB_RING_MIN_PENDING_BD 16
+
+#define HNS_RCB_REG_OFFSET 0x10000
+
+#define HNS_RCB_TX_FRAMES_LOW 1
+#define HNS_RCB_RX_FRAMES_LOW 1
+#define HNS_RCB_TX_FRAMES_HIGH 1023
+#define HNS_RCB_RX_FRAMES_HIGH 1023
+#define HNS_RCB_TX_USECS_LOW 1
+#define HNS_RCB_RX_USECS_LOW 1
+#define HNS_RCB_TX_USECS_HIGH 1023
+#define HNS_RCB_RX_USECS_HIGH 1023
+#define HNS_RCB_MAX_COALESCED_FRAMES 1023
+#define HNS_RCB_MIN_COALESCED_FRAMES 1
+#define HNS_RCB_DEF_RX_COALESCED_FRAMES 50
+#define HNS_RCB_DEF_TX_COALESCED_FRAMES 1
+#define HNS_RCB_CLK_FREQ_MHZ 350
+#define HNS_RCB_MAX_COALESCED_USECS 0x3ff
+#define HNS_RCB_DEF_COALESCED_USECS 30
+#define HNS_RCB_DEF_GAP_TIME_USECS 20
+#define HNS_RCB_TX_PKTLINE_OFFSET 8
+
+#define HNS_RCB_COMMON_ENDIAN 1
+
+#define HNS_BD_SIZE_512_TYPE 0
+#define HNS_BD_SIZE_1024_TYPE 1
+#define HNS_BD_SIZE_2048_TYPE 2
+#define HNS_BD_SIZE_4096_TYPE 3
+
+#define HNS_RCB_COMMON_DUMP_REG_NUM 80
+#define HNS_RCB_RING_DUMP_REG_NUM 40
+#define HNS_RING_STATIC_REG_NUM 28
+
+#define HNS_DUMP_REG_NUM 500
+#define HNS_STATIC_REG_NUM 12
+
+#define HNS_TSO_MODE_8BD_32K 1
+#define HNS_TSO_MDOE_4BD_16K 0
+
+enum rcb_int_flag {
+ RCB_INT_FLAG_TX = 0x1,
+ RCB_INT_FLAG_RX = (0x1 << 1),
+ RCB_INT_FLAG_MAX = (0x1 << 2), /*must be the last element */
+};
+
+struct hns_ring_hw_stats {
+ u64 tx_pkts;
+ u64 ppe_tx_ok_pkts;
+ u64 ppe_tx_drop_pkts;
+ u64 rx_pkts;
+ u64 ppe_rx_ok_pkts;
+ u64 ppe_rx_drop_pkts;
+};
+
+struct ring_pair_cb {
+ struct rcb_common_cb *rcb_common; /* ring belongs to */
+ struct device *dev; /*device for DMA mapping */
+ struct hnae_queue q;
+
+ u16 index; /* global index in a rcb common device */
+ u16 buf_size;
+
+ int virq[HNS_RCB_IRQ_NUM_PER_QUEUE];
+
+ u8 port_id_in_comm;
+ u8 used_by_vf;
+
+ struct hns_ring_hw_stats hw_stats;
+};
+
+struct rcb_common_cb {
+ u8 __iomem *io_base;
+ phys_addr_t phy_base;
+ struct dsaf_device *dsaf_dev;
+ u16 max_vfn;
+ u16 max_q_per_vf;
+
+ u8 comm_index;
+ u32 ring_num;
+ u32 desc_num; /* desc num per queue*/
+
+ struct ring_pair_cb ring_pair_cb[];
+};
+
+int hns_rcb_buf_size2type(u32 buf_size);
+
+int hns_rcb_common_get_cfg(struct dsaf_device *dsaf_dev, int comm_index);
+void hns_rcb_common_free_cfg(struct dsaf_device *dsaf_dev, u32 comm_index);
+int hns_rcb_common_init_hw(struct rcb_common_cb *rcb_common);
+void hns_rcb_start(struct hnae_queue *q, u32 val);
+int hns_rcb_get_cfg(struct rcb_common_cb *rcb_common);
+void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode,
+ u16 *max_vfn, u16 *max_q_per_vf);
+
+void hns_rcb_common_init_commit_hw(struct rcb_common_cb *rcb_common);
+
+void hns_rcb_ring_enable_hw(struct hnae_queue *q, u32 val);
+void hns_rcb_int_clr_hw(struct hnae_queue *q, u32 flag);
+void hns_rcb_int_ctrl_hw(struct hnae_queue *q, u32 flag, u32 enable);
+void hns_rcbv2_int_ctrl_hw(struct hnae_queue *q, u32 flag, u32 mask);
+void hns_rcbv2_int_clr_hw(struct hnae_queue *q, u32 flag);
+
+void hns_rcb_init_hw(struct ring_pair_cb *ring);
+void hns_rcb_reset_ring_hw(struct hnae_queue *q);
+void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag);
+int hns_rcb_wait_tx_ring_clean(struct hnae_queue *qs);
+u32 hns_rcb_get_rx_coalesced_frames(
+ struct rcb_common_cb *rcb_common, u32 port_idx);
+u32 hns_rcb_get_tx_coalesced_frames(
+ struct rcb_common_cb *rcb_common, u32 port_idx);
+u32 hns_rcb_get_coalesce_usecs(
+ struct rcb_common_cb *rcb_common, u32 port_idx);
+int hns_rcb_set_coalesce_usecs(
+ struct rcb_common_cb *rcb_common, u32 port_idx, u32 timeout);
+int hns_rcb_set_rx_coalesced_frames(
+ struct rcb_common_cb *rcb_common, u32 port_idx, u32 coalesced_frames);
+int hns_rcb_set_tx_coalesced_frames(
+ struct rcb_common_cb *rcb_common, u32 port_idx, u32 coalesced_frames);
+void hns_rcb_update_stats(struct hnae_queue *queue);
+
+void hns_rcb_get_stats(struct hnae_queue *queue, u64 *data);
+
+void hns_rcb_get_common_regs(struct rcb_common_cb *rcb_common, void *data);
+
+int hns_rcb_get_ring_sset_count(int stringset);
+int hns_rcb_get_common_regs_count(void);
+int hns_rcb_get_ring_regs_count(void);
+
+void hns_rcb_get_ring_regs(struct hnae_queue *queue, void *data);
+
+void hns_rcb_get_strings(int stringset, u8 *data, int index);
+void hns_rcb_set_rx_ring_bs(struct hnae_queue *q, u32 buf_size);
+void hns_rcb_set_tx_ring_bs(struct hnae_queue *q, u32 buf_size);
+
+#endif /* _HNS_DSAF_RCB_H */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
new file mode 100644
index 0000000000..47ccb6e0fc
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
@@ -0,0 +1,1095 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ */
+
+#ifndef _DSAF_REG_H_
+#define _DSAF_REG_H_
+
+#include <linux/regmap.h>
+#define HNS_DEBUG_RING_IRQ_IDX 0
+#define HNS_SERVICE_RING_IRQ_IDX 59
+#define HNSV2_SERVICE_RING_IRQ_IDX 25
+
+#define DSAF_MAX_PORT_NUM 6
+#define DSAF_MAX_VM_NUM 128
+
+#define DSAF_COMM_DEV_NUM 1
+#define DSAF_PPE_INODE_BASE 6
+#define DSAF_DEBUG_NW_NUM 2
+#define DSAF_SERVICE_NW_NUM 6
+#define DSAF_COMM_CHN DSAF_SERVICE_NW_NUM
+#define DSAF_GE_NUM ((DSAF_SERVICE_NW_NUM) + (DSAF_DEBUG_NW_NUM))
+#define DSAF_XGE_NUM DSAF_SERVICE_NW_NUM
+#define DSAF_PORT_TYPE_NUM 3
+#define DSAF_NODE_NUM 18
+#define DSAF_XOD_BIG_NUM DSAF_NODE_NUM
+#define DSAF_SBM_NUM DSAF_NODE_NUM
+#define DSAFV2_SBM_NUM 8
+#define DSAFV2_SBM_XGE_CHN 6
+#define DSAFV2_SBM_PPE_CHN 1
+#define DASFV2_ROCEE_CRD_NUM 1
+
+#define DSAF_VOQ_NUM DSAF_NODE_NUM
+#define DSAF_INODE_NUM DSAF_NODE_NUM
+#define DSAF_XOD_NUM 8
+#define DSAF_TBL_NUM 8
+#define DSAF_SW_PORT_NUM 8
+#define DSAF_TOTAL_QUEUE_NUM 129
+
+/* reserve a tcam entry for each port to support promisc by fuzzy match */
+#define DSAFV2_MAC_FUZZY_TCAM_NUM DSAF_MAX_PORT_NUM
+
+#define DSAF_TCAM_SUM 512
+#define DSAF_LINE_SUM (2048 * 14)
+
+#define DSAF_SUB_SC_NT_SRAM_CLK_SEL_REG 0x100
+#define DSAF_SUB_SC_HILINK3_CRG_CTRL0_REG 0x180
+#define DSAF_SUB_SC_HILINK3_CRG_CTRL1_REG 0x184
+#define DSAF_SUB_SC_HILINK3_CRG_CTRL2_REG 0x188
+#define DSAF_SUB_SC_HILINK3_CRG_CTRL3_REG 0x18C
+#define DSAF_SUB_SC_HILINK4_CRG_CTRL0_REG 0x190
+#define DSAF_SUB_SC_HILINK4_CRG_CTRL1_REG 0x194
+#define DSAF_SUB_SC_DSAF_CLK_EN_REG 0x300
+#define DSAF_SUB_SC_DSAF_CLK_DIS_REG 0x304
+#define DSAF_SUB_SC_NT_CLK_EN_REG 0x308
+#define DSAF_SUB_SC_NT_CLK_DIS_REG 0x30C
+#define DSAF_SUB_SC_XGE_CLK_EN_REG 0x310
+#define DSAF_SUB_SC_XGE_CLK_DIS_REG 0x314
+#define DSAF_SUB_SC_GE_CLK_EN_REG 0x318
+#define DSAF_SUB_SC_GE_CLK_DIS_REG 0x31C
+#define DSAF_SUB_SC_PPE_CLK_EN_REG 0x320
+#define DSAF_SUB_SC_PPE_CLK_DIS_REG 0x324
+#define DSAF_SUB_SC_RCB_PPE_COM_CLK_EN_REG 0x350
+#define DSAF_SUB_SC_RCB_PPE_COM_CLK_DIS_REG 0x354
+#define DSAF_SUB_SC_XBAR_RESET_REQ_REG 0xA00
+#define DSAF_SUB_SC_XBAR_RESET_DREQ_REG 0xA04
+#define DSAF_SUB_SC_NT_RESET_REQ_REG 0xA08
+#define DSAF_SUB_SC_NT_RESET_DREQ_REG 0xA0C
+#define DSAF_SUB_SC_XGE_RESET_REQ_REG 0xA10
+#define DSAF_SUB_SC_XGE_RESET_DREQ_REG 0xA14
+#define DSAF_SUB_SC_GE_RESET_REQ0_REG 0xA18
+#define DSAF_SUB_SC_GE_RESET_DREQ0_REG 0xA1C
+#define DSAF_SUB_SC_GE_RESET_REQ1_REG 0xA20
+#define DSAF_SUB_SC_GE_RESET_DREQ1_REG 0xA24
+#define DSAF_SUB_SC_PPE_RESET_REQ_REG 0xA48
+#define DSAF_SUB_SC_PPE_RESET_DREQ_REG 0xA4C
+#define DSAF_SUB_SC_RCB_PPE_COM_RESET_REQ_REG 0xA88
+#define DSAF_SUB_SC_RCB_PPE_COM_RESET_DREQ_REG 0xA8C
+#define DSAF_SUB_SC_DSAF_RESET_REQ_REG 0xAA8
+#define DSAF_SUB_SC_DSAF_RESET_DREQ_REG 0xAAC
+#define DSAF_SUB_SC_ROCEE_RESET_REQ_REG 0xA50
+#define DSAF_SUB_SC_ROCEE_RESET_DREQ_REG 0xA54
+#define DSAF_SUB_SC_ROCEE_CLK_DIS_REG 0x32C
+#define DSAF_SUB_SC_ROCEE_CLK_EN_REG 0x328
+#define DSAF_SUB_SC_LIGHT_MODULE_DETECT_EN_REG 0x2060
+#define DSAF_SUB_SC_TCAM_MBIST_EN_REG 0x2300
+#define DSAF_SUB_SC_DSAF_CLK_ST_REG 0x5300
+#define DSAF_SUB_SC_NT_CLK_ST_REG 0x5304
+#define DSAF_SUB_SC_XGE_CLK_ST_REG 0x5308
+#define DSAF_SUB_SC_GE_CLK_ST_REG 0x530C
+#define DSAF_SUB_SC_PPE_CLK_ST_REG 0x5310
+#define DSAF_SUB_SC_ROCEE_CLK_ST_REG 0x5314
+#define DSAF_SUB_SC_CPU_CLK_ST_REG 0x5318
+#define DSAF_SUB_SC_RCB_PPE_COM_CLK_ST_REG 0x5328
+#define DSAF_SUB_SC_XBAR_RESET_ST_REG 0x5A00
+#define DSAF_SUB_SC_NT_RESET_ST_REG 0x5A04
+#define DSAF_SUB_SC_XGE_RESET_ST_REG 0x5A08
+#define DSAF_SUB_SC_GE_RESET_ST0_REG 0x5A0C
+#define DSAF_SUB_SC_GE_RESET_ST1_REG 0x5A10
+#define DSAF_SUB_SC_PPE_RESET_ST_REG 0x5A24
+#define DSAF_SUB_SC_RCB_PPE_COM_RESET_ST_REG 0x5A44
+
+/* serdes offset */
+#define HNS_MAC_HILINK3_REG DSAF_SUB_SC_HILINK3_CRG_CTRL0_REG
+#define HNS_MAC_HILINK4_REG DSAF_SUB_SC_HILINK4_CRG_CTRL0_REG
+#define HNS_MAC_HILINK3V2_REG DSAF_SUB_SC_HILINK3_CRG_CTRL1_REG
+#define HNS_MAC_HILINK4V2_REG DSAF_SUB_SC_HILINK4_CRG_CTRL1_REG
+#define HNS_MAC_LANE0_CTLEDFE_REG 0x000BFFCCULL
+#define HNS_MAC_LANE1_CTLEDFE_REG 0x000BFFBCULL
+#define HNS_MAC_LANE2_CTLEDFE_REG 0x000BFFACULL
+#define HNS_MAC_LANE3_CTLEDFE_REG 0x000BFF9CULL
+#define HNS_MAC_LANE0_STATE_REG 0x000BFFD4ULL
+#define HNS_MAC_LANE1_STATE_REG 0x000BFFC4ULL
+#define HNS_MAC_LANE2_STATE_REG 0x000BFFB4ULL
+#define HNS_MAC_LANE3_STATE_REG 0x000BFFA4ULL
+
+#define HILINK_RESET_TIMOUT 10000
+
+#define DSAF_SRAM_INIT_OVER_0_REG 0x0
+#define DSAF_CFG_0_REG 0x4
+#define DSAF_ECC_ERR_INVERT_0_REG 0x8
+#define DSAF_ABNORMAL_TIMEOUT_0_REG 0x1C
+#define DSAF_FSM_TIMEOUT_0_REG 0x20
+#define DSAF_DSA_REG_CNT_CLR_CE_REG 0x2C
+#define DSAF_DSA_SBM_INF_FIFO_THRD_REG 0x30
+#define DSAF_DSA_SRAM_1BIT_ECC_SEL_REG 0x34
+#define DSAF_DSA_SRAM_1BIT_ECC_CNT_REG 0x38
+#define DSAF_PFC_EN_0_REG 0x50
+#define DSAF_PFC_UNIT_CNT_0_REG 0x70
+#define DSAF_XGE_INT_MSK_0_REG 0x100
+#define DSAF_PPE_INT_MSK_0_REG 0x120
+#define DSAF_ROCEE_INT_MSK_0_REG 0x140
+#define DSAF_XGE_INT_SRC_0_REG 0x160
+#define DSAF_PPE_INT_SRC_0_REG 0x180
+#define DSAF_ROCEE_INT_SRC_0_REG 0x1A0
+#define DSAF_XGE_INT_STS_0_REG 0x1C0
+#define DSAF_PPE_INT_STS_0_REG 0x1E0
+#define DSAF_ROCEE_INT_STS_0_REG 0x200
+#define DSAFV2_SERDES_LBK_0_REG 0x220
+#define DSAF_PAUSE_CFG_REG 0x240
+#define DSAF_ROCE_PORT_MAP_REG 0x2A0
+#define DSAF_ROCE_SL_MAP_REG 0x2A4
+#define DSAF_PPE_QID_CFG_0_REG 0x300
+#define DSAF_SW_PORT_TYPE_0_REG 0x320
+#define DSAF_STP_PORT_TYPE_0_REG 0x340
+#define DSAF_MIX_DEF_QID_0_REG 0x360
+#define DSAF_PORT_DEF_VLAN_0_REG 0x380
+#define DSAF_VM_DEF_VLAN_0_REG 0x400
+
+#define DSAF_INODE_CUT_THROUGH_CFG_0_REG 0x1000
+#define DSAF_INODE_ECC_INVERT_EN_0_REG 0x1008
+#define DSAF_INODE_ECC_ERR_ADDR_0_REG 0x100C
+#define DSAF_INODE_IN_PORT_NUM_0_REG 0x1018
+#define DSAF_INODE_PRI_TC_CFG_0_REG 0x101C
+#define DSAF_INODE_BP_STATUS_0_REG 0x1020
+#define DSAF_INODE_PAD_DISCARD_NUM_0_REG 0x1028
+#define DSAF_INODE_FINAL_IN_MAN_NUM_0_REG 0x102C
+#define DSAF_INODE_FINAL_IN_PKT_NUM_0_REG 0x1030
+#define DSAF_INODE_SBM_PID_NUM_0_REG 0x1038
+#define DSAF_INODE_FINAL_IN_PAUSE_NUM_0_REG 0x103C
+#define DSAFV2_INODE_FINAL_IN_PAUSE_NUM_0_REG 0x1024
+#define DSAF_INODE_SBM_RELS_NUM_0_REG 0x104C
+#define DSAF_INODE_SBM_DROP_NUM_0_REG 0x1050
+#define DSAF_INODE_CRC_FALSE_NUM_0_REG 0x1054
+#define DSAF_INODE_BP_DISCARD_NUM_0_REG 0x1058
+#define DSAF_INODE_RSLT_DISCARD_NUM_0_REG 0x105C
+#define DSAF_INODE_LOCAL_ADDR_FALSE_NUM_0_REG 0x1060
+#define DSAF_INODE_VOQ_OVER_NUM_0_REG 0x1068
+#define DSAF_INODE_BD_SAVE_STATUS_0_REG 0x1900
+#define DSAF_INODE_BD_ORDER_STATUS_0_REG 0x1950
+#define DSAF_INODE_SW_VLAN_TAG_DISC_0_REG 0x1A00
+#define DSAF_INODE_IN_DATA_STP_DISC_0_REG 0x1A50
+#define DSAF_INODE_GE_FC_EN_0_REG 0x1B00
+#define DSAF_INODE_VC0_IN_PKT_NUM_0_REG 0x1B50
+#define DSAF_INODE_VC1_IN_PKT_NUM_0_REG 0x103C
+#define DSAF_INODE_IN_PRIO_PAUSE_BASE_REG 0x1C00
+#define DSAF_INODE_IN_PRIO_PAUSE_BASE_OFFSET 0x100
+#define DSAF_INODE_IN_PRIO_PAUSE_OFFSET 0x50
+
+#define DSAF_SBM_CFG_REG_0_REG 0x2000
+#define DSAF_SBM_BP_CFG_0_XGE_REG_0_REG 0x2004
+#define DSAF_SBM_BP_CFG_0_PPE_REG_0_REG 0x2304
+#define DSAF_SBM_BP_CFG_0_ROCEE_REG_0_REG 0x2604
+#define DSAF_SBM_BP_CFG_1_REG_0_REG 0x2008
+#define DSAF_SBM_BP_CFG_2_XGE_REG_0_REG 0x200C
+#define DSAF_SBM_BP_CFG_2_PPE_REG_0_REG 0x230C
+#define DSAF_SBM_BP_CFG_2_ROCEE_REG_0_REG 0x260C
+#define DSAF_SBM_ROCEE_CFG_REG_REG 0x2380
+#define DSAFV2_SBM_BP_CFG_2_ROCEE_REG_0_REG 0x238C
+#define DSAF_SBM_FREE_CNT_0_0_REG 0x2010
+#define DSAF_SBM_FREE_CNT_1_0_REG 0x2014
+#define DSAF_SBM_BP_CNT_0_0_REG 0x2018
+#define DSAF_SBM_BP_CNT_1_0_REG 0x201C
+#define DSAF_SBM_BP_CNT_2_0_REG 0x2020
+#define DSAF_SBM_BP_CNT_3_0_REG 0x2024
+#define DSAF_SBM_INER_ST_0_REG 0x2028
+#define DSAF_SBM_MIB_REQ_FAILED_TC_0_REG 0x202C
+#define DSAF_SBM_LNK_INPORT_CNT_0_REG 0x2030
+#define DSAF_SBM_LNK_DROP_CNT_0_REG 0x2034
+#define DSAF_SBM_INF_OUTPORT_CNT_0_REG 0x2038
+#define DSAF_SBM_LNK_INPORT_TC0_CNT_0_REG 0x203C
+#define DSAF_SBM_LNK_INPORT_TC1_CNT_0_REG 0x2040
+#define DSAF_SBM_LNK_INPORT_TC2_CNT_0_REG 0x2044
+#define DSAF_SBM_LNK_INPORT_TC3_CNT_0_REG 0x2048
+#define DSAF_SBM_LNK_INPORT_TC4_CNT_0_REG 0x204C
+#define DSAF_SBM_LNK_INPORT_TC5_CNT_0_REG 0x2050
+#define DSAF_SBM_LNK_INPORT_TC6_CNT_0_REG 0x2054
+#define DSAF_SBM_LNK_INPORT_TC7_CNT_0_REG 0x2058
+#define DSAF_SBM_LNK_REQ_CNT_0_REG 0x205C
+#define DSAF_SBM_LNK_RELS_CNT_0_REG 0x2060
+#define DSAF_SBM_BP_CFG_3_REG_0_REG 0x2068
+#define DSAF_SBM_BP_CFG_4_REG_0_REG 0x206C
+
+#define DSAF_XOD_ETS_TSA_TC0_TC3_CFG_0_REG 0x3000
+#define DSAF_XOD_ETS_TSA_TC4_TC7_CFG_0_REG 0x3004
+#define DSAF_XOD_ETS_BW_TC0_TC3_CFG_0_REG 0x3008
+#define DSAF_XOD_ETS_BW_TC4_TC7_CFG_0_REG 0x300C
+#define DSAF_XOD_ETS_BW_OFFSET_CFG_0_REG 0x3010
+#define DSAF_XOD_ETS_TOKEN_CFG_0_REG 0x3014
+#define DSAF_XOD_PFS_CFG_0_0_REG 0x3018
+#define DSAF_XOD_PFS_CFG_1_0_REG 0x301C
+#define DSAF_XOD_PFS_CFG_2_0_REG 0x3020
+#define DSAF_XOD_GNT_L_0_REG 0x3024
+#define DSAF_XOD_GNT_H_0_REG 0x3028
+#define DSAF_XOD_CONNECT_STATE_0_REG 0x302C
+#define DSAF_XOD_RCVPKT_CNT_0_REG 0x3030
+#define DSAF_XOD_RCVTC0_CNT_0_REG 0x3034
+#define DSAF_XOD_RCVTC1_CNT_0_REG 0x3038
+#define DSAF_XOD_RCVTC2_CNT_0_REG 0x303C
+#define DSAF_XOD_RCVTC3_CNT_0_REG 0x3040
+#define DSAF_XOD_RCVVC0_CNT_0_REG 0x3044
+#define DSAF_XOD_RCVVC1_CNT_0_REG 0x3048
+#define DSAF_XOD_XGE_RCVIN0_CNT_0_REG 0x304C
+#define DSAF_XOD_XGE_RCVIN1_CNT_0_REG 0x3050
+#define DSAF_XOD_XGE_RCVIN2_CNT_0_REG 0x3054
+#define DSAF_XOD_XGE_RCVIN3_CNT_0_REG 0x3058
+#define DSAF_XOD_XGE_RCVIN4_CNT_0_REG 0x305C
+#define DSAF_XOD_XGE_RCVIN5_CNT_0_REG 0x3060
+#define DSAF_XOD_XGE_RCVIN6_CNT_0_REG 0x3064
+#define DSAF_XOD_XGE_RCVIN7_CNT_0_REG 0x3068
+#define DSAF_XOD_PPE_RCVIN0_CNT_0_REG 0x306C
+#define DSAF_XOD_PPE_RCVIN1_CNT_0_REG 0x3070
+#define DSAF_XOD_ROCEE_RCVIN0_CNT_0_REG 0x3074
+#define DSAF_XOD_ROCEE_RCVIN1_CNT_0_REG 0x3078
+#define DSAF_XOD_FIFO_STATUS_0_REG 0x307C
+#define DSAF_XOD_XGE_PFC_PRIO_CNT_BASE_REG 0x3A00
+#define DSAF_XOD_XGE_PFC_PRIO_CNT_OFFSET 0x4
+
+#define DSAF_VOQ_ECC_INVERT_EN_0_REG 0x4004
+#define DSAF_VOQ_SRAM_PKT_NUM_0_REG 0x4008
+#define DSAF_VOQ_IN_PKT_NUM_0_REG 0x400C
+#define DSAF_VOQ_OUT_PKT_NUM_0_REG 0x4010
+#define DSAF_VOQ_ECC_ERR_ADDR_0_REG 0x4014
+#define DSAF_VOQ_BP_STATUS_0_REG 0x4018
+#define DSAF_VOQ_SPUP_IDLE_0_REG 0x401C
+#define DSAF_VOQ_XGE_XOD_REQ_0_0_REG 0x4024
+#define DSAF_VOQ_XGE_XOD_REQ_1_0_REG 0x4028
+#define DSAF_VOQ_PPE_XOD_REQ_0_REG 0x402C
+#define DSAF_VOQ_ROCEE_XOD_REQ_0_REG 0x4030
+#define DSAF_VOQ_BP_ALL_THRD_0_REG 0x4034
+
+#define DSAF_TBL_CTRL_0_REG 0x5000
+#define DSAF_TBL_INT_MSK_0_REG 0x5004
+#define DSAF_TBL_INT_SRC_0_REG 0x5008
+#define DSAF_TBL_INT_STS_0_REG 0x5100
+#define DSAF_TBL_TCAM_ADDR_0_REG 0x500C
+#define DSAF_TBL_LINE_ADDR_0_REG 0x5010
+#define DSAF_TBL_TCAM_HIGH_0_REG 0x5014
+#define DSAF_TBL_TCAM_LOW_0_REG 0x5018
+#define DSAF_TBL_TCAM_MCAST_CFG_4_0_REG 0x501C
+#define DSAF_TBL_TCAM_MCAST_CFG_3_0_REG 0x5020
+#define DSAF_TBL_TCAM_MCAST_CFG_2_0_REG 0x5024
+#define DSAF_TBL_TCAM_MCAST_CFG_1_0_REG 0x5028
+#define DSAF_TBL_TCAM_MCAST_CFG_0_0_REG 0x502C
+#define DSAF_TBL_TCAM_UCAST_CFG_0_REG 0x5030
+#define DSAF_TBL_LIN_CFG_0_REG 0x5034
+#define DSAF_TBL_TCAM_RDATA_HIGH_0_REG 0x5038
+#define DSAF_TBL_TCAM_RDATA_LOW_0_REG 0x503C
+#define DSAF_TBL_TCAM_RAM_RDATA4_0_REG 0x5040
+#define DSAF_TBL_TCAM_RAM_RDATA3_0_REG 0x5044
+#define DSAF_TBL_TCAM_RAM_RDATA2_0_REG 0x5048
+#define DSAF_TBL_TCAM_RAM_RDATA1_0_REG 0x504C
+#define DSAF_TBL_TCAM_RAM_RDATA0_0_REG 0x5050
+#define DSAF_TBL_LIN_RDATA_0_REG 0x5054
+#define DSAF_TBL_DA0_MIS_INFO1_0_REG 0x5058
+#define DSAF_TBL_DA0_MIS_INFO0_0_REG 0x505C
+#define DSAF_TBL_SA_MIS_INFO2_0_REG 0x5104
+#define DSAF_TBL_SA_MIS_INFO1_0_REG 0x5098
+#define DSAF_TBL_SA_MIS_INFO0_0_REG 0x509C
+#define DSAF_TBL_PUL_0_REG 0x50A0
+#define DSAF_TBL_OLD_RSLT_0_REG 0x50A4
+#define DSAF_TBL_OLD_SCAN_VAL_0_REG 0x50A8
+#define DSAF_TBL_DFX_CTRL_0_REG 0x50AC
+#define DSAF_TBL_DFX_STAT_0_REG 0x50B0
+#define DSAF_TBL_DFX_STAT_2_0_REG 0x5108
+#define DSAF_TBL_LKUP_NUM_I_0_REG 0x50C0
+#define DSAF_TBL_LKUP_NUM_O_0_REG 0x50E0
+#define DSAF_TBL_UCAST_BCAST_MIS_INFO_0_0_REG 0x510C
+#define DSAF_TBL_TCAM_MATCH_CFG_H_REG 0x5130
+#define DSAF_TBL_TCAM_MATCH_CFG_L_REG 0x5134
+
+#define DSAF_INODE_FIFO_WL_0_REG 0x6000
+#define DSAF_ONODE_FIFO_WL_0_REG 0x6020
+#define DSAF_XGE_GE_WORK_MODE_0_REG 0x6040
+#define DSAF_XGE_APP_RX_LINK_UP_0_REG 0x6080
+#define DSAF_NETPORT_CTRL_SIG_0_REG 0x60A0
+#define DSAF_XGE_CTRL_SIG_CFG_0_REG 0x60C0
+
+#define PPE_COM_CFG_QID_MODE_REG 0x0
+#define PPE_COM_INTEN_REG 0x110
+#define PPE_COM_RINT_REG 0x114
+#define PPE_COM_INTSTS_REG 0x118
+#define PPE_COM_HIS_RX_PKT_QID_DROP_CNT_REG 0x300
+#define PPE_COM_HIS_RX_PKT_QID_OK_CNT_REG 0x600
+#define PPE_COM_HIS_TX_PKT_QID_ERR_CNT_REG 0x900
+#define PPE_COM_HIS_TX_PKT_QID_OK_CNT_REG 0xC00
+#define PPE_COM_COMMON_CNT_CLR_CE_REG 0x1120
+
+#define PPE_CFG_TX_FIFO_THRSLD_REG 0x0
+#define PPE_CFG_RX_FIFO_THRSLD_REG 0x4
+#define PPE_CFG_RX_FIFO_PAUSE_THRSLD_REG 0x8
+#define PPE_CFG_RX_FIFO_SW_BP_THRSLD_REG 0xC
+#define PPE_CFG_PAUSE_IDLE_CNT_REG 0x10
+#define PPE_CFG_BUS_CTRL_REG 0x40
+#define PPE_CFG_TNL_TO_BE_RST_REG 0x48
+#define PPE_CURR_TNL_CAN_RST_REG 0x4C
+#define PPE_CFG_XGE_MODE_REG 0x80
+#define PPE_CFG_MAX_FRAME_LEN_REG 0x84
+#define PPE_CFG_RX_PKT_MODE_REG 0x88
+#define PPE_CFG_RX_VLAN_TAG_REG 0x8C
+#define PPE_CFG_TAG_GEN_REG 0x90
+#define PPE_CFG_PARSE_TAG_REG 0x94
+#define PPE_CFG_PRO_CHECK_EN_REG 0x98
+#define PPEV2_CFG_TSO_EN_REG 0xA0
+#define PPEV2_VLAN_STRIP_EN_REG 0xAC
+#define PPE_INTEN_REG 0x100
+#define PPE_RINT_REG 0x104
+#define PPE_INTSTS_REG 0x108
+#define PPE_CFG_RX_PKT_INT_REG 0x140
+#define PPE_CFG_HEAT_DECT_TIME0_REG 0x144
+#define PPE_CFG_HEAT_DECT_TIME1_REG 0x148
+#define PPE_HIS_RX_SW_PKT_CNT_REG 0x200
+#define PPE_HIS_RX_WR_BD_OK_PKT_CNT_REG 0x204
+#define PPE_HIS_RX_PKT_NO_BUF_CNT_REG 0x208
+#define PPE_HIS_TX_BD_CNT_REG 0x20C
+#define PPE_HIS_TX_PKT_CNT_REG 0x210
+#define PPE_HIS_TX_PKT_OK_CNT_REG 0x214
+#define PPE_HIS_TX_PKT_EPT_CNT_REG 0x218
+#define PPE_HIS_TX_PKT_CS_FAIL_CNT_REG 0x21C
+#define PPE_HIS_RX_APP_BUF_FAIL_CNT_REG 0x220
+#define PPE_HIS_RX_APP_BUF_WAIT_CNT_REG 0x224
+#define PPE_HIS_RX_PKT_DROP_FUL_CNT_REG 0x228
+#define PPE_HIS_RX_PKT_DROP_PRT_CNT_REG 0x22C
+#define PPE_TNL_0_5_CNT_CLR_CE_REG 0x300
+#define PPE_CFG_AXI_DBG_REG 0x304
+#define PPE_HIS_PRO_ERR_REG 0x308
+#define PPE_HIS_TNL_FIFO_ERR_REG 0x30C
+#define PPE_CURR_CFF_DATA_NUM_REG 0x310
+#define PPE_CURR_RX_ST_REG 0x314
+#define PPE_CURR_TX_ST_REG 0x318
+#define PPE_CURR_RX_FIFO0_REG 0x31C
+#define PPE_CURR_RX_FIFO1_REG 0x320
+#define PPE_CURR_TX_FIFO0_REG 0x324
+#define PPE_CURR_TX_FIFO1_REG 0x328
+#define PPE_ECO0_REG 0x32C
+#define PPE_ECO1_REG 0x330
+#define PPE_ECO2_REG 0x334
+#define PPEV2_INDRECTION_TBL_REG 0x800
+#define PPEV2_RSS_KEY_REG 0x900
+
+#define RCB_COM_CFG_ENDIAN_REG 0x0
+#define RCB_COM_CFG_SYS_FSH_REG 0xC
+#define RCB_COM_CFG_INIT_FLAG_REG 0x10
+#define RCB_COM_CFG_PKT_REG 0x30
+#define RCB_COM_CFG_RINVLD_REG 0x34
+#define RCB_COM_CFG_FNA_REG 0x38
+#define RCB_COM_CFG_FA_REG 0x3C
+#define RCB_COM_CFG_PKT_TC_BP_REG 0x40
+#define RCB_COM_CFG_PPE_TNL_CLKEN_REG 0x44
+#define RCBV2_COM_CFG_USER_REG 0x30
+#define RCBV2_COM_CFG_TSO_MODE_REG 0x50
+
+#define RCB_COM_INTMSK_TX_PKT_REG 0x3A0
+#define RCB_COM_RINT_TX_PKT_REG 0x3A8
+#define RCB_COM_INTMASK_ECC_ERR_REG 0x400
+#define RCB_COM_INTSTS_ECC_ERR_REG 0x408
+#define RCB_COM_EBD_SRAM_ERR_REG 0x410
+#define RCB_COM_RXRING_ERR_REG 0x41C
+#define RCB_COM_TXRING_ERR_REG 0x420
+#define RCB_COM_TX_FBD_ERR_REG 0x424
+#define RCB_SRAM_ECC_CHK_EN_REG 0x428
+#define RCB_SRAM_ECC_CHK0_REG 0x42C
+#define RCB_SRAM_ECC_CHK1_REG 0x430
+#define RCB_SRAM_ECC_CHK2_REG 0x434
+#define RCB_SRAM_ECC_CHK3_REG 0x438
+#define RCB_SRAM_ECC_CHK4_REG 0x43c
+#define RCB_SRAM_ECC_CHK5_REG 0x440
+#define RCB_ECC_ERR_ADDR0_REG 0x450
+#define RCB_ECC_ERR_ADDR3_REG 0x45C
+#define RCB_ECC_ERR_ADDR4_REG 0x460
+#define RCB_ECC_ERR_ADDR5_REG 0x464
+
+#define RCB_COM_SF_CFG_INTMASK_RING 0x470
+#define RCB_COM_SF_CFG_RING_STS 0x474
+#define RCB_COM_SF_CFG_RING 0x478
+#define RCB_COM_SF_CFG_INTMASK_BD 0x47C
+#define RCB_COM_SF_CFG_BD_RINT_STS 0x480
+#define RCB_COM_RCB_RD_BD_BUSY 0x490
+#define RCB_COM_RCB_FBD_CRT_EN 0x494
+#define RCB_COM_AXI_WR_ERR_INTMASK 0x498
+#define RCB_COM_AXI_ERR_STS 0x49C
+#define RCB_COM_CHK_TX_FBD_NUM_REG 0x4a0
+
+#define RCB_CFG_BD_NUM_REG 0x9000
+#define RCB_CFG_PKTLINE_REG 0x9050
+
+#define RCB_CFG_OVERTIME_REG 0x9300
+#define RCB_CFG_PKTLINE_INT_NUM_REG 0x9304
+#define RCB_CFG_OVERTIME_INT_NUM_REG 0x9308
+#define RCB_PORT_INT_GAPTIME_REG 0x9400
+#define RCB_PORT_CFG_OVERTIME_REG 0x9430
+
+#define RCB_RING_RX_RING_BASEADDR_L_REG 0x00000
+#define RCB_RING_RX_RING_BASEADDR_H_REG 0x00004
+#define RCB_RING_RX_RING_BD_NUM_REG 0x00008
+#define RCB_RING_RX_RING_BD_LEN_REG 0x0000C
+#define RCB_RING_RX_RING_PKTLINE_REG 0x00010
+#define RCB_RING_RX_RING_TAIL_REG 0x00018
+#define RCB_RING_RX_RING_HEAD_REG 0x0001C
+#define RCB_RING_RX_RING_FBDNUM_REG 0x00020
+#define RCB_RING_RX_RING_PKTNUM_RECORD_REG 0x0002C
+
+#define RCB_RING_TX_RING_BASEADDR_L_REG 0x00040
+#define RCB_RING_TX_RING_BASEADDR_H_REG 0x00044
+#define RCB_RING_TX_RING_BD_NUM_REG 0x00048
+#define RCB_RING_TX_RING_BD_LEN_REG 0x0004C
+#define RCB_RING_TX_RING_PKTLINE_REG 0x00050
+#define RCB_RING_TX_RING_TAIL_REG 0x00058
+#define RCB_RING_TX_RING_HEAD_REG 0x0005C
+#define RCB_RING_TX_RING_FBDNUM_REG 0x00060
+#define RCB_RING_TX_RING_OFFSET_REG 0x00064
+#define RCB_RING_TX_RING_PKTNUM_RECORD_REG 0x0006C
+
+#define RCB_RING_PREFETCH_EN_REG 0x0007C
+#define RCB_RING_CFG_VF_NUM_REG 0x00080
+#define RCB_RING_ASID_REG 0x0008C
+#define RCB_RING_RX_VM_REG 0x00090
+#define RCB_RING_T0_BE_RST 0x00094
+#define RCB_RING_COULD_BE_RST 0x00098
+#define RCB_RING_WRR_WEIGHT_REG 0x0009c
+
+#define RCB_RING_INTMSK_RXWL_REG 0x000A0
+#define RCB_RING_INTSTS_RX_RING_REG 0x000A4
+#define RCBV2_RX_RING_INT_STS_REG 0x000A8
+#define RCB_RING_INTMSK_TXWL_REG 0x000AC
+#define RCB_RING_INTSTS_TX_RING_REG 0x000B0
+#define RCBV2_TX_RING_INT_STS_REG 0x000B4
+#define RCB_RING_INTMSK_RX_OVERTIME_REG 0x000B8
+#define RCB_RING_INTSTS_RX_OVERTIME_REG 0x000BC
+#define RCB_RING_INTMSK_TX_OVERTIME_REG 0x000C4
+#define RCB_RING_INTSTS_TX_OVERTIME_REG 0x000C8
+
+#define GMAC_FIFO_STATE_REG 0x0000UL
+#define GMAC_DUPLEX_TYPE_REG 0x0008UL
+#define GMAC_FD_FC_TYPE_REG 0x000CUL
+#define GMAC_TX_WATER_LINE_REG 0x0010UL
+#define GMAC_FC_TX_TIMER_REG 0x001CUL
+#define GMAC_FD_FC_ADDR_LOW_REG 0x0020UL
+#define GMAC_FD_FC_ADDR_HIGH_REG 0x0024UL
+#define GMAC_IPG_TX_TIMER_REG 0x0030UL
+#define GMAC_PAUSE_THR_REG 0x0038UL
+#define GMAC_MAX_FRM_SIZE_REG 0x003CUL
+#define GMAC_PORT_MODE_REG 0x0040UL
+#define GMAC_PORT_EN_REG 0x0044UL
+#define GMAC_PAUSE_EN_REG 0x0048UL
+#define GMAC_SHORT_RUNTS_THR_REG 0x0050UL
+#define GMAC_AN_NEG_STATE_REG 0x0058UL
+#define GMAC_TX_LOCAL_PAGE_REG 0x005CUL
+#define GMAC_TRANSMIT_CONTROL_REG 0x0060UL
+#define GMAC_REC_FILT_CONTROL_REG 0x0064UL
+#define GMAC_PTP_CONFIG_REG 0x0074UL
+
+#define GMAC_RX_OCTETS_TOTAL_OK_REG 0x0080UL
+#define GMAC_RX_OCTETS_BAD_REG 0x0084UL
+#define GMAC_RX_UC_PKTS_REG 0x0088UL
+#define GMAC_RX_MC_PKTS_REG 0x008CUL
+#define GMAC_RX_BC_PKTS_REG 0x0090UL
+#define GMAC_RX_PKTS_64OCTETS_REG 0x0094UL
+#define GMAC_RX_PKTS_65TO127OCTETS_REG 0x0098UL
+#define GMAC_RX_PKTS_128TO255OCTETS_REG 0x009CUL
+#define GMAC_RX_PKTS_255TO511OCTETS_REG 0x00A0UL
+#define GMAC_RX_PKTS_512TO1023OCTETS_REG 0x00A4UL
+#define GMAC_RX_PKTS_1024TO1518OCTETS_REG 0x00A8UL
+#define GMAC_RX_PKTS_1519TOMAXOCTETS_REG 0x00ACUL
+#define GMAC_RX_FCS_ERRORS_REG 0x00B0UL
+#define GMAC_RX_TAGGED_REG 0x00B4UL
+#define GMAC_RX_DATA_ERR_REG 0x00B8UL
+#define GMAC_RX_ALIGN_ERRORS_REG 0x00BCUL
+#define GMAC_RX_LONG_ERRORS_REG 0x00C0UL
+#define GMAC_RX_JABBER_ERRORS_REG 0x00C4UL
+#define GMAC_RX_PAUSE_MACCTRL_FRAM_REG 0x00C8UL
+#define GMAC_RX_UNKNOWN_MACCTRL_FRAM_REG 0x00CCUL
+#define GMAC_RX_VERY_LONG_ERR_CNT_REG 0x00D0UL
+#define GMAC_RX_RUNT_ERR_CNT_REG 0x00D4UL
+#define GMAC_RX_SHORT_ERR_CNT_REG 0x00D8UL
+#define GMAC_RX_FILT_PKT_CNT_REG 0x00E8UL
+#define GMAC_RX_OCTETS_TOTAL_FILT_REG 0x00ECUL
+#define GMAC_OCTETS_TRANSMITTED_OK_REG 0x0100UL
+#define GMAC_OCTETS_TRANSMITTED_BAD_REG 0x0104UL
+#define GMAC_TX_UC_PKTS_REG 0x0108UL
+#define GMAC_TX_MC_PKTS_REG 0x010CUL
+#define GMAC_TX_BC_PKTS_REG 0x0110UL
+#define GMAC_TX_PKTS_64OCTETS_REG 0x0114UL
+#define GMAC_TX_PKTS_65TO127OCTETS_REG 0x0118UL
+#define GMAC_TX_PKTS_128TO255OCTETS_REG 0x011CUL
+#define GMAC_TX_PKTS_255TO511OCTETS_REG 0x0120UL
+#define GMAC_TX_PKTS_512TO1023OCTETS_REG 0x0124UL
+#define GMAC_TX_PKTS_1024TO1518OCTETS_REG 0x0128UL
+#define GMAC_TX_PKTS_1519TOMAXOCTETS_REG 0x012CUL
+#define GMAC_TX_EXCESSIVE_LENGTH_DROP_REG 0x014CUL
+#define GMAC_TX_UNDERRUN_REG 0x0150UL
+#define GMAC_TX_TAGGED_REG 0x0154UL
+#define GMAC_TX_CRC_ERROR_REG 0x0158UL
+#define GMAC_TX_PAUSE_FRAMES_REG 0x015CUL
+#define GAMC_RX_MAX_FRAME 0x0170UL
+#define GMAC_LINE_LOOP_BACK_REG 0x01A8UL
+#define GMAC_CF_CRC_STRIP_REG 0x01B0UL
+#define GMAC_MODE_CHANGE_EN_REG 0x01B4UL
+#define GMAC_SIXTEEN_BIT_CNTR_REG 0x01CCUL
+#define GMAC_LD_LINK_COUNTER_REG 0x01D0UL
+#define GMAC_LOOP_REG 0x01DCUL
+#define GMAC_RECV_CONTROL_REG 0x01E0UL
+#define GMAC_PCS_RX_EN_REG 0x01E4UL
+#define GMAC_VLAN_CODE_REG 0x01E8UL
+#define GMAC_RX_OVERRUN_CNT_REG 0x01ECUL
+#define GMAC_RX_LENGTHFIELD_ERR_CNT_REG 0x01F4UL
+#define GMAC_RX_FAIL_COMMA_CNT_REG 0x01F8UL
+#define GMAC_STATION_ADDR_LOW_0_REG 0x0200UL
+#define GMAC_STATION_ADDR_HIGH_0_REG 0x0204UL
+#define GMAC_STATION_ADDR_LOW_1_REG 0x0208UL
+#define GMAC_STATION_ADDR_HIGH_1_REG 0x020CUL
+#define GMAC_STATION_ADDR_LOW_2_REG 0x0210UL
+#define GMAC_STATION_ADDR_HIGH_2_REG 0x0214UL
+#define GMAC_STATION_ADDR_LOW_3_REG 0x0218UL
+#define GMAC_STATION_ADDR_HIGH_3_REG 0x021CUL
+#define GMAC_STATION_ADDR_LOW_4_REG 0x0220UL
+#define GMAC_STATION_ADDR_HIGH_4_REG 0x0224UL
+#define GMAC_STATION_ADDR_LOW_5_REG 0x0228UL
+#define GMAC_STATION_ADDR_HIGH_5_REG 0x022CUL
+#define GMAC_STATION_ADDR_LOW_MSK_0_REG 0x0230UL
+#define GMAC_STATION_ADDR_HIGH_MSK_0_REG 0x0234UL
+#define GMAC_STATION_ADDR_LOW_MSK_1_REG 0x0238UL
+#define GMAC_STATION_ADDR_HIGH_MSK_1_REG 0x023CUL
+#define GMAC_MAC_SKIP_LEN_REG 0x0240UL
+#define GMAC_TX_LOOP_PKT_PRI_REG 0x0378UL
+
+#define XGMAC_INT_STATUS_REG 0x0
+#define XGMAC_INT_ENABLE_REG 0x4
+#define XGMAC_INT_SET_REG 0x8
+#define XGMAC_IERR_U_INFO_REG 0xC
+#define XGMAC_OVF_INFO_REG 0x10
+#define XGMAC_OVF_CNT_REG 0x14
+#define XGMAC_PORT_MODE_REG 0x40
+#define XGMAC_CLK_ENABLE_REG 0x44
+#define XGMAC_RESET_REG 0x48
+#define XGMAC_LINK_CONTROL_REG 0x50
+#define XGMAC_LINK_STATUS_REG 0x54
+#define XGMAC_SPARE_REG 0xC0
+#define XGMAC_SPARE_CNT_REG 0xC4
+
+#define XGMAC_MAC_ENABLE_REG 0x100
+#define XGMAC_MAC_CONTROL_REG 0x104
+#define XGMAC_MAC_IPG_REG 0x120
+#define XGMAC_MAC_MSG_CRC_EN_REG 0x124
+#define XGMAC_MAC_MSG_IMG_REG 0x128
+#define XGMAC_MAC_MSG_FC_CFG_REG 0x12C
+#define XGMAC_MAC_MSG_TC_CFG_REG 0x130
+#define XGMAC_MAC_PAD_SIZE_REG 0x134
+#define XGMAC_MAC_MIN_PKT_SIZE_REG 0x138
+#define XGMAC_MAC_MAX_PKT_SIZE_REG 0x13C
+#define XGMAC_MAC_PAUSE_CTRL_REG 0x160
+#define XGMAC_MAC_PAUSE_TIME_REG 0x164
+#define XGMAC_MAC_PAUSE_GAP_REG 0x168
+#define XGMAC_MAC_PAUSE_LOCAL_MAC_H_REG 0x16C
+#define XGMAC_MAC_PAUSE_LOCAL_MAC_L_REG 0x170
+#define XGMAC_MAC_PAUSE_PEER_MAC_H_REG 0x174
+#define XGMAC_MAC_PAUSE_PEER_MAC_L_REG 0x178
+#define XGMAC_MAC_PFC_PRI_EN_REG 0x17C
+#define XGMAC_MAC_1588_CTRL_REG 0x180
+#define XGMAC_MAC_1588_TX_PORT_DLY_REG 0x184
+#define XGMAC_MAC_1588_RX_PORT_DLY_REG 0x188
+#define XGMAC_MAC_1588_ASYM_DLY_REG 0x18C
+#define XGMAC_MAC_1588_ADJUST_CFG_REG 0x190
+#define XGMAC_MAC_Y1731_ETH_TYPE_REG 0x194
+#define XGMAC_MAC_MIB_CONTROL_REG 0x198
+#define XGMAC_MAC_WAN_RATE_ADJUST_REG 0x19C
+#define XGMAC_MAC_TX_ERR_MARK_REG 0x1A0
+#define XGMAC_MAC_TX_LF_RF_CONTROL_REG 0x1A4
+#define XGMAC_MAC_RX_LF_RF_STATUS_REG 0x1A8
+#define XGMAC_MAC_TX_RUNT_PKT_CNT_REG 0x1C0
+#define XGMAC_MAC_RX_RUNT_PKT_CNT_REG 0x1C4
+#define XGMAC_MAC_RX_PREAM_ERR_PKT_CNT_REG 0x1C8
+#define XGMAC_MAC_TX_LF_RF_TERM_PKT_CNT_REG 0x1CC
+#define XGMAC_MAC_TX_SN_MISMATCH_PKT_CNT_REG 0x1D0
+#define XGMAC_MAC_RX_ERR_MSG_CNT_REG 0x1D4
+#define XGMAC_MAC_RX_ERR_EFD_CNT_REG 0x1D8
+#define XGMAC_MAC_ERR_INFO_REG 0x1DC
+#define XGMAC_MAC_DBG_INFO_REG 0x1E0
+
+#define XGMAC_PCS_BASER_SYNC_THD_REG 0x330
+#define XGMAC_PCS_STATUS1_REG 0x404
+#define XGMAC_PCS_BASER_STATUS1_REG 0x410
+#define XGMAC_PCS_BASER_STATUS2_REG 0x414
+#define XGMAC_PCS_BASER_SEEDA_0_REG 0x420
+#define XGMAC_PCS_BASER_SEEDA_1_REG 0x424
+#define XGMAC_PCS_BASER_SEEDB_0_REG 0x428
+#define XGMAC_PCS_BASER_SEEDB_1_REG 0x42C
+#define XGMAC_PCS_BASER_TEST_CONTROL_REG 0x430
+#define XGMAC_PCS_BASER_TEST_ERR_CNT_REG 0x434
+#define XGMAC_PCS_DBG_INFO_REG 0x4C0
+#define XGMAC_PCS_DBG_INFO1_REG 0x4C4
+#define XGMAC_PCS_DBG_INFO2_REG 0x4C8
+#define XGMAC_PCS_DBG_INFO3_REG 0x4CC
+
+#define XGMAC_PMA_ENABLE_REG 0x700
+#define XGMAC_PMA_CONTROL_REG 0x704
+#define XGMAC_PMA_SIGNAL_STATUS_REG 0x708
+#define XGMAC_PMA_DBG_INFO_REG 0x70C
+#define XGMAC_PMA_FEC_ABILITY_REG 0x740
+#define XGMAC_PMA_FEC_CONTROL_REG 0x744
+#define XGMAC_PMA_FEC_CORR_BLOCK_CNT__REG 0x750
+#define XGMAC_PMA_FEC_UNCORR_BLOCK_CNT__REG 0x760
+
+#define XGMAC_TX_PKTS_FRAGMENT 0x0000
+#define XGMAC_TX_PKTS_UNDERSIZE 0x0008
+#define XGMAC_TX_PKTS_UNDERMIN 0x0010
+#define XGMAC_TX_PKTS_64OCTETS 0x0018
+#define XGMAC_TX_PKTS_65TO127OCTETS 0x0020
+#define XGMAC_TX_PKTS_128TO255OCTETS 0x0028
+#define XGMAC_TX_PKTS_256TO511OCTETS 0x0030
+#define XGMAC_TX_PKTS_512TO1023OCTETS 0x0038
+#define XGMAC_TX_PKTS_1024TO1518OCTETS 0x0040
+#define XGMAC_TX_PKTS_1519TOMAXOCTETS 0x0048
+#define XGMAC_TX_PKTS_1519TOMAXOCTETSOK 0x0050
+#define XGMAC_TX_PKTS_OVERSIZE 0x0058
+#define XGMAC_TX_PKTS_JABBER 0x0060
+#define XGMAC_TX_GOODPKTS 0x0068
+#define XGMAC_TX_GOODOCTETS 0x0070
+#define XGMAC_TX_TOTAL_PKTS 0x0078
+#define XGMAC_TX_TOTALOCTETS 0x0080
+#define XGMAC_TX_UNICASTPKTS 0x0088
+#define XGMAC_TX_MULTICASTPKTS 0x0090
+#define XGMAC_TX_BROADCASTPKTS 0x0098
+#define XGMAC_TX_PRI0PAUSEPKTS 0x00a0
+#define XGMAC_TX_PRI1PAUSEPKTS 0x00a8
+#define XGMAC_TX_PRI2PAUSEPKTS 0x00b0
+#define XGMAC_TX_PRI3PAUSEPKTS 0x00b8
+#define XGMAC_TX_PRI4PAUSEPKTS 0x00c0
+#define XGMAC_TX_PRI5PAUSEPKTS 0x00c8
+#define XGMAC_TX_PRI6PAUSEPKTS 0x00d0
+#define XGMAC_TX_PRI7PAUSEPKTS 0x00d8
+#define XGMAC_TX_MACCTRLPKTS 0x00e0
+#define XGMAC_TX_1731PKTS 0x00e8
+#define XGMAC_TX_1588PKTS 0x00f0
+#define XGMAC_RX_FROMAPPGOODPKTS 0x00f8
+#define XGMAC_RX_FROMAPPBADPKTS 0x0100
+#define XGMAC_TX_ERRALLPKTS 0x0108
+
+#define XGMAC_RX_PKTS_FRAGMENT 0x0110
+#define XGMAC_RX_PKTSUNDERSIZE 0x0118
+#define XGMAC_RX_PKTS_UNDERMIN 0x0120
+#define XGMAC_RX_PKTS_64OCTETS 0x0128
+#define XGMAC_RX_PKTS_65TO127OCTETS 0x0130
+#define XGMAC_RX_PKTS_128TO255OCTETS 0x0138
+#define XGMAC_RX_PKTS_256TO511OCTETS 0x0140
+#define XGMAC_RX_PKTS_512TO1023OCTETS 0x0148
+#define XGMAC_RX_PKTS_1024TO1518OCTETS 0x0150
+#define XGMAC_RX_PKTS_1519TOMAXOCTETS 0x0158
+#define XGMAC_RX_PKTS_1519TOMAXOCTETSOK 0x0160
+#define XGMAC_RX_PKTS_OVERSIZE 0x0168
+#define XGMAC_RX_PKTS_JABBER 0x0170
+#define XGMAC_RX_GOODPKTS 0x0178
+#define XGMAC_RX_GOODOCTETS 0x0180
+#define XGMAC_RX_TOTAL_PKTS 0x0188
+#define XGMAC_RX_TOTALOCTETS 0x0190
+#define XGMAC_RX_UNICASTPKTS 0x0198
+#define XGMAC_RX_MULTICASTPKTS 0x01a0
+#define XGMAC_RX_BROADCASTPKTS 0x01a8
+#define XGMAC_RX_PRI0PAUSEPKTS 0x01b0
+#define XGMAC_RX_PRI1PAUSEPKTS 0x01b8
+#define XGMAC_RX_PRI2PAUSEPKTS 0x01c0
+#define XGMAC_RX_PRI3PAUSEPKTS 0x01c8
+#define XGMAC_RX_PRI4PAUSEPKTS 0x01d0
+#define XGMAC_RX_PRI5PAUSEPKTS 0x01d8
+#define XGMAC_RX_PRI6PAUSEPKTS 0x01e0
+#define XGMAC_RX_PRI7PAUSEPKTS 0x01e8
+#define XGMAC_RX_MACCTRLPKTS 0x01f0
+#define XGMAC_TX_SENDAPPGOODPKTS 0x01f8
+#define XGMAC_TX_SENDAPPBADPKTS 0x0200
+#define XGMAC_RX_1731PKTS 0x0208
+#define XGMAC_RX_SYMBOLERRPKTS 0x0210
+#define XGMAC_RX_FCSERRPKTS 0x0218
+
+#define DSAF_SRAM_INIT_OVER_M 0xff
+#define DSAFV2_SRAM_INIT_OVER_M 0x3ff
+#define DSAF_SRAM_INIT_OVER_S 0
+
+#define DSAF_CFG_EN_S 0
+#define DSAF_CFG_TC_MODE_S 1
+#define DSAF_CFG_CRC_EN_S 2
+#define DSAF_CFG_SBM_INIT_S 3
+#define DSAF_CFG_MIX_MODE_S 4
+#define DSAF_CFG_STP_MODE_S 5
+#define DSAF_CFG_LOCA_ADDR_EN_S 6
+#define DSAFV2_CFG_VLAN_TAG_MODE_S 17
+
+#define DSAF_CNT_CLR_CE_S 0
+#define DSAF_SNAP_EN_S 1
+
+#define HNS_DSAF_PFC_UNIT_CNT_FOR_XGE 41
+#define HNS_DSAF_PFC_UNIT_CNT_FOR_GE_1000 410
+#define HNS_DSAF_PFC_UNIT_CNT_FOR_GE_2500 103
+
+#define DSAF_PFC_UNINT_CNT_M ((1ULL << 9) - 1)
+#define DSAF_PFC_UNINT_CNT_S 0
+
+#define DSAF_MAC_PAUSE_RX_EN_B 2
+#define DSAF_PFC_PAUSE_RX_EN_B 1
+#define DSAF_PFC_PAUSE_TX_EN_B 0
+
+#define DSAF_PPE_QID_CFG_M 0xFF
+#define DSAF_PPE_QID_CFG_S 0
+
+#define DSAF_SW_PORT_TYPE_M 3
+#define DSAF_SW_PORT_TYPE_S 0
+
+#define DSAF_STP_PORT_TYPE_M 7
+#define DSAF_STP_PORT_TYPE_S 0
+
+#define DSAF_INODE_IN_PORT_NUM_M 7
+#define DSAF_INODE_IN_PORT_NUM_S 0
+#define DSAFV2_INODE_IN_PORT1_NUM_M (7ULL << 3)
+#define DSAFV2_INODE_IN_PORT1_NUM_S 3
+#define DSAFV2_INODE_IN_PORT2_NUM_M (7ULL << 6)
+#define DSAFV2_INODE_IN_PORT2_NUM_S 6
+#define DSAFV2_INODE_IN_PORT3_NUM_M (7ULL << 9)
+#define DSAFV2_INODE_IN_PORT3_NUM_S 9
+#define DSAFV2_INODE_IN_PORT4_NUM_M (7ULL << 12)
+#define DSAFV2_INODE_IN_PORT4_NUM_S 12
+#define DSAFV2_INODE_IN_PORT5_NUM_M (7ULL << 15)
+#define DSAFV2_INODE_IN_PORT5_NUM_S 15
+
+#define HNS_DSAF_I4TC_CFG 0x18688688
+#define HNS_DSAF_I8TC_CFG 0x18FAC688
+
+#define DSAF_SBM_CFG_SHCUT_EN_S 0
+#define DSAF_SBM_CFG_EN_S 1
+#define DSAF_SBM_CFG_MIB_EN_S 2
+#define DSAF_SBM_CFG_ECC_INVERT_EN_S 3
+
+#define DSAF_SBM_CFG0_VC1_MAX_BUF_NUM_S 0
+#define DSAF_SBM_CFG0_VC1_MAX_BUF_NUM_M (((1ULL << 10) - 1) << 0)
+#define DSAF_SBM_CFG0_VC0_MAX_BUF_NUM_S 10
+#define DSAF_SBM_CFG0_VC0_MAX_BUF_NUM_M (((1ULL << 10) - 1) << 10)
+#define DSAF_SBM_CFG0_COM_MAX_BUF_NUM_S 20
+#define DSAF_SBM_CFG0_COM_MAX_BUF_NUM_M (((1ULL << 11) - 1) << 20)
+
+#define DSAF_SBM_CFG1_TC4_MAX_BUF_NUM_S 0
+#define DSAF_SBM_CFG1_TC4_MAX_BUF_NUM_M (((1ULL << 10) - 1) << 0)
+#define DSAF_SBM_CFG1_TC0_MAX_BUF_NUM_S 10
+#define DSAF_SBM_CFG1_TC0_MAX_BUF_NUM_M (((1ULL << 10) - 1) << 10)
+
+#define DSAF_SBM_CFG2_SET_BUF_NUM_S 0
+#define DSAF_SBM_CFG2_SET_BUF_NUM_M (((1ULL << 10) - 1) << 0)
+#define DSAF_SBM_CFG2_RESET_BUF_NUM_S 10
+#define DSAF_SBM_CFG2_RESET_BUF_NUM_M (((1ULL << 10) - 1) << 10)
+
+#define DSAF_SBM_CFG3_SET_BUF_NUM_NO_PFC_S 0
+#define DSAF_SBM_CFG3_SET_BUF_NUM_NO_PFC_M (((1ULL << 10) - 1) << 0)
+#define DSAF_SBM_CFG3_RESET_BUF_NUM_NO_PFC_S 10
+#define DSAF_SBM_CFG3_RESET_BUF_NUM_NO_PFC_M (((1ULL << 10) - 1) << 10)
+
+#define DSAFV2_SBM_CFG0_VC1_MAX_BUF_NUM_S 0
+#define DSAFV2_SBM_CFG0_VC1_MAX_BUF_NUM_M (((1ULL << 9) - 1) << 0)
+#define DSAFV2_SBM_CFG0_VC0_MAX_BUF_NUM_S 9
+#define DSAFV2_SBM_CFG0_VC0_MAX_BUF_NUM_M (((1ULL << 9) - 1) << 9)
+#define DSAFV2_SBM_CFG0_COM_MAX_BUF_NUM_S 18
+#define DSAFV2_SBM_CFG0_COM_MAX_BUF_NUM_M (((1ULL << 10) - 1) << 18)
+
+#define DSAFV2_SBM_CFG1_TC4_MAX_BUF_NUM_S 0
+#define DSAFV2_SBM_CFG1_TC4_MAX_BUF_NUM_M (((1ULL << 9) - 1) << 0)
+#define DSAFV2_SBM_CFG1_TC0_MAX_BUF_NUM_S 9
+#define DSAFV2_SBM_CFG1_TC0_MAX_BUF_NUM_M (((1ULL << 9) - 1) << 9)
+
+#define DSAFV2_SBM_CFG2_SET_BUF_NUM_S 0
+#define DSAFV2_SBM_CFG2_SET_BUF_NUM_M (((1ULL << 9) - 1) << 0)
+#define DSAFV2_SBM_CFG2_RESET_BUF_NUM_S 9
+#define DSAFV2_SBM_CFG2_RESET_BUF_NUM_M (((1ULL << 9) - 1) << 9)
+
+#define DSAFV2_SBM_CFG3_SET_BUF_NUM_NO_PFC_S 0
+#define DSAFV2_SBM_CFG3_SET_BUF_NUM_NO_PFC_M (((1ULL << 9) - 1) << 0)
+#define DSAFV2_SBM_CFG3_RESET_BUF_NUM_NO_PFC_S 9
+#define DSAFV2_SBM_CFG3_RESET_BUF_NUM_NO_PFC_M (((1ULL << 9) - 1) << 9)
+
+#define DSAFV2_SBM_CFG4_SET_BUF_NUM_NO_PFC_S 0
+#define DSAFV2_SBM_CFG4_SET_BUF_NUM_NO_PFC_M (((1ULL << 9) - 1) << 0)
+#define DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_S 9
+#define DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_M (((1ULL << 9) - 1) << 9)
+
+#define DSAF_CHNS_MASK 0x3f000
+#define DSAF_SBM_ROCEE_CFG_CRD_EN_B 2
+#define SRST_TIME_INTERVAL 20
+#define DSAFV2_SBM_CFG2_ROCEE_SET_BUF_NUM_S 0
+#define DSAFV2_SBM_CFG2_ROCEE_SET_BUF_NUM_M (((1ULL << 8) - 1) << 0)
+#define DSAFV2_SBM_CFG2_ROCEE_RESET_BUF_NUM_S 8
+#define DSAFV2_SBM_CFG2_ROCEE_RESET_BUF_NUM_M (((1ULL << 8) - 1) << 8)
+
+#define DSAFV2_SBM_CFG2_PPE_SET_BUF_NUM_S (0)
+#define DSAFV2_SBM_CFG2_PPE_SET_BUF_NUM_M (((1ULL << 6) - 1) << 0)
+#define DSAFV2_SBM_CFG2_PPE_RESET_BUF_NUM_S (6)
+#define DSAFV2_SBM_CFG2_PPE_RESET_BUF_NUM_M (((1ULL << 6) - 1) << 6)
+#define DSAFV2_SBM_CFG2_PPE_CFG_USEFUL_NUM_S (12)
+#define DSAFV2_SBM_CFG2_PPE_CFG_USEFUL_NUM_M (((1ULL << 6) - 1) << 12)
+
+#define DSAF_TBL_TCAM_ADDR_S 0
+#define DSAF_TBL_TCAM_ADDR_M ((1ULL << 9) - 1)
+
+#define DSAF_TBL_LINE_ADDR_S 0
+#define DSAF_TBL_LINE_ADDR_M ((1ULL << 15) - 1)
+
+#define DSAF_TBL_MCAST_CFG4_VM128_112_S 0
+#define DSAF_TBL_MCAST_CFG4_VM128_112_M (((1ULL << 7) - 1) << 0)
+#define DSAF_TBL_MCAST_CFG4_ITEM_VLD_S 7
+#define DSAF_TBL_MCAST_CFG4_OLD_EN_S 8
+
+#define DSAF_TBL_MCAST_CFG0_XGE5_0_S 0
+#define DSAF_TBL_MCAST_CFG0_XGE5_0_M (((1ULL << 6) - 1) << 0)
+#define DSAF_TBL_MCAST_CFG0_VM25_0_S 6
+#define DSAF_TBL_MCAST_CFG0_VM25_0_M (((1ULL << 26) - 1) << 6)
+
+#define DSAF_TBL_UCAST_CFG1_OUT_PORT_S 0
+#define DSAF_TBL_UCAST_CFG1_OUT_PORT_M (((1ULL << 8) - 1) << 0)
+#define DSAF_TBL_UCAST_CFG1_DVC_S 8
+#define DSAF_TBL_UCAST_CFG1_MAC_DISCARD_S 9
+#define DSAF_TBL_UCAST_CFG1_ITEM_VLD_S 10
+#define DSAF_TBL_UCAST_CFG1_OLD_EN_S 11
+
+#define DSAF_TBL_LINE_CFG_OUT_PORT_S 0
+#define DSAF_TBL_LINE_CFG_OUT_PORT_M (((1ULL << 8) - 1) << 0)
+#define DSAF_TBL_LINE_CFG_DVC_S 8
+#define DSAF_TBL_LINE_CFG_MAC_DISCARD_S 9
+
+#define DSAF_TBL_PUL_OLD_RSLT_RE_S 0
+#define DSAF_TBL_PUL_MCAST_VLD_S 1
+#define DSAF_TBL_PUL_TCAM_DATA_VLD_S 2
+#define DSAF_TBL_PUL_UCAST_VLD_S 3
+#define DSAF_TBL_PUL_LINE_VLD_S 4
+#define DSAF_TBL_PUL_TCAM_LOAD_S 5
+#define DSAF_TBL_PUL_LINE_LOAD_S 6
+
+#define DSAF_TBL_DFX_LINE_LKUP_NUM_EN_S 0
+#define DSAF_TBL_DFX_UC_LKUP_NUM_EN_S 1
+#define DSAF_TBL_DFX_MC_LKUP_NUM_EN_S 2
+#define DSAF_TBL_DFX_BC_LKUP_NUM_EN_S 3
+#define DSAF_TBL_DFX_RAM_ERR_INJECT_EN_S 4
+
+#define DSAF_VOQ_BP_ALL_DOWNTHRD_S 0
+#define DSAF_VOQ_BP_ALL_DOWNTHRD_M (((1ULL << 10) - 1) << 0)
+#define DSAF_VOQ_BP_ALL_UPTHRD_S 10
+#define DSAF_VOQ_BP_ALL_UPTHRD_M (((1ULL << 10) - 1) << 10)
+
+#define DSAF_XGE_GE_WORK_MODE_S 0
+#define DSAF_XGE_GE_LOOPBACK_S 1
+
+#define DSAF_FC_XGE_TX_PAUSE_S 0
+#define DSAF_REGS_XGE_CNT_CAR_S 1
+
+#define PPE_CFG_QID_MODE_DEF_QID_S 0
+#define PPE_CFG_QID_MODE_DEF_QID_M (0xff << PPE_CFG_QID_MODE_DEF_QID_S)
+
+#define PPE_CFG_QID_MODE_CF_QID_MODE_S 8
+#define PPE_CFG_QID_MODE_CF_QID_MODE_M (0x7 << PPE_CFG_QID_MODE_CF_QID_MODE_S)
+
+#define PPEV2_CFG_RSS_TBL_4N0_S 0
+#define PPEV2_CFG_RSS_TBL_4N0_M (((1UL << 5) - 1) << PPEV2_CFG_RSS_TBL_4N0_S)
+
+#define PPEV2_CFG_RSS_TBL_4N1_S 8
+#define PPEV2_CFG_RSS_TBL_4N1_M (((1UL << 5) - 1) << PPEV2_CFG_RSS_TBL_4N1_S)
+
+#define PPEV2_CFG_RSS_TBL_4N2_S 16
+#define PPEV2_CFG_RSS_TBL_4N2_M (((1UL << 5) - 1) << PPEV2_CFG_RSS_TBL_4N2_S)
+
+#define PPEV2_CFG_RSS_TBL_4N3_S 24
+#define PPEV2_CFG_RSS_TBL_4N3_M (((1UL << 5) - 1) << PPEV2_CFG_RSS_TBL_4N3_S)
+
+#define DSAFV2_SERDES_LBK_EN_B 8
+#define DSAFV2_SERDES_LBK_QID_S 0
+#define DSAFV2_SERDES_LBK_QID_M (((1UL << 8) - 1) << DSAFV2_SERDES_LBK_QID_S)
+
+#define PPE_CNT_CLR_CE_B 0
+#define PPE_CNT_CLR_SNAP_EN_B 1
+
+#define PPE_INT_GAPTIME_B 0
+#define PPE_INT_GAPTIME_M 0x3ff
+
+#define PPE_COMMON_CNT_CLR_CE_B 0
+#define PPE_COMMON_CNT_CLR_SNAP_EN_B 1
+#define RCB_COM_TSO_MODE_B 0
+#define RCB_COM_CFG_FNA_B 1
+#define RCB_COM_CFG_FA_B 0
+
+#define GMAC_DUPLEX_TYPE_B 0
+
+#define GMAC_TX_WATER_LINE_MASK ((1UL << 8) - 1)
+#define GMAC_TX_WATER_LINE_SHIFT 0
+
+#define GMAC_FC_TX_TIMER_S 0
+#define GMAC_FC_TX_TIMER_M 0xffff
+
+#define GMAC_MAX_FRM_SIZE_S 0
+#define GMAC_MAX_FRM_SIZE_M 0xffff
+
+#define GMAC_PORT_MODE_S 0
+#define GMAC_PORT_MODE_M 0xf
+
+#define GMAC_RGMII_1000M_DELAY_B 4
+#define GMAC_MII_TX_EDGE_SEL_B 5
+#define GMAC_FIFO_ERR_AUTO_RST_B 6
+#define GMAC_DBG_CLK_LOS_MSK_B 7
+
+#define GMAC_PORT_RX_EN_B 1
+#define GMAC_PORT_TX_EN_B 2
+
+#define GMAC_PAUSE_EN_RX_FDFC_B 0
+#define GMAC_PAUSE_EN_TX_FDFC_B 1
+#define GMAC_PAUSE_EN_TX_HDFC_B 2
+
+#define GMAC_SHORT_RUNTS_THR_S 0
+#define GMAC_SHORT_RUNTS_THR_M 0x1f
+
+#define GMAC_AN_NEG_STAT_FD_B 5
+#define GMAC_AN_NEG_STAT_HD_B 6
+#define GMAC_AN_NEG_STAT_RF1_DUPLIEX_B 12
+#define GMAC_AN_NEG_STAT_RF2_B 13
+
+#define GMAC_AN_NEG_STAT_NP_LNK_OK_B 15
+#define GMAC_AN_NEG_STAT_RX_SYNC_OK_B 20
+#define GMAC_AN_NEG_STAT_AN_DONE_B 21
+
+#define GMAC_AN_NEG_STAT_PS_S 7
+#define GMAC_AN_NEG_STAT_PS_M (0x3 << GMAC_AN_NEG_STAT_PS_S)
+
+#define GMAC_AN_NEG_STAT_SPEED_S 10
+#define GMAC_AN_NEG_STAT_SPEED_M (0x3 << GMAC_AN_NEG_STAT_SPEED_S)
+
+#define GMAC_TX_AN_EN_B 5
+#define GMAC_TX_CRC_ADD_B 6
+#define GMAC_TX_PAD_EN_B 7
+
+#define GMAC_LINE_LOOPBACK_B 0
+
+#define GMAC_LP_REG_CF_EXT_DRV_LP_B 1
+#define GMAC_LP_REG_CF2MI_LP_EN_B 2
+
+#define GMAC_MODE_CHANGE_EB_B 0
+#define GMAC_UC_MATCH_EN_B 0
+#define GMAC_ADDR_EN_B 16
+
+#define GMAC_RECV_CTRL_STRIP_PAD_EN_B 3
+#define GMAC_RECV_CTRL_RUNT_PKT_EN_B 4
+
+#define GMAC_TX_LOOP_PKT_HIG_PRI_B 0
+#define GMAC_TX_LOOP_PKT_EN_B 1
+
+#define XGMAC_PORT_MODE_TX_S 0x0
+#define XGMAC_PORT_MODE_TX_M (0x3 << XGMAC_PORT_MODE_TX_S)
+#define XGMAC_PORT_MODE_TX_40G_B 0x3
+#define XGMAC_PORT_MODE_RX_S 0x4
+#define XGMAC_PORT_MODE_RX_M (0x3 << XGMAC_PORT_MODE_RX_S)
+#define XGMAC_PORT_MODE_RX_40G_B 0x7
+
+#define XGMAC_ENABLE_TX_B 0
+#define XGMAC_ENABLE_RX_B 1
+
+#define XGMAC_UNIDIR_EN_B 0
+#define XGMAC_RF_TX_EN_B 1
+#define XGMAC_LF_RF_INSERT_S 2
+#define XGMAC_LF_RF_INSERT_M (0x3 << XGMAC_LF_RF_INSERT_S)
+
+#define XGMAC_CTL_TX_FCS_B 0
+#define XGMAC_CTL_TX_PAD_B 1
+#define XGMAC_CTL_TX_PREAMBLE_TRANS_B 3
+#define XGMAC_CTL_TX_UNDER_MIN_ERR_B 4
+#define XGMAC_CTL_TX_TRUNCATE_B 5
+#define XGMAC_CTL_TX_1588_B 8
+#define XGMAC_CTL_TX_1731_B 9
+#define XGMAC_CTL_TX_PFC_B 10
+#define XGMAC_CTL_RX_FCS_B 16
+#define XGMAC_CTL_RX_FCS_STRIP_B 17
+#define XGMAC_CTL_RX_PREAMBLE_TRANS_B 19
+#define XGMAC_CTL_RX_UNDER_MIN_ERR_B 20
+#define XGMAC_CTL_RX_TRUNCATE_B 21
+#define XGMAC_CTL_RX_1588_B 24
+#define XGMAC_CTL_RX_1731_B 25
+#define XGMAC_CTL_RX_PFC_B 26
+
+#define XGMAC_PMA_FEC_CTL_TX_B 0
+#define XGMAC_PMA_FEC_CTL_RX_B 1
+#define XGMAC_PMA_FEC_CTL_ERR_EN 2
+#define XGMAC_PMA_FEC_CTL_ERR_SH 3
+
+#define XGMAC_PAUSE_CTL_TX_B 0
+#define XGMAC_PAUSE_CTL_RX_B 1
+#define XGMAC_PAUSE_CTL_RSP_MODE_B 2
+#define XGMAC_PAUSE_CTL_TX_XOFF_B 3
+
+static inline void dsaf_write_reg(u8 __iomem *base, u32 reg, u32 value)
+{
+ writel(value, base + reg);
+}
+
+#define dsaf_write_dev(a, reg, value) \
+ dsaf_write_reg((a)->io_base, (reg), (value))
+
+static inline u32 dsaf_read_reg(u8 __iomem *base, u32 reg)
+{
+ return readl(base + reg);
+}
+
+static inline void dsaf_write_syscon(struct regmap *base, u32 reg, u32 value)
+{
+ regmap_write(base, reg, value);
+}
+
+static inline int dsaf_read_syscon(struct regmap *base, u32 reg, u32 *val)
+{
+ return regmap_read(base, reg, val);
+}
+
+#define dsaf_read_dev(a, reg) \
+ dsaf_read_reg((a)->io_base, (reg))
+
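+/* read-modify-write helpers: dsaf_set_field() clears the bits covered by
+ * mask and ORs in val shifted into place, e.g.
+ * dsaf_set_field(val, XGMAC_LF_RF_INSERT_M, XGMAC_LF_RF_INSERT_S, mode)
+ */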
+#define dsaf_set_field(origin, mask, shift, val) \
+ do { \
+ (origin) &= (~(mask)); \
+ (origin) |= (((val) << (shift)) & (mask)); \
+ } while (0)
+
+#define dsaf_set_bit(origin, shift, val) \
+ dsaf_set_field((origin), (1ull << (shift)), (shift), (val))
+
+static inline void dsaf_set_reg_field(u8 __iomem *base, u32 reg, u32 mask,
+ u32 shift, u32 val)
+{
+ u32 origin = dsaf_read_reg(base, reg);
+
+ dsaf_set_field(origin, mask, shift, val);
+ dsaf_write_reg(base, reg, origin);
+}
+
+#define dsaf_set_dev_field(dev, reg, mask, shift, val) \
+ dsaf_set_reg_field((dev)->io_base, (reg), (mask), (shift), (val))
+
+#define dsaf_set_dev_bit(dev, reg, bit, val) \
+ dsaf_set_reg_field((dev)->io_base, (reg), (1ull << (bit)), (bit), (val))
+
+#define dsaf_get_field(origin, mask, shift) (((origin) & (mask)) >> (shift))
+
+#define dsaf_get_bit(origin, shift) \
+ dsaf_get_field((origin), (1ull << (shift)), (shift))
+
+static inline u32 dsaf_get_reg_field(u8 __iomem *base, u32 reg, u32 mask,
+ u32 shift)
+{
+ u32 origin;
+
+ origin = dsaf_read_reg(base, reg);
+ return dsaf_get_field(origin, mask, shift);
+}
+
+#define dsaf_get_dev_field(dev, reg, mask, shift) \
+ dsaf_get_reg_field((dev)->io_base, (reg), (mask), (shift))
+
+#define dsaf_get_dev_bit(dev, reg, bit) \
+ dsaf_get_reg_field((dev)->io_base, (reg), (1ull << (bit)), (bit))
+
+#define dsaf_write_b(addr, data)\
+ writeb((data), (__iomem u8 *)(addr))
+#define dsaf_read_b(addr)\
+ readb((__iomem u8 *)(addr))
+
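+/* the 64-bit XGMAC MIB counters live at io_base + 0xc00 + <counter offset>
+ * (e.g. XGMAC_RX_GOODOCTETS) and are read with a single readq()
+ */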
+#define hns_mac_reg_read64(drv, offset) \
+ readq((__iomem void *)(((drv)->io_base + 0xc00 + (offset))))
+
+#endif /* _DSAF_REG_H */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
new file mode 100644
index 0000000000..fc26ffaae6
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
@@ -0,0 +1,819 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ */
+
+#include <linux/io-64-nonatomic-hi-lo.h>
+#include <linux/of_mdio.h>
+#include "hns_dsaf_main.h"
+#include "hns_dsaf_mac.h"
+#include "hns_dsaf_xgmac.h"
+#include "hns_dsaf_reg.h"
+
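+/* maps each ethtool statistics name to the offset of the matching field in
+ * struct mac_hw_stats; hns_xgmac_get_stats() walks this table to fill the
+ * ethtool buffer
+ */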
+static const struct mac_stats_string g_xgmac_stats_string[] = {
+ {"xgmac_tx_bad_pkts_minto64", MAC_STATS_FIELD_OFF(tx_fragment_err)},
+ {"xgmac_tx_good_pkts_minto64", MAC_STATS_FIELD_OFF(tx_undersize)},
+ {"xgmac_tx_total_pkts_minto64", MAC_STATS_FIELD_OFF(tx_under_min_pkts)},
+ {"xgmac_tx_pkts_64", MAC_STATS_FIELD_OFF(tx_64bytes)},
+ {"xgmac_tx_pkts_65to127", MAC_STATS_FIELD_OFF(tx_65to127)},
+ {"xgmac_tx_pkts_128to255", MAC_STATS_FIELD_OFF(tx_128to255)},
+ {"xgmac_tx_pkts_256to511", MAC_STATS_FIELD_OFF(tx_256to511)},
+ {"xgmac_tx_pkts_512to1023", MAC_STATS_FIELD_OFF(tx_512to1023)},
+ {"xgmac_tx_pkts_1024to1518", MAC_STATS_FIELD_OFF(tx_1024to1518)},
+ {"xgmac_tx_pkts_1519tomax", MAC_STATS_FIELD_OFF(tx_1519tomax)},
+ {"xgmac_tx_good_pkts_1519tomax",
+ MAC_STATS_FIELD_OFF(tx_1519tomax_good)},
+ {"xgmac_tx_good_pkts_untralmax", MAC_STATS_FIELD_OFF(tx_oversize)},
+ {"xgmac_tx_bad_pkts_untralmax", MAC_STATS_FIELD_OFF(tx_jabber_err)},
+ {"xgmac_tx_good_pkts_all", MAC_STATS_FIELD_OFF(tx_good_pkts)},
+ {"xgmac_tx_good_byte_all", MAC_STATS_FIELD_OFF(tx_good_bytes)},
+ {"xgmac_tx_total_pkt", MAC_STATS_FIELD_OFF(tx_total_pkts)},
+ {"xgmac_tx_total_byt", MAC_STATS_FIELD_OFF(tx_total_bytes)},
+ {"xgmac_tx_uc_pkt", MAC_STATS_FIELD_OFF(tx_uc_pkts)},
+ {"xgmac_tx_mc_pkt", MAC_STATS_FIELD_OFF(tx_mc_pkts)},
+ {"xgmac_tx_bc_pkt", MAC_STATS_FIELD_OFF(tx_bc_pkts)},
+ {"xgmac_tx_pause_frame_num", MAC_STATS_FIELD_OFF(tx_pfc_tc0)},
+ {"xgmac_tx_pfc_per_1pause_framer", MAC_STATS_FIELD_OFF(tx_pfc_tc1)},
+ {"xgmac_tx_pfc_per_2pause_framer", MAC_STATS_FIELD_OFF(tx_pfc_tc2)},
+ {"xgmac_tx_pfc_per_3pause_framer", MAC_STATS_FIELD_OFF(tx_pfc_tc3)},
+ {"xgmac_tx_pfc_per_4pause_framer", MAC_STATS_FIELD_OFF(tx_pfc_tc4)},
+ {"xgmac_tx_pfc_per_5pause_framer", MAC_STATS_FIELD_OFF(tx_pfc_tc5)},
+ {"xgmac_tx_pfc_per_6pause_framer", MAC_STATS_FIELD_OFF(tx_pfc_tc6)},
+ {"xgmac_tx_pfc_per_7pause_framer", MAC_STATS_FIELD_OFF(tx_pfc_tc7)},
+ {"xgmac_tx_mac_ctrol_frame", MAC_STATS_FIELD_OFF(tx_ctrl)},
+ {"xgmac_tx_1731_pkts", MAC_STATS_FIELD_OFF(tx_1731_pkts)},
+ {"xgmac_tx_1588_pkts", MAC_STATS_FIELD_OFF(tx_1588_pkts)},
+ {"xgmac_rx_good_pkt_from_dsaf", MAC_STATS_FIELD_OFF(rx_good_from_sw)},
+ {"xgmac_rx_bad_pkt_from_dsaf", MAC_STATS_FIELD_OFF(rx_bad_from_sw)},
+ {"xgmac_tx_bad_pkt_64tomax", MAC_STATS_FIELD_OFF(tx_bad_pkts)},
+
+ {"xgmac_rx_bad_pkts_minto64", MAC_STATS_FIELD_OFF(rx_fragment_err)},
+ {"xgmac_rx_good_pkts_minto64", MAC_STATS_FIELD_OFF(rx_undersize)},
+ {"xgmac_rx_total_pkts_minto64", MAC_STATS_FIELD_OFF(rx_under_min)},
+ {"xgmac_rx_pkt_64", MAC_STATS_FIELD_OFF(rx_64bytes)},
+ {"xgmac_rx_pkt_65to127", MAC_STATS_FIELD_OFF(rx_65to127)},
+ {"xgmac_rx_pkt_128to255", MAC_STATS_FIELD_OFF(rx_128to255)},
+ {"xgmac_rx_pkt_256to511", MAC_STATS_FIELD_OFF(rx_256to511)},
+ {"xgmac_rx_pkt_512to1023", MAC_STATS_FIELD_OFF(rx_512to1023)},
+ {"xgmac_rx_pkt_1024to1518", MAC_STATS_FIELD_OFF(rx_1024to1518)},
+ {"xgmac_rx_pkt_1519tomax", MAC_STATS_FIELD_OFF(rx_1519tomax)},
+ {"xgmac_rx_good_pkt_1519tomax", MAC_STATS_FIELD_OFF(rx_1519tomax_good)},
+ {"xgmac_rx_good_pkt_untramax", MAC_STATS_FIELD_OFF(rx_oversize)},
+ {"xgmac_rx_bad_pkt_untramax", MAC_STATS_FIELD_OFF(rx_jabber_err)},
+ {"xgmac_rx_good_pkt", MAC_STATS_FIELD_OFF(rx_good_pkts)},
+ {"xgmac_rx_good_byt", MAC_STATS_FIELD_OFF(rx_good_bytes)},
+ {"xgmac_rx_pkt", MAC_STATS_FIELD_OFF(rx_total_pkts)},
+ {"xgmac_rx_byt", MAC_STATS_FIELD_OFF(rx_total_bytes)},
+ {"xgmac_rx_uc_pkt", MAC_STATS_FIELD_OFF(rx_uc_pkts)},
+ {"xgmac_rx_mc_pkt", MAC_STATS_FIELD_OFF(rx_mc_pkts)},
+ {"xgmac_rx_bc_pkt", MAC_STATS_FIELD_OFF(rx_bc_pkts)},
+ {"xgmac_rx_pause_frame_num", MAC_STATS_FIELD_OFF(rx_pfc_tc0)},
+ {"xgmac_rx_pfc_per_1pause_frame", MAC_STATS_FIELD_OFF(rx_pfc_tc1)},
+ {"xgmac_rx_pfc_per_2pause_frame", MAC_STATS_FIELD_OFF(rx_pfc_tc2)},
+ {"xgmac_rx_pfc_per_3pause_frame", MAC_STATS_FIELD_OFF(rx_pfc_tc3)},
+ {"xgmac_rx_pfc_per_4pause_frame", MAC_STATS_FIELD_OFF(rx_pfc_tc4)},
+ {"xgmac_rx_pfc_per_5pause_frame", MAC_STATS_FIELD_OFF(rx_pfc_tc5)},
+ {"xgmac_rx_pfc_per_6pause_frame", MAC_STATS_FIELD_OFF(rx_pfc_tc6)},
+ {"xgmac_rx_pfc_per_7pause_frame", MAC_STATS_FIELD_OFF(rx_pfc_tc7)},
+ {"xgmac_rx_mac_control", MAC_STATS_FIELD_OFF(rx_unknown_ctrl)},
+ {"xgmac_tx_good_pkt_todsaf", MAC_STATS_FIELD_OFF(tx_good_to_sw)},
+ {"xgmac_tx_bad_pkt_todsaf", MAC_STATS_FIELD_OFF(tx_bad_to_sw)},
+ {"xgmac_rx_1731_pkt", MAC_STATS_FIELD_OFF(rx_1731_pkts)},
+ {"xgmac_rx_symbol_err_pkt", MAC_STATS_FIELD_OFF(rx_symbol_err)},
+ {"xgmac_rx_fcs_pkt", MAC_STATS_FIELD_OFF(rx_fcs_err)}
+};
+
+/**
+ *hns_xgmac_tx_enable - xgmac port tx enable
+ *@drv: mac driver
+ *@value: value of enable
+ */
+static void hns_xgmac_tx_enable(struct mac_driver *drv, u32 value)
+{
+ dsaf_set_dev_bit(drv, XGMAC_MAC_ENABLE_REG, XGMAC_ENABLE_TX_B, !!value);
+}
+
+/**
+ *hns_xgmac_rx_enable - xgmac port rx enable
+ *@drv: mac driver
+ *@value: value of enable
+ */
+static void hns_xgmac_rx_enable(struct mac_driver *drv, u32 value)
+{
+ dsaf_set_dev_bit(drv, XGMAC_MAC_ENABLE_REG, XGMAC_ENABLE_RX_B, !!value);
+}
+
+/**
+ * hns_xgmac_lf_rf_insert - insert lf rf control about xgmac
+ * @mac_drv: mac driver
+ * @mode: insert rf or lf
+ */
+static void hns_xgmac_lf_rf_insert(struct mac_driver *mac_drv, u32 mode)
+{
+ dsaf_set_dev_field(mac_drv, XGMAC_MAC_TX_LF_RF_CONTROL_REG,
+ XGMAC_LF_RF_INSERT_M, XGMAC_LF_RF_INSERT_S, mode);
+}
+
+/**
+ * hns_xgmac_lf_rf_control_init - initialize the lf rf control register
+ * @mac_drv: mac driver
+ */
+static void hns_xgmac_lf_rf_control_init(struct mac_driver *mac_drv)
+{
+ u32 val = 0;
+
+ dsaf_set_bit(val, XGMAC_UNIDIR_EN_B, 0);
+ dsaf_set_bit(val, XGMAC_RF_TX_EN_B, 1);
+ dsaf_set_field(val, XGMAC_LF_RF_INSERT_M, XGMAC_LF_RF_INSERT_S, 0);
+ dsaf_write_dev(mac_drv, XGMAC_MAC_TX_LF_RF_CONTROL_REG, val);
+}
+
+/**
+ *hns_xgmac_enable - enable xgmac port
+ *@mac_drv: mac driver
+ *@mode: mode of mac port
+ */
+static void hns_xgmac_enable(void *mac_drv, enum mac_commom_mode mode)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ hns_xgmac_lf_rf_insert(drv, HNS_XGMAC_NO_LF_RF_INSERT);
+
+ /* enable XGE RX/TX */
+ if (mode == MAC_COMM_MODE_TX) {
+ hns_xgmac_tx_enable(drv, 1);
+ } else if (mode == MAC_COMM_MODE_RX) {
+ hns_xgmac_rx_enable(drv, 1);
+ } else if (mode == MAC_COMM_MODE_RX_AND_TX) {
+ hns_xgmac_tx_enable(drv, 1);
+ hns_xgmac_rx_enable(drv, 1);
+ } else {
+ dev_err(drv->dev, "error mac mode:%d\n", mode);
+ }
+}
+
+/**
+ *hns_xgmac_disable - disable xgmac port
+ *@mac_drv: mac driver
+ *@mode: mode of mac port
+ */
+static void hns_xgmac_disable(void *mac_drv, enum mac_commom_mode mode)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ if (mode == MAC_COMM_MODE_TX) {
+ hns_xgmac_tx_enable(drv, 0);
+ } else if (mode == MAC_COMM_MODE_RX) {
+ hns_xgmac_rx_enable(drv, 0);
+ } else if (mode == MAC_COMM_MODE_RX_AND_TX) {
+ hns_xgmac_tx_enable(drv, 0);
+ hns_xgmac_rx_enable(drv, 0);
+ }
+ hns_xgmac_lf_rf_insert(drv, HNS_XGMAC_LF_INSERT);
+}
+
+/**
+ *hns_xgmac_pma_fec_enable - xgmac PMA FEC enable
+ *@drv: mac driver
+ *@tx_value: tx value
+ *@rx_value: rx value
+ */
+static void hns_xgmac_pma_fec_enable(struct mac_driver *drv, u32 tx_value,
+ u32 rx_value)
+{
+ u32 origin = dsaf_read_dev(drv, XGMAC_PMA_FEC_CONTROL_REG);
+
+ dsaf_set_bit(origin, XGMAC_PMA_FEC_CTL_TX_B, !!tx_value);
+ dsaf_set_bit(origin, XGMAC_PMA_FEC_CTL_RX_B, !!rx_value);
+ dsaf_write_dev(drv, XGMAC_PMA_FEC_CONTROL_REG, origin);
+}
+
+/* clr exc irq for xge */
+static void hns_xgmac_exc_irq_en(struct mac_driver *drv, u32 en)
+{
+ u32 clr_vlue = 0xfffffffful;
+ u32 msk_vlue = en ? 0xfffffffful : 0; /* 1 is en, 0 is dis */
+
+ dsaf_write_dev(drv, XGMAC_INT_STATUS_REG, clr_vlue);
+ dsaf_write_dev(drv, XGMAC_INT_ENABLE_REG, msk_vlue);
+}
+
+/**
+ *hns_xgmac_init - initialize XGE
+ *@mac_drv: mac driver
+ */
+static void hns_xgmac_init(void *mac_drv)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+ struct dsaf_device *dsaf_dev
+ = (struct dsaf_device *)dev_get_drvdata(drv->dev);
+ u32 port = drv->mac_id;
+
+ dsaf_dev->misc_op->xge_srst(dsaf_dev, port, 0);
+ msleep(100);
+ dsaf_dev->misc_op->xge_srst(dsaf_dev, port, 1);
+
+ msleep(100);
+ hns_xgmac_lf_rf_control_init(drv);
+ hns_xgmac_exc_irq_en(drv, 0);
+
+ hns_xgmac_pma_fec_enable(drv, 0x0, 0x0);
+
+ hns_xgmac_disable(mac_drv, MAC_COMM_MODE_RX_AND_TX);
+}
+
+/**
+ *hns_xgmac_config_pad_and_crc - set xgmac pad and crc enable at the same time
+ *@mac_drv: mac driver
+ *@newval:enable of pad and crc
+ */
+static void hns_xgmac_config_pad_and_crc(void *mac_drv, u8 newval)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+ u32 origin = dsaf_read_dev(drv, XGMAC_MAC_CONTROL_REG);
+
+ dsaf_set_bit(origin, XGMAC_CTL_TX_PAD_B, !!newval);
+ dsaf_set_bit(origin, XGMAC_CTL_TX_FCS_B, !!newval);
+ dsaf_set_bit(origin, XGMAC_CTL_RX_FCS_B, !!newval);
+ dsaf_write_dev(drv, XGMAC_MAC_CONTROL_REG, origin);
+}
+
+/**
+ *hns_xgmac_pausefrm_cfg - set pause param about xgmac
+ *@mac_drv: mac driver
+ *@rx_en: enable receive
+ *@tx_en: enable transmit
+ */
+static void hns_xgmac_pausefrm_cfg(void *mac_drv, u32 rx_en, u32 tx_en)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+ u32 origin = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_CTRL_REG);
+
+ dsaf_set_bit(origin, XGMAC_PAUSE_CTL_TX_B, !!tx_en);
+ dsaf_set_bit(origin, XGMAC_PAUSE_CTL_RX_B, !!rx_en);
+ dsaf_write_dev(drv, XGMAC_MAC_PAUSE_CTRL_REG, origin);
+}
+
+static void hns_xgmac_set_pausefrm_mac_addr(void *mac_drv, const char *mac_addr)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
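+ /* the pause-frame source MAC is programmed as a 16-bit high word
+ * (bytes 0-1) and a 32-bit low word (bytes 2-5)
+ */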
+ u32 high_val = mac_addr[1] | (mac_addr[0] << 8);
+ u32 low_val = mac_addr[5] | (mac_addr[4] << 8)
+ | (mac_addr[3] << 16) | (mac_addr[2] << 24);
+ dsaf_write_dev(drv, XGMAC_MAC_PAUSE_LOCAL_MAC_L_REG, low_val);
+ dsaf_write_dev(drv, XGMAC_MAC_PAUSE_LOCAL_MAC_H_REG, high_val);
+}
+
+/**
+ *hns_xgmac_set_tx_auto_pause_frames - set tx pause param about xgmac
+ *@mac_drv: mac driver
+ *@enable:enable tx pause param
+ */
+static void hns_xgmac_set_tx_auto_pause_frames(void *mac_drv, u16 enable)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ dsaf_set_dev_bit(drv, XGMAC_MAC_PAUSE_CTRL_REG,
+ XGMAC_PAUSE_CTL_TX_B, !!enable);
+
+ /* if enable is not zero, set tx pause time */
+ if (enable)
+ dsaf_write_dev(drv, XGMAC_MAC_PAUSE_TIME_REG, enable);
+}
+
+/**
+ *hns_xgmac_config_max_frame_length - set xgmac max frame length
+ *@mac_drv: mac driver
+ *@newval:xgmac max frame length
+ */
+static void hns_xgmac_config_max_frame_length(void *mac_drv, u16 newval)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ dsaf_write_dev(drv, XGMAC_MAC_MAX_PKT_SIZE_REG, newval);
+}
+
+static void hns_xgmac_update_stats(void *mac_drv)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+ struct mac_hw_stats *hw_stats = &drv->mac_cb->hw_stats;
+
+ /* TX */
+ hw_stats->tx_fragment_err
+ = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_FRAGMENT);
+ hw_stats->tx_undersize
+ = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_UNDERSIZE);
+ hw_stats->tx_under_min_pkts
+ = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_UNDERMIN);
+ hw_stats->tx_64bytes = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_64OCTETS);
+ hw_stats->tx_65to127
+ = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_65TO127OCTETS);
+ hw_stats->tx_128to255
+ = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_128TO255OCTETS);
+ hw_stats->tx_256to511
+ = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_256TO511OCTETS);
+ hw_stats->tx_512to1023
+ = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_512TO1023OCTETS);
+ hw_stats->tx_1024to1518
+ = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_1024TO1518OCTETS);
+ hw_stats->tx_1519tomax
+ = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_1519TOMAXOCTETS);
+ hw_stats->tx_1519tomax_good
+ = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_1519TOMAXOCTETSOK);
+ hw_stats->tx_oversize = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_OVERSIZE);
+ hw_stats->tx_jabber_err = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_JABBER);
+ hw_stats->tx_good_pkts = hns_mac_reg_read64(drv, XGMAC_TX_GOODPKTS);
+ hw_stats->tx_good_bytes = hns_mac_reg_read64(drv, XGMAC_TX_GOODOCTETS);
+ hw_stats->tx_total_pkts = hns_mac_reg_read64(drv, XGMAC_TX_TOTAL_PKTS);
+ hw_stats->tx_total_bytes
+ = hns_mac_reg_read64(drv, XGMAC_TX_TOTALOCTETS);
+ hw_stats->tx_uc_pkts = hns_mac_reg_read64(drv, XGMAC_TX_UNICASTPKTS);
+ hw_stats->tx_mc_pkts = hns_mac_reg_read64(drv, XGMAC_TX_MULTICASTPKTS);
+ hw_stats->tx_bc_pkts = hns_mac_reg_read64(drv, XGMAC_TX_BROADCASTPKTS);
+ hw_stats->tx_pfc_tc0 = hns_mac_reg_read64(drv, XGMAC_TX_PRI0PAUSEPKTS);
+ hw_stats->tx_pfc_tc1 = hns_mac_reg_read64(drv, XGMAC_TX_PRI1PAUSEPKTS);
+ hw_stats->tx_pfc_tc2 = hns_mac_reg_read64(drv, XGMAC_TX_PRI2PAUSEPKTS);
+ hw_stats->tx_pfc_tc3 = hns_mac_reg_read64(drv, XGMAC_TX_PRI3PAUSEPKTS);
+ hw_stats->tx_pfc_tc4 = hns_mac_reg_read64(drv, XGMAC_TX_PRI4PAUSEPKTS);
+ hw_stats->tx_pfc_tc5 = hns_mac_reg_read64(drv, XGMAC_TX_PRI5PAUSEPKTS);
+ hw_stats->tx_pfc_tc6 = hns_mac_reg_read64(drv, XGMAC_TX_PRI6PAUSEPKTS);
+ hw_stats->tx_pfc_tc7 = hns_mac_reg_read64(drv, XGMAC_TX_PRI7PAUSEPKTS);
+ hw_stats->tx_ctrl = hns_mac_reg_read64(drv, XGMAC_TX_MACCTRLPKTS);
+ hw_stats->tx_1731_pkts = hns_mac_reg_read64(drv, XGMAC_TX_1731PKTS);
+ hw_stats->tx_1588_pkts = hns_mac_reg_read64(drv, XGMAC_TX_1588PKTS);
+ hw_stats->rx_good_from_sw
+ = hns_mac_reg_read64(drv, XGMAC_RX_FROMAPPGOODPKTS);
+ hw_stats->rx_bad_from_sw
+ = hns_mac_reg_read64(drv, XGMAC_RX_FROMAPPBADPKTS);
+ hw_stats->tx_bad_pkts = hns_mac_reg_read64(drv, XGMAC_TX_ERRALLPKTS);
+
+ /* RX */
+ hw_stats->rx_fragment_err
+ = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_FRAGMENT);
+ hw_stats->rx_undersize
+ = hns_mac_reg_read64(drv, XGMAC_RX_PKTSUNDERSIZE);
+ hw_stats->rx_under_min
+ = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_UNDERMIN);
+ hw_stats->rx_64bytes = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_64OCTETS);
+ hw_stats->rx_65to127
+ = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_65TO127OCTETS);
+ hw_stats->rx_128to255
+ = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_128TO255OCTETS);
+ hw_stats->rx_256to511
+ = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_256TO511OCTETS);
+ hw_stats->rx_512to1023
+ = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_512TO1023OCTETS);
+ hw_stats->rx_1024to1518
+ = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_1024TO1518OCTETS);
+ hw_stats->rx_1519tomax
+ = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_1519TOMAXOCTETS);
+ hw_stats->rx_1519tomax_good
+ = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_1519TOMAXOCTETSOK);
+ hw_stats->rx_oversize = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_OVERSIZE);
+ hw_stats->rx_jabber_err = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_JABBER);
+ hw_stats->rx_good_pkts = hns_mac_reg_read64(drv, XGMAC_RX_GOODPKTS);
+ hw_stats->rx_good_bytes = hns_mac_reg_read64(drv, XGMAC_RX_GOODOCTETS);
+ hw_stats->rx_total_pkts = hns_mac_reg_read64(drv, XGMAC_RX_TOTAL_PKTS);
+ hw_stats->rx_total_bytes
+ = hns_mac_reg_read64(drv, XGMAC_RX_TOTALOCTETS);
+ hw_stats->rx_uc_pkts = hns_mac_reg_read64(drv, XGMAC_RX_UNICASTPKTS);
+ hw_stats->rx_mc_pkts = hns_mac_reg_read64(drv, XGMAC_RX_MULTICASTPKTS);
+ hw_stats->rx_bc_pkts = hns_mac_reg_read64(drv, XGMAC_RX_BROADCASTPKTS);
+ hw_stats->rx_pfc_tc0 = hns_mac_reg_read64(drv, XGMAC_RX_PRI0PAUSEPKTS);
+ hw_stats->rx_pfc_tc1 = hns_mac_reg_read64(drv, XGMAC_RX_PRI1PAUSEPKTS);
+ hw_stats->rx_pfc_tc2 = hns_mac_reg_read64(drv, XGMAC_RX_PRI2PAUSEPKTS);
+ hw_stats->rx_pfc_tc3 = hns_mac_reg_read64(drv, XGMAC_RX_PRI3PAUSEPKTS);
+ hw_stats->rx_pfc_tc4 = hns_mac_reg_read64(drv, XGMAC_RX_PRI4PAUSEPKTS);
+ hw_stats->rx_pfc_tc5 = hns_mac_reg_read64(drv, XGMAC_RX_PRI5PAUSEPKTS);
+ hw_stats->rx_pfc_tc6 = hns_mac_reg_read64(drv, XGMAC_RX_PRI6PAUSEPKTS);
+ hw_stats->rx_pfc_tc7 = hns_mac_reg_read64(drv, XGMAC_RX_PRI7PAUSEPKTS);
+
+ hw_stats->rx_unknown_ctrl
+ = hns_mac_reg_read64(drv, XGMAC_RX_MACCTRLPKTS);
+ hw_stats->tx_good_to_sw
+ = hns_mac_reg_read64(drv, XGMAC_TX_SENDAPPGOODPKTS);
+ hw_stats->tx_bad_to_sw
+ = hns_mac_reg_read64(drv, XGMAC_TX_SENDAPPBADPKTS);
+ hw_stats->rx_1731_pkts = hns_mac_reg_read64(drv, XGMAC_RX_1731PKTS);
+ hw_stats->rx_symbol_err
+ = hns_mac_reg_read64(drv, XGMAC_RX_SYMBOLERRPKTS);
+ hw_stats->rx_fcs_err = hns_mac_reg_read64(drv, XGMAC_RX_FCSERRPKTS);
+}
+
+/**
+ *hns_xgmac_free - free xgmac driver
+ *@mac_drv: mac driver
+ */
+static void hns_xgmac_free(void *mac_drv)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+ struct dsaf_device *dsaf_dev
+ = (struct dsaf_device *)dev_get_drvdata(drv->dev);
+
+ u32 mac_id = drv->mac_id;
+
+ dsaf_dev->misc_op->xge_srst(dsaf_dev, mac_id, 0);
+}
+
+/**
+ *hns_xgmac_get_info - get xgmac information
+ *@mac_drv: mac driver
+ *@mac_info:mac information
+ */
+static void hns_xgmac_get_info(void *mac_drv, struct mac_info *mac_info)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+ u32 pause_time, pause_ctrl, port_mode, ctrl_val;
+
+ ctrl_val = dsaf_read_dev(drv, XGMAC_MAC_CONTROL_REG);
+ mac_info->pad_and_crc_en = dsaf_get_bit(ctrl_val, XGMAC_CTL_TX_PAD_B);
+ mac_info->auto_neg = 0;
+
+ pause_time = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_TIME_REG);
+ mac_info->tx_pause_time = pause_time;
+
+ port_mode = dsaf_read_dev(drv, XGMAC_PORT_MODE_REG);
+ mac_info->port_en = dsaf_get_field(port_mode, XGMAC_PORT_MODE_TX_M,
+ XGMAC_PORT_MODE_TX_S) &&
+ dsaf_get_field(port_mode, XGMAC_PORT_MODE_RX_M,
+ XGMAC_PORT_MODE_RX_S);
+ mac_info->duplex = 1;
+ mac_info->speed = MAC_SPEED_10000;
+
+ pause_ctrl = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_CTRL_REG);
+ mac_info->rx_pause_en = dsaf_get_bit(pause_ctrl, XGMAC_PAUSE_CTL_RX_B);
+ mac_info->tx_pause_en = dsaf_get_bit(pause_ctrl, XGMAC_PAUSE_CTL_TX_B);
+}
+
+/**
+ *hns_xgmac_get_pausefrm_cfg - get xgmac pause param
+ *@mac_drv: mac driver
+ *@rx_en:xgmac rx pause enable
+ *@tx_en:xgmac tx pause enable
+ */
+static void hns_xgmac_get_pausefrm_cfg(void *mac_drv, u32 *rx_en, u32 *tx_en)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+ u32 pause_ctrl;
+
+ pause_ctrl = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_CTRL_REG);
+ *rx_en = dsaf_get_bit(pause_ctrl, XGMAC_PAUSE_CTL_RX_B);
+ *tx_en = dsaf_get_bit(pause_ctrl, XGMAC_PAUSE_CTL_TX_B);
+}
+
+/**
+ *hns_xgmac_get_link_status - get xgmac link status
+ *@mac_drv: mac driver
+ *@link_stat: xgmac link stat
+ */
+static void hns_xgmac_get_link_status(void *mac_drv, u32 *link_stat)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ *link_stat = dsaf_read_dev(drv, XGMAC_LINK_STATUS_REG);
+}
+
+/**
+ *hns_xgmac_get_regs - dump xgmac regs
+ *@mac_drv: mac driver
+ *@data:data for value of regs
+ */
+static void hns_xgmac_get_regs(void *mac_drv, void *data)
+{
+ u32 i;
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+ u32 *regs = data;
+ u64 qtmp;
+
+ /* base config registers */
+ regs[0] = dsaf_read_dev(drv, XGMAC_INT_STATUS_REG);
+ regs[1] = dsaf_read_dev(drv, XGMAC_INT_ENABLE_REG);
+ regs[2] = dsaf_read_dev(drv, XGMAC_INT_SET_REG);
+ regs[3] = dsaf_read_dev(drv, XGMAC_IERR_U_INFO_REG);
+ regs[4] = dsaf_read_dev(drv, XGMAC_OVF_INFO_REG);
+ regs[5] = dsaf_read_dev(drv, XGMAC_OVF_CNT_REG);
+ regs[6] = dsaf_read_dev(drv, XGMAC_PORT_MODE_REG);
+ regs[7] = dsaf_read_dev(drv, XGMAC_CLK_ENABLE_REG);
+ regs[8] = dsaf_read_dev(drv, XGMAC_RESET_REG);
+ regs[9] = dsaf_read_dev(drv, XGMAC_LINK_CONTROL_REG);
+ regs[10] = dsaf_read_dev(drv, XGMAC_LINK_STATUS_REG);
+
+ regs[11] = dsaf_read_dev(drv, XGMAC_SPARE_REG);
+ regs[12] = dsaf_read_dev(drv, XGMAC_SPARE_CNT_REG);
+ regs[13] = dsaf_read_dev(drv, XGMAC_MAC_ENABLE_REG);
+ regs[14] = dsaf_read_dev(drv, XGMAC_MAC_CONTROL_REG);
+ regs[15] = dsaf_read_dev(drv, XGMAC_MAC_IPG_REG);
+ regs[16] = dsaf_read_dev(drv, XGMAC_MAC_MSG_CRC_EN_REG);
+ regs[17] = dsaf_read_dev(drv, XGMAC_MAC_MSG_IMG_REG);
+ regs[18] = dsaf_read_dev(drv, XGMAC_MAC_MSG_FC_CFG_REG);
+ regs[19] = dsaf_read_dev(drv, XGMAC_MAC_MSG_TC_CFG_REG);
+ regs[20] = dsaf_read_dev(drv, XGMAC_MAC_PAD_SIZE_REG);
+ regs[21] = dsaf_read_dev(drv, XGMAC_MAC_MIN_PKT_SIZE_REG);
+ regs[22] = dsaf_read_dev(drv, XGMAC_MAC_MAX_PKT_SIZE_REG);
+ regs[23] = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_CTRL_REG);
+ regs[24] = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_TIME_REG);
+ regs[25] = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_GAP_REG);
+ regs[26] = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_LOCAL_MAC_H_REG);
+ regs[27] = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_LOCAL_MAC_L_REG);
+ regs[28] = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_PEER_MAC_H_REG);
+ regs[29] = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_PEER_MAC_L_REG);
+ regs[30] = dsaf_read_dev(drv, XGMAC_MAC_PFC_PRI_EN_REG);
+ regs[31] = dsaf_read_dev(drv, XGMAC_MAC_1588_CTRL_REG);
+ regs[32] = dsaf_read_dev(drv, XGMAC_MAC_1588_TX_PORT_DLY_REG);
+ regs[33] = dsaf_read_dev(drv, XGMAC_MAC_1588_RX_PORT_DLY_REG);
+ regs[34] = dsaf_read_dev(drv, XGMAC_MAC_1588_ASYM_DLY_REG);
+ regs[35] = dsaf_read_dev(drv, XGMAC_MAC_1588_ADJUST_CFG_REG);
+
+ regs[36] = dsaf_read_dev(drv, XGMAC_MAC_Y1731_ETH_TYPE_REG);
+ regs[37] = dsaf_read_dev(drv, XGMAC_MAC_MIB_CONTROL_REG);
+ regs[38] = dsaf_read_dev(drv, XGMAC_MAC_WAN_RATE_ADJUST_REG);
+ regs[39] = dsaf_read_dev(drv, XGMAC_MAC_TX_ERR_MARK_REG);
+ regs[40] = dsaf_read_dev(drv, XGMAC_MAC_TX_LF_RF_CONTROL_REG);
+ regs[41] = dsaf_read_dev(drv, XGMAC_MAC_RX_LF_RF_STATUS_REG);
+ regs[42] = dsaf_read_dev(drv, XGMAC_MAC_TX_RUNT_PKT_CNT_REG);
+ regs[43] = dsaf_read_dev(drv, XGMAC_MAC_RX_RUNT_PKT_CNT_REG);
+ regs[44] = dsaf_read_dev(drv, XGMAC_MAC_RX_PREAM_ERR_PKT_CNT_REG);
+ regs[45] = dsaf_read_dev(drv, XGMAC_MAC_TX_LF_RF_TERM_PKT_CNT_REG);
+ regs[46] = dsaf_read_dev(drv, XGMAC_MAC_TX_SN_MISMATCH_PKT_CNT_REG);
+ regs[47] = dsaf_read_dev(drv, XGMAC_MAC_RX_ERR_MSG_CNT_REG);
+ regs[48] = dsaf_read_dev(drv, XGMAC_MAC_RX_ERR_EFD_CNT_REG);
+ regs[49] = dsaf_read_dev(drv, XGMAC_MAC_ERR_INFO_REG);
+ regs[50] = dsaf_read_dev(drv, XGMAC_MAC_DBG_INFO_REG);
+
+ regs[51] = dsaf_read_dev(drv, XGMAC_PCS_BASER_SYNC_THD_REG);
+ regs[52] = dsaf_read_dev(drv, XGMAC_PCS_STATUS1_REG);
+ regs[53] = dsaf_read_dev(drv, XGMAC_PCS_BASER_STATUS1_REG);
+ regs[54] = dsaf_read_dev(drv, XGMAC_PCS_BASER_STATUS2_REG);
+ regs[55] = dsaf_read_dev(drv, XGMAC_PCS_BASER_SEEDA_0_REG);
+ regs[56] = dsaf_read_dev(drv, XGMAC_PCS_BASER_SEEDA_1_REG);
+ regs[57] = dsaf_read_dev(drv, XGMAC_PCS_BASER_SEEDB_0_REG);
+ regs[58] = dsaf_read_dev(drv, XGMAC_PCS_BASER_SEEDB_1_REG);
+ regs[59] = dsaf_read_dev(drv, XGMAC_PCS_BASER_TEST_CONTROL_REG);
+ regs[60] = dsaf_read_dev(drv, XGMAC_PCS_BASER_TEST_ERR_CNT_REG);
+ regs[61] = dsaf_read_dev(drv, XGMAC_PCS_DBG_INFO_REG);
+ regs[62] = dsaf_read_dev(drv, XGMAC_PCS_DBG_INFO1_REG);
+ regs[63] = dsaf_read_dev(drv, XGMAC_PCS_DBG_INFO2_REG);
+ regs[64] = dsaf_read_dev(drv, XGMAC_PCS_DBG_INFO3_REG);
+
+ regs[65] = dsaf_read_dev(drv, XGMAC_PMA_ENABLE_REG);
+ regs[66] = dsaf_read_dev(drv, XGMAC_PMA_CONTROL_REG);
+ regs[67] = dsaf_read_dev(drv, XGMAC_PMA_SIGNAL_STATUS_REG);
+ regs[68] = dsaf_read_dev(drv, XGMAC_PMA_DBG_INFO_REG);
+ regs[69] = dsaf_read_dev(drv, XGMAC_PMA_FEC_ABILITY_REG);
+ regs[70] = dsaf_read_dev(drv, XGMAC_PMA_FEC_CONTROL_REG);
+ regs[71] = dsaf_read_dev(drv, XGMAC_PMA_FEC_CORR_BLOCK_CNT__REG);
+ regs[72] = dsaf_read_dev(drv, XGMAC_PMA_FEC_UNCORR_BLOCK_CNT__REG);
+
+ /* status registers */
+#define hns_xgmac_cpy_q(p, q) \
+ do {\
+ *(p) = (u32)(q);\
+ *((p) + 1) = (u32)((q) >> 32);\
+ } while (0)
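+ /* each 64-bit counter is stored as two consecutive u32 entries,
+ * low word first
+ */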
+
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_FRAGMENT);
+ hns_xgmac_cpy_q(&regs[73], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_UNDERSIZE);
+ hns_xgmac_cpy_q(&regs[75], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_UNDERMIN);
+ hns_xgmac_cpy_q(&regs[77], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_64OCTETS);
+ hns_xgmac_cpy_q(&regs[79], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_65TO127OCTETS);
+ hns_xgmac_cpy_q(&regs[81], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_128TO255OCTETS);
+ hns_xgmac_cpy_q(&regs[83], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_256TO511OCTETS);
+ hns_xgmac_cpy_q(&regs[85], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_512TO1023OCTETS);
+ hns_xgmac_cpy_q(&regs[87], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_1024TO1518OCTETS);
+ hns_xgmac_cpy_q(&regs[89], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_1519TOMAXOCTETS);
+ hns_xgmac_cpy_q(&regs[91], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_1519TOMAXOCTETSOK);
+ hns_xgmac_cpy_q(&regs[93], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_OVERSIZE);
+ hns_xgmac_cpy_q(&regs[95], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_JABBER);
+ hns_xgmac_cpy_q(&regs[97], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_GOODPKTS);
+ hns_xgmac_cpy_q(&regs[99], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_GOODOCTETS);
+ hns_xgmac_cpy_q(&regs[101], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_TOTAL_PKTS);
+ hns_xgmac_cpy_q(&regs[103], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_TOTALOCTETS);
+ hns_xgmac_cpy_q(&regs[105], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_UNICASTPKTS);
+ hns_xgmac_cpy_q(&regs[107], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_MULTICASTPKTS);
+ hns_xgmac_cpy_q(&regs[109], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_BROADCASTPKTS);
+ hns_xgmac_cpy_q(&regs[111], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI0PAUSEPKTS);
+ hns_xgmac_cpy_q(&regs[113], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI1PAUSEPKTS);
+ hns_xgmac_cpy_q(&regs[115], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI2PAUSEPKTS);
+ hns_xgmac_cpy_q(&regs[117], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI3PAUSEPKTS);
+ hns_xgmac_cpy_q(&regs[119], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI4PAUSEPKTS);
+ hns_xgmac_cpy_q(&regs[121], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI5PAUSEPKTS);
+ hns_xgmac_cpy_q(&regs[123], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI6PAUSEPKTS);
+ hns_xgmac_cpy_q(&regs[125], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI7PAUSEPKTS);
+ hns_xgmac_cpy_q(&regs[127], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_MACCTRLPKTS);
+ hns_xgmac_cpy_q(&regs[129], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_1731PKTS);
+ hns_xgmac_cpy_q(&regs[131], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_1588PKTS);
+ hns_xgmac_cpy_q(&regs[133], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_FROMAPPGOODPKTS);
+ hns_xgmac_cpy_q(&regs[135], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_FROMAPPBADPKTS);
+ hns_xgmac_cpy_q(&regs[137], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_ERRALLPKTS);
+ hns_xgmac_cpy_q(&regs[139], qtmp);
+
+ /* RX */
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_FRAGMENT);
+ hns_xgmac_cpy_q(&regs[141], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTSUNDERSIZE);
+ hns_xgmac_cpy_q(&regs[143], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_UNDERMIN);
+ hns_xgmac_cpy_q(&regs[145], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_64OCTETS);
+ hns_xgmac_cpy_q(&regs[147], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_65TO127OCTETS);
+ hns_xgmac_cpy_q(&regs[149], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_128TO255OCTETS);
+ hns_xgmac_cpy_q(&regs[151], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_256TO511OCTETS);
+ hns_xgmac_cpy_q(&regs[153], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_512TO1023OCTETS);
+ hns_xgmac_cpy_q(&regs[155], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_1024TO1518OCTETS);
+ hns_xgmac_cpy_q(&regs[157], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_1519TOMAXOCTETS);
+ hns_xgmac_cpy_q(&regs[159], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_1519TOMAXOCTETSOK);
+ hns_xgmac_cpy_q(&regs[161], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_OVERSIZE);
+ hns_xgmac_cpy_q(&regs[163], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_JABBER);
+ hns_xgmac_cpy_q(&regs[165], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_GOODPKTS);
+ hns_xgmac_cpy_q(&regs[167], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_GOODOCTETS);
+ hns_xgmac_cpy_q(&regs[169], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_TOTAL_PKTS);
+ hns_xgmac_cpy_q(&regs[171], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_TOTALOCTETS);
+ hns_xgmac_cpy_q(&regs[173], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_UNICASTPKTS);
+ hns_xgmac_cpy_q(&regs[175], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_MULTICASTPKTS);
+ hns_xgmac_cpy_q(&regs[177], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_BROADCASTPKTS);
+ hns_xgmac_cpy_q(&regs[179], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PRI0PAUSEPKTS);
+ hns_xgmac_cpy_q(&regs[181], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PRI1PAUSEPKTS);
+ hns_xgmac_cpy_q(&regs[183], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PRI2PAUSEPKTS);
+ hns_xgmac_cpy_q(&regs[185], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PRI3PAUSEPKTS);
+ hns_xgmac_cpy_q(&regs[187], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PRI4PAUSEPKTS);
+ hns_xgmac_cpy_q(&regs[189], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PRI5PAUSEPKTS);
+ hns_xgmac_cpy_q(&regs[191], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PRI6PAUSEPKTS);
+ hns_xgmac_cpy_q(&regs[193], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PRI7PAUSEPKTS);
+ hns_xgmac_cpy_q(&regs[195], qtmp);
+
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_MACCTRLPKTS);
+ hns_xgmac_cpy_q(&regs[197], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_SENDAPPGOODPKTS);
+ hns_xgmac_cpy_q(&regs[199], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_SENDAPPBADPKTS);
+ hns_xgmac_cpy_q(&regs[201], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_1731PKTS);
+ hns_xgmac_cpy_q(&regs[203], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_SYMBOLERRPKTS);
+ hns_xgmac_cpy_q(&regs[205], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_FCSERRPKTS);
+ hns_xgmac_cpy_q(&regs[207], qtmp);
+
+ /* mark end of mac regs */
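+ /* (this pads the dump out to HNS_XGMAC_DUMP_NUM = 214 entries) */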
+ for (i = 208; i < 214; i++)
+ regs[i] = 0xaaaaaaaa;
+}
+
+/**
+ *hns_xgmac_get_stats - get xgmac statistics
+ *@mac_drv: mac driver
+ *@data:data for value of stats regs
+ */
+static void hns_xgmac_get_stats(void *mac_drv, u64 *data)
+{
+ u32 i;
+ u64 *buf = data;
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+ struct mac_hw_stats *hw_stats = NULL;
+
+ hw_stats = &drv->mac_cb->hw_stats;
+
+ for (i = 0; i < ARRAY_SIZE(g_xgmac_stats_string); i++) {
+ buf[i] = DSAF_STATS_READ(hw_stats,
+ g_xgmac_stats_string[i].offset);
+ }
+}
+
+/**
+ *hns_xgmac_get_strings - get xgmac strings name
+ *@stringset: type of values in data
+ *@data:data for value of string name
+ */
+static void hns_xgmac_get_strings(u32 stringset, u8 *data)
+{
+ u8 *buff = data;
+ u32 i;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(g_xgmac_stats_string); i++)
+ ethtool_sprintf(&buff, g_xgmac_stats_string[i].desc);
+}
+
+/**
+ *hns_xgmac_get_sset_count - get xgmac string set count
+ *@stringset: type of values in data
+ *return xgmac string set count
+ */
+static int hns_xgmac_get_sset_count(int stringset)
+{
+ if (stringset == ETH_SS_STATS || stringset == ETH_SS_PRIV_FLAGS)
+ return ARRAY_SIZE(g_xgmac_stats_string);
+
+ return 0;
+}
+
+/**
+ *hns_xgmac_get_regs_count - get xgmac regs count
+ *return xgmac regs count
+ */
+static int hns_xgmac_get_regs_count(void)
+{
+ return HNS_XGMAC_DUMP_NUM;
+}
+
+void *hns_xgmac_config(struct hns_mac_cb *mac_cb, struct mac_params *mac_param)
+{
+ struct mac_driver *mac_drv;
+
+ mac_drv = devm_kzalloc(mac_cb->dev, sizeof(*mac_drv), GFP_KERNEL);
+ if (!mac_drv)
+ return NULL;
+
+ mac_drv->mac_init = hns_xgmac_init;
+ mac_drv->mac_enable = hns_xgmac_enable;
+ mac_drv->mac_disable = hns_xgmac_disable;
+
+ mac_drv->mac_id = mac_param->mac_id;
+ mac_drv->mac_mode = mac_param->mac_mode;
+ mac_drv->io_base = mac_param->vaddr;
+ mac_drv->dev = mac_param->dev;
+ mac_drv->mac_cb = mac_cb;
+
+ mac_drv->set_mac_addr = hns_xgmac_set_pausefrm_mac_addr;
+ mac_drv->set_an_mode = NULL;
+ mac_drv->config_loopback = NULL;
+ mac_drv->config_pad_and_crc = hns_xgmac_config_pad_and_crc;
+ mac_drv->mac_free = hns_xgmac_free;
+ mac_drv->adjust_link = NULL;
+ mac_drv->set_tx_auto_pause_frames = hns_xgmac_set_tx_auto_pause_frames;
+ mac_drv->config_max_frame_length = hns_xgmac_config_max_frame_length;
+ mac_drv->mac_pausefrm_cfg = hns_xgmac_pausefrm_cfg;
+ mac_drv->autoneg_stat = NULL;
+ mac_drv->get_info = hns_xgmac_get_info;
+ mac_drv->get_pause_enable = hns_xgmac_get_pausefrm_cfg;
+ mac_drv->get_link_status = hns_xgmac_get_link_status;
+ mac_drv->get_regs = hns_xgmac_get_regs;
+ mac_drv->get_ethtool_stats = hns_xgmac_get_stats;
+ mac_drv->get_sset_count = hns_xgmac_get_sset_count;
+ mac_drv->get_regs_count = hns_xgmac_get_regs_count;
+ mac_drv->get_strings = hns_xgmac_get_strings;
+ mac_drv->update_stats = hns_xgmac_update_stats;
+
+ return (void *)mac_drv;
+}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.h
new file mode 100644
index 0000000000..e1b3db9807
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ */
+
+#ifndef _HNS_XGMAC_H
+#define _HNS_XGMAC_H
+
+#define HNS_XGMAC_DUMP_NUM 214
+#define HNS_XGMAC_NO_LF_RF_INSERT 0x0
+#define HNS_XGMAC_LF_INSERT 0x2
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
new file mode 100644
index 0000000000..85722afe21
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -0,0 +1,2451 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ */
+
+#include <linux/clk.h>
+#include <linux/cpumask.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/skbuff.h>
+
+#include "hnae.h"
+#include "hns_enet.h"
+#include "hns_dsaf_mac.h"
+
+#define NIC_MAX_Q_PER_VF 16
+#define HNS_NIC_TX_TIMEOUT (5 * HZ)
+
+#define SERVICE_TIMER_HZ (1 * HZ)
+
+#define RCB_IRQ_NOT_INITED 0
+#define RCB_IRQ_INITED 1
+#define HNS_BUFFER_SIZE_2048 2048
+
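+/* largest payload, in bytes, that a single TX buffer descriptor can carry */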
+#define BD_MAX_SEND_SIZE 8191
+
+static void fill_v2_desc_hw(struct hnae_ring *ring, void *priv, int size,
+ int send_sz, dma_addr_t dma, int frag_end,
+ int buf_num, enum hns_desc_type type, int mtu)
+{
+ struct hnae_desc *desc = &ring->desc[ring->next_to_use];
+ struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
+ struct iphdr *iphdr;
+ struct ipv6hdr *ipv6hdr;
+ struct sk_buff *skb;
+ __be16 protocol;
+ u8 bn_pid = 0;
+ u8 rrcfv = 0;
+ u8 ip_offset = 0;
+ u8 tvsvsn = 0;
+ u16 mss = 0;
+ u8 l4_len = 0;
+ u16 paylen = 0;
+
+ desc_cb->priv = priv;
+ desc_cb->length = size;
+ desc_cb->dma = dma;
+ desc_cb->type = type;
+
+ desc->addr = cpu_to_le64(dma);
+ desc->tx.send_size = cpu_to_le16((u16)send_sz);
+
+ /* config bd buffer end */
+ hnae_set_bit(rrcfv, HNSV2_TXD_VLD_B, 1);
+ hnae_set_field(bn_pid, HNSV2_TXD_BUFNUM_M, 0, buf_num - 1);
+
+ /* fill port_id in the tx bd for sending management pkts */
+ hnae_set_field(bn_pid, HNSV2_TXD_PORTID_M,
+ HNSV2_TXD_PORTID_S, ring->q->handle->dport_id);
+
+ if (type == DESC_TYPE_SKB) {
+ skb = (struct sk_buff *)priv;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ skb_reset_mac_len(skb);
+ protocol = skb->protocol;
+ ip_offset = ETH_HLEN;
+
+ if (protocol == htons(ETH_P_8021Q)) {
+ ip_offset += VLAN_HLEN;
+ protocol = vlan_get_protocol(skb);
+ skb->protocol = protocol;
+ }
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ iphdr = ip_hdr(skb);
+ hnae_set_bit(rrcfv, HNSV2_TXD_L3CS_B, 1);
+ hnae_set_bit(rrcfv, HNSV2_TXD_L4CS_B, 1);
+
+ /* check for tcp/udp header */
+ if (iphdr->protocol == IPPROTO_TCP &&
+ skb_is_gso(skb)) {
+ hnae_set_bit(tvsvsn,
+ HNSV2_TXD_TSE_B, 1);
+ l4_len = tcp_hdrlen(skb);
+ mss = skb_shinfo(skb)->gso_size;
+ paylen = skb->len - skb_tcp_all_headers(skb);
+ }
+ } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ hnae_set_bit(tvsvsn, HNSV2_TXD_IPV6_B, 1);
+ ipv6hdr = ipv6_hdr(skb);
+ hnae_set_bit(rrcfv, HNSV2_TXD_L4CS_B, 1);
+
+ /* check for tcp/udp header */
+ if (ipv6hdr->nexthdr == IPPROTO_TCP &&
+ skb_is_gso(skb) && skb_is_gso_v6(skb)) {
+ hnae_set_bit(tvsvsn,
+ HNSV2_TXD_TSE_B, 1);
+ l4_len = tcp_hdrlen(skb);
+ mss = skb_shinfo(skb)->gso_size;
+ paylen = skb->len - skb_tcp_all_headers(skb);
+ }
+ }
+ desc->tx.ip_offset = ip_offset;
+ desc->tx.tse_vlan_snap_v6_sctp_nth = tvsvsn;
+ desc->tx.mss = cpu_to_le16(mss);
+ desc->tx.l4_len = l4_len;
+ desc->tx.paylen = cpu_to_le16(paylen);
+ }
+ }
+
+ hnae_set_bit(rrcfv, HNSV2_TXD_FE_B, frag_end);
+
+ desc->tx.bn_pid = bn_pid;
+ desc->tx.ra_ri_cs_fe_vld = rrcfv;
+
+ ring_ptr_move_fw(ring, next_to_use);
+}
+
+static void fill_v2_desc(struct hnae_ring *ring, void *priv,
+ int size, dma_addr_t dma, int frag_end,
+ int buf_num, enum hns_desc_type type, int mtu)
+{
+ fill_v2_desc_hw(ring, priv, size, size, dma, frag_end,
+ buf_num, type, mtu);
+}
+
+static const struct acpi_device_id hns_enet_acpi_match[] = {
+ { "HISI00C1", 0 },
+ { "HISI00C2", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, hns_enet_acpi_match);
+
+static void fill_desc(struct hnae_ring *ring, void *priv,
+ int size, dma_addr_t dma, int frag_end,
+ int buf_num, enum hns_desc_type type, int mtu,
+ bool is_gso)
+{
+ struct hnae_desc *desc = &ring->desc[ring->next_to_use];
+ struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
+ struct sk_buff *skb;
+ __be16 protocol;
+ u32 ip_offset;
+ u32 asid_bufnum_pid = 0;
+ u32 flag_ipoffset = 0;
+
+ desc_cb->priv = priv;
+ desc_cb->length = size;
+ desc_cb->dma = dma;
+ desc_cb->type = type;
+
+ desc->addr = cpu_to_le64(dma);
+ desc->tx.send_size = cpu_to_le16((u16)size);
+
+ /* config bd buffer end */
+ flag_ipoffset |= 1 << HNS_TXD_VLD_B;
+
+ asid_bufnum_pid |= buf_num << HNS_TXD_BUFNUM_S;
+
+ if (type == DESC_TYPE_SKB) {
+ skb = (struct sk_buff *)priv;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ protocol = skb->protocol;
+ ip_offset = ETH_HLEN;
+
+ /* if it is a SW VLAN, check the next protocol */
+ if (protocol == htons(ETH_P_8021Q)) {
+ ip_offset += VLAN_HLEN;
+ protocol = vlan_get_protocol(skb);
+ skb->protocol = protocol;
+ }
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ flag_ipoffset |= 1 << HNS_TXD_L3CS_B;
+ /* check for tcp/udp header */
+ flag_ipoffset |= 1 << HNS_TXD_L4CS_B;
+
+ } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ /* ipv6 has no l3 checksum; check for L4 header */
+ flag_ipoffset |= 1 << HNS_TXD_L4CS_B;
+ }
+
+ flag_ipoffset |= ip_offset << HNS_TXD_IPOFFSET_S;
+ }
+ }
+
+ flag_ipoffset |= frag_end << HNS_TXD_FE_B;
+
+ desc->tx.asid_bufnum_pid = cpu_to_le16(asid_bufnum_pid);
+ desc->tx.flag_ipoffset = cpu_to_le32(flag_ipoffset);
+
+ ring_ptr_move_fw(ring, next_to_use);
+}
+
+static void unfill_desc(struct hnae_ring *ring)
+{
+ ring_ptr_move_bw(ring, next_to_use);
+}
+
+static int hns_nic_maybe_stop_tx(
+ struct sk_buff **out_skb, int *bnum, struct hnae_ring *ring)
+{
+ struct sk_buff *skb = *out_skb;
+ struct sk_buff *new_skb = NULL;
+ int buf_num;
+
+ /* no. of segments (plus a header) */
+ buf_num = skb_shinfo(skb)->nr_frags + 1;
+
+ if (unlikely(buf_num > ring->max_desc_num_per_pkt)) {
+ if (ring_space(ring) < 1)
+ return -EBUSY;
+
+ new_skb = skb_copy(skb, GFP_ATOMIC);
+ if (!new_skb)
+ return -ENOMEM;
+
+ dev_kfree_skb_any(skb);
+ *out_skb = new_skb;
+ buf_num = 1;
+ } else if (buf_num > ring_space(ring)) {
+ return -EBUSY;
+ }
+
+ *bnum = buf_num;
+ return 0;
+}
+
+static int hns_nic_maybe_stop_tso(
+ struct sk_buff **out_skb, int *bnum, struct hnae_ring *ring)
+{
+ int i;
+ int size;
+ int buf_num;
+ int frag_num;
+ struct sk_buff *skb = *out_skb;
+ struct sk_buff *new_skb = NULL;
+ skb_frag_t *frag;
+
+ size = skb_headlen(skb);
+ buf_num = (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
+
+ frag_num = skb_shinfo(skb)->nr_frags;
+ for (i = 0; i < frag_num; i++) {
+ frag = &skb_shinfo(skb)->frags[i];
+ size = skb_frag_size(frag);
+ buf_num += (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
+ }
+
+ if (unlikely(buf_num > ring->max_desc_num_per_pkt)) {
+ buf_num = (skb->len + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
+ if (ring_space(ring) < buf_num)
+ return -EBUSY;
+ /* manually split the send packet */
+ new_skb = skb_copy(skb, GFP_ATOMIC);
+ if (!new_skb)
+ return -ENOMEM;
+ dev_kfree_skb_any(skb);
+ *out_skb = new_skb;
+
+ } else if (ring_space(ring) < buf_num) {
+ return -EBUSY;
+ }
+
+ *bnum = buf_num;
+ return 0;
+}
+
+static int hns_nic_maybe_stop_tx_v2(struct sk_buff **out_skb, int *bnum,
+ struct hnae_ring *ring)
+{
+ if (skb_is_gso(*out_skb))
+ return hns_nic_maybe_stop_tso(out_skb, bnum, ring);
+ else
+ return hns_nic_maybe_stop_tx(out_skb, bnum, ring);
+}
+
+static void fill_tso_desc(struct hnae_ring *ring, void *priv,
+ int size, dma_addr_t dma, int frag_end,
+ int buf_num, enum hns_desc_type type, int mtu)
+{
+ int frag_buf_num;
+ int sizeoflast;
+ int k;
+
+ frag_buf_num = (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
+ sizeoflast = size % BD_MAX_SEND_SIZE;
+ sizeoflast = sizeoflast ? sizeoflast : BD_MAX_SEND_SIZE;
+
+ /* when the frag size is bigger than the hardware limit, split this frag */
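+ /* e.g. a 20000-byte buffer with BD_MAX_SEND_SIZE of 8191 is split
+ * into three descriptors of 8191, 8191 and 3618 bytes
+ */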
+ for (k = 0; k < frag_buf_num; k++)
+ fill_v2_desc_hw(ring, priv, k == 0 ? size : 0,
+ (k == frag_buf_num - 1) ?
+ sizeoflast : BD_MAX_SEND_SIZE,
+ dma + BD_MAX_SEND_SIZE * k,
+ frag_end && (k == frag_buf_num - 1) ? 1 : 0,
+ buf_num,
+ (type == DESC_TYPE_SKB && !k) ?
+ DESC_TYPE_SKB : DESC_TYPE_PAGE,
+ mtu);
+}
+
+static void fill_desc_v2(struct hnae_ring *ring, void *priv,
+ int size, dma_addr_t dma, int frag_end,
+ int buf_num, enum hns_desc_type type, int mtu,
+ bool is_gso)
+{
+ if (is_gso)
+ fill_tso_desc(ring, priv, size, dma, frag_end, buf_num, type,
+ mtu);
+ else
+ fill_v2_desc(ring, priv, size, dma, frag_end, buf_num, type,
+ mtu);
+}
+
+netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
+ struct sk_buff *skb,
+ struct hns_nic_ring_data *ring_data)
+{
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct hnae_ring *ring = ring_data->ring;
+ struct device *dev = ring_to_dev(ring);
+ struct netdev_queue *dev_queue;
+ skb_frag_t *frag;
+ int buf_num;
+ int seg_num;
+ dma_addr_t dma;
+ int size, next_to_use;
+ bool is_gso;
+ int i;
+
+ switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) {
+ case -EBUSY:
+ ring->stats.tx_busy++;
+ goto out_net_tx_busy;
+ case -ENOMEM:
+ ring->stats.sw_err_cnt++;
+ netdev_err(ndev, "no memory to xmit!\n");
+ goto out_err_tx_ok;
+ default:
+ break;
+ }
+
+ /* no. of segments (plus a header) */
+ seg_num = skb_shinfo(skb)->nr_frags + 1;
+ next_to_use = ring->next_to_use;
+
+ /* fill the first part */
+ size = skb_headlen(skb);
+ dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma)) {
+ netdev_err(ndev, "TX head DMA map failed\n");
+ ring->stats.sw_err_cnt++;
+ goto out_err_tx_ok;
+ }
+ is_gso = skb_is_gso(skb);
+ priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0,
+ buf_num, DESC_TYPE_SKB, ndev->mtu, is_gso);
+
+ /* fill the fragments */
+ for (i = 1; i < seg_num; i++) {
+ frag = &skb_shinfo(skb)->frags[i - 1];
+ size = skb_frag_size(frag);
+ dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma)) {
+ netdev_err(ndev, "TX frag(%d) DMA map failed\n", i);
+ ring->stats.sw_err_cnt++;
+ goto out_map_frag_fail;
+ }
+ priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma,
+ seg_num - 1 == i ? 1 : 0, buf_num,
+ DESC_TYPE_PAGE, ndev->mtu, is_gso);
+ }
+
+ /* complete translating all packets */
+ dev_queue = netdev_get_tx_queue(ndev, skb->queue_mapping);
+ netdev_tx_sent_queue(dev_queue, skb->len);
+
+ netif_trans_update(ndev);
+ ndev->stats.tx_bytes += skb->len;
+ ndev->stats.tx_packets++;
+
+ wmb(); /* commit all data before submit */
+ assert(skb->queue_mapping < priv->ae_handle->q_num);
+ hnae_queue_xmit(priv->ae_handle->qs[skb->queue_mapping], buf_num);
+
+ return NETDEV_TX_OK;
+
+out_map_frag_fail:
+
+ while (ring->next_to_use != next_to_use) {
+ unfill_desc(ring);
+ if (ring->next_to_use != next_to_use)
+ dma_unmap_page(dev,
+ ring->desc_cb[ring->next_to_use].dma,
+ ring->desc_cb[ring->next_to_use].length,
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_single(dev,
+ ring->desc_cb[next_to_use].dma,
+ ring->desc_cb[next_to_use].length,
+ DMA_TO_DEVICE);
+ }
+
+out_err_tx_ok:
+
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+
+out_net_tx_busy:
+
+ netif_stop_subqueue(ndev, skb->queue_mapping);
+
+ /* Herbert's original patch had:
+ * smp_mb__after_netif_stop_queue();
+ * but since that doesn't exist yet, just open code it.
+ */
+ smp_mb();
+ return NETDEV_TX_BUSY;
+}
+
+static void hns_nic_reuse_page(struct sk_buff *skb, int i,
+ struct hnae_ring *ring, int pull_len,
+ struct hnae_desc_cb *desc_cb)
+{
+ struct hnae_desc *desc;
+ u32 truesize;
+ int size;
+ int last_offset;
+ bool twobufs;
+
+ twobufs = ((PAGE_SIZE < 8192) &&
+ hnae_buf_size(ring) == HNS_BUFFER_SIZE_2048);
+
+ desc = &ring->desc[ring->next_to_clean];
+ size = le16_to_cpu(desc->rx.size);
+
+ if (twobufs) {
+ truesize = hnae_buf_size(ring);
+ } else {
+ truesize = ALIGN(size, L1_CACHE_BYTES);
+ last_offset = hnae_page_size(ring) - hnae_buf_size(ring);
+ }
+
+ skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
+ size - pull_len, truesize);
+
+ /* avoid reusing remote pages; reuse_flag defaults to not reused */
+ if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))
+ return;
+
+ if (twobufs) {
+ /* if we are only owner of page we can reuse it */
+ if (likely(page_count(desc_cb->priv) == 1)) {
+ /* flip page offset to other buffer */
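+ /* with 4K pages and 2048-byte buffers the offset simply
+ * toggles between 0 and 2048
+ */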
+ desc_cb->page_offset ^= truesize;
+
+ desc_cb->reuse_flag = 1;
+ /* bump ref count on page before it is given */
+ get_page(desc_cb->priv);
+ }
+ return;
+ }
+
+ /* move offset up to the next cache line */
+ desc_cb->page_offset += truesize;
+
+ if (desc_cb->page_offset <= last_offset) {
+ desc_cb->reuse_flag = 1;
+ /* bump ref count on page before it is given */
+ get_page(desc_cb->priv);
+ }
+}
+
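+/* the v2 rx descriptor stores (buffer number - 1), hence the "+ 1" below */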
+static void get_v2rx_desc_bnum(u32 bnum_flag, int *out_bnum)
+{
+ *out_bnum = hnae_get_field(bnum_flag,
+ HNS_RXD_BUFNUM_M, HNS_RXD_BUFNUM_S) + 1;
+}
+
+static void get_rx_desc_bnum(u32 bnum_flag, int *out_bnum)
+{
+ *out_bnum = hnae_get_field(bnum_flag,
+ HNS_RXD_BUFNUM_M, HNS_RXD_BUFNUM_S);
+}
+
+static void hns_nic_rx_checksum(struct hns_nic_ring_data *ring_data,
+ struct sk_buff *skb, u32 flag)
+{
+ struct net_device *netdev = ring_data->napi.dev;
+ u32 l3id;
+ u32 l4id;
+
+ /* check if RX checksum offload is enabled */
+ if (unlikely(!(netdev->features & NETIF_F_RXCSUM)))
+ return;
+
+ /* In hardware, we only support checksum for the following protocols:
+ * 1) IPv4,
+ * 2) TCP(over IPv4 or IPv6),
+ * 3) UDP(over IPv4 or IPv6),
+ * 4) SCTP(over IPv4 or IPv6)
+ * but we support many L3 (IPv4, IPv6, MPLS, PPPoE etc.) and L4 (TCP,
+ * UDP, GRE, SCTP, IGMP, ICMP etc.) protocols.
+ *
+ * Hardware limitation:
+ * Our present hardware RX Descriptor lacks L3/L4 checksum "Status &
+ * Error" bit (which usually can be used to indicate whether checksum
+ * was calculated by the hardware and if there was any error encountered
+ * during checksum calculation).
+ *
+ * Software workaround:
+ * We do get info within the RX descriptor about the kind of L3/L4
+ * protocol coming in the packet and the error status. These errors
+ * might not just be checksum errors but could be related to version,
+ * length of IPv4, UDP, TCP etc.
+ * Because there is no-way of knowing if it is a L3/L4 error due to bad
+ * checksum or any other L3/L4 error, we will not (cannot) convey
+ * checksum status for such cases to upper stack and will not maintain
+ * the RX L3/L4 checksum counters as well.
+ */
+
+ l3id = hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S);
+ l4id = hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S);
+
+ /* check L3 protocol for which checksum is supported */
+ if ((l3id != HNS_RX_FLAG_L3ID_IPV4) && (l3id != HNS_RX_FLAG_L3ID_IPV6))
+ return;
+
+ /* check for any flagged L3 protocol errors (not just checksum) */
+ if (unlikely(hnae_get_bit(flag, HNS_RXD_L3E_B)))
+ return;
+
+ /* we do not support checksum of fragmented packets */
+ if (unlikely(hnae_get_bit(flag, HNS_RXD_FRAG_B)))
+ return;
+
+ /* check L4 protocol for which checksum is supported */
+ if ((l4id != HNS_RX_FLAG_L4ID_TCP) &&
+ (l4id != HNS_RX_FLAG_L4ID_UDP) &&
+ (l4id != HNS_RX_FLAG_L4ID_SCTP))
+ return;
+
+ /* check for any flagged L4 protocol errors (not just checksum) */
+ if (unlikely(hnae_get_bit(flag, HNS_RXD_L4E_B)))
+ return;
+
+ /* now, this has to be a packet with valid RX checksum */
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
+
+static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data,
+ struct sk_buff **out_skb, int *out_bnum)
+{
+ struct hnae_ring *ring = ring_data->ring;
+ struct net_device *ndev = ring_data->napi.dev;
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct sk_buff *skb;
+ struct hnae_desc *desc;
+ struct hnae_desc_cb *desc_cb;
+ unsigned char *va;
+ int bnum, length, i;
+ int pull_len;
+ u32 bnum_flag;
+
+ desc = &ring->desc[ring->next_to_clean];
+ desc_cb = &ring->desc_cb[ring->next_to_clean];
+
+ prefetch(desc);
+
+ va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
+
+ /* prefetch first cache line of first page */
+ net_prefetch(va);
+
+ skb = *out_skb = napi_alloc_skb(&ring_data->napi,
+ HNS_RX_HEAD_SIZE);
+ if (unlikely(!skb)) {
+ ring->stats.sw_err_cnt++;
+ return -ENOMEM;
+ }
+
+ prefetchw(skb->data);
+ length = le16_to_cpu(desc->rx.pkt_len);
+ bnum_flag = le32_to_cpu(desc->rx.ipoff_bnum_pid_flag);
+ priv->ops.get_rxd_bnum(bnum_flag, &bnum);
+ *out_bnum = bnum;
+
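+ /* Two receive paths follow: a packet no longer than HNS_RX_HEAD_SIZE
+ * is copied entirely into the skb linear area so its buffer can be
+ * reused at once, while a longer packet only has its headers pulled
+ * in and attaches the remaining buffers as page fragments.
+ */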
+ if (length <= HNS_RX_HEAD_SIZE) {
+ memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
+
+ /* we can reuse buffer as-is, just make sure it is local */
+ if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
+ desc_cb->reuse_flag = 1;
+ else /* this page cannot be reused so discard it */
+ put_page(desc_cb->priv);
+
+ ring_ptr_move_fw(ring, next_to_clean);
+
+ if (unlikely(bnum != 1)) { /* check err */
+ *out_bnum = 1;
+ goto out_bnum_err;
+ }
+ } else {
+ ring->stats.seg_pkt_cnt++;
+
+ pull_len = eth_get_headlen(ndev, va, HNS_RX_HEAD_SIZE);
+ memcpy(__skb_put(skb, pull_len), va,
+ ALIGN(pull_len, sizeof(long)));
+
+ hns_nic_reuse_page(skb, 0, ring, pull_len, desc_cb);
+ ring_ptr_move_fw(ring, next_to_clean);
+
+ if (unlikely(bnum >= (int)MAX_SKB_FRAGS)) { /* check err */
+ *out_bnum = 1;
+ goto out_bnum_err;
+ }
+ for (i = 1; i < bnum; i++) {
+ desc = &ring->desc[ring->next_to_clean];
+ desc_cb = &ring->desc_cb[ring->next_to_clean];
+
+ hns_nic_reuse_page(skb, i, ring, 0, desc_cb);
+ ring_ptr_move_fw(ring, next_to_clean);
+ }
+ }
+
+ /* exception handling: free the skb and skip past the bad descriptors */
+ if (unlikely((!bnum) || (bnum > ring->max_desc_num_per_pkt))) {
+out_bnum_err:
+ *out_bnum = *out_bnum ? *out_bnum : 1; /* ntc has moved, so this cannot be 0 */
+ netdev_err(ndev, "invalid bnum(%d,%d,%d,%d),%016llx,%016llx\n",
+ bnum, ring->max_desc_num_per_pkt,
+ length, (int)MAX_SKB_FRAGS,
+ ((u64 *)desc)[0], ((u64 *)desc)[1]);
+ ring->stats.err_bd_num++;
+ dev_kfree_skb_any(skb);
+ return -EDOM;
+ }
+
+ bnum_flag = le32_to_cpu(desc->rx.ipoff_bnum_pid_flag);
+
+ if (unlikely(!hnae_get_bit(bnum_flag, HNS_RXD_VLD_B))) {
+ netdev_err(ndev, "no valid bd,%016llx,%016llx\n",
+ ((u64 *)desc)[0], ((u64 *)desc)[1]);
+ ring->stats.non_vld_descs++;
+ dev_kfree_skb_any(skb);
+ return -EINVAL;
+ }
+
+ if (unlikely((!desc->rx.pkt_len) ||
+ hnae_get_bit(bnum_flag, HNS_RXD_DROP_B))) {
+ ring->stats.err_pkt_len++;
+ dev_kfree_skb_any(skb);
+ return -EFAULT;
+ }
+
+ if (unlikely(hnae_get_bit(bnum_flag, HNS_RXD_L2E_B))) {
+ ring->stats.l2_err++;
+ dev_kfree_skb_any(skb);
+ return -EFAULT;
+ }
+
+ ring->stats.rx_pkts++;
+ ring->stats.rx_bytes += skb->len;
+
+ /* indicate to upper stack if our hardware has already calculated
+ * the RX checksum
+ */
+ hns_nic_rx_checksum(ring_data, skb, bnum_flag);
+
+ return 0;
+}
+
+static void
+hns_nic_alloc_rx_buffers(struct hns_nic_ring_data *ring_data, int cleand_count)
+{
+ int i, ret;
+ struct hnae_desc_cb res_cbs;
+ struct hnae_desc_cb *desc_cb;
+ struct hnae_ring *ring = ring_data->ring;
+ struct net_device *ndev = ring_data->napi.dev;
+
+ for (i = 0; i < cleand_count; i++) {
+ desc_cb = &ring->desc_cb[ring->next_to_use];
+ if (desc_cb->reuse_flag) {
+ ring->stats.reuse_pg_cnt++;
+ hnae_reuse_buffer(ring, ring->next_to_use);
+ } else {
+ ret = hnae_reserve_buffer_map(ring, &res_cbs);
+ if (ret) {
+ ring->stats.sw_err_cnt++;
+ netdev_err(ndev, "hnae reserve buffer map failed.\n");
+ break;
+ }
+ hnae_replace_buffer(ring, ring->next_to_use, &res_cbs);
+ }
+
+ ring_ptr_move_fw(ring, next_to_use);
+ }
+
+ wmb(); /* make sure all buffer writes are committed before the doorbell */
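+ /* the write to RCB_REG_HEAD below appears to act as the doorbell that
+ * tells the hardware how many buffers were just refilled
+ */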
+ writel_relaxed(i, ring->io_base + RCB_REG_HEAD);
+}
+
+/* hand a received skb up to the network stack */
+static void hns_nic_rx_up_pro(struct hns_nic_ring_data *ring_data,
+ struct sk_buff *skb)
+{
+ struct net_device *ndev = ring_data->napi.dev;
+
+ skb->protocol = eth_type_trans(skb, ndev);
+ napi_gro_receive(&ring_data->napi, skb);
+}
+
+static int hns_desc_unused(struct hnae_ring *ring)
+{
+ int ntc = ring->next_to_clean;
+ int ntu = ring->next_to_use;
+
+ return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
+}
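+/* This is effectively (next_to_clean - next_to_use) modulo desc_num, which
+ * here counts descriptors that have been cleaned but not yet refilled.
+ * Example: with desc_num = 1024, next_to_clean = 30 and next_to_use = 20 it
+ * returns 10; with next_to_clean = 5 and next_to_use = 20 it wraps to
+ * 1024 + 5 - 20 = 1009.
+ */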
+
+#define HNS_LOWEST_LATENCY_RATE 27 /* 27 MB/s */
+#define HNS_LOW_LATENCY_RATE 80 /* 80 MB/s */
+
+#define HNS_COAL_BDNUM 3
+
+static u32 hns_coal_rx_bdnum(struct hnae_ring *ring)
+{
+ bool coal_enable = ring->q->handle->coal_adapt_en;
+
+ if (coal_enable &&
+ ring->coal_last_rx_bytes > HNS_LOWEST_LATENCY_RATE)
+ return HNS_COAL_BDNUM;
+ else
+ return 0;
+}
+
+static void hns_update_rx_rate(struct hnae_ring *ring)
+{
+ bool coal_enable = ring->q->handle->coal_adapt_en;
+ u32 time_passed_ms;
+ u64 total_bytes;
+
+ if (!coal_enable ||
+ time_before(jiffies, ring->coal_last_jiffies + (HZ >> 4)))
+ return;
+
+ /* ring->stats.rx_bytes overflowed */
+ if (ring->coal_last_rx_bytes > ring->stats.rx_bytes) {
+ ring->coal_last_rx_bytes = ring->stats.rx_bytes;
+ ring->coal_last_jiffies = jiffies;
+ return;
+ }
+
+ total_bytes = ring->stats.rx_bytes - ring->coal_last_rx_bytes;
+ time_passed_ms = jiffies_to_msecs(jiffies - ring->coal_last_jiffies);
+ do_div(total_bytes, time_passed_ms);
+ ring->coal_rx_rate = total_bytes >> 10;
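+ /* the division by time_passed_ms above yields bytes per millisecond,
+ * and the shift by 10 scales that to roughly Mbytes per second, which
+ * is the unit the HNS_*_LATENCY_RATE thresholds are expressed in
+ */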
+
+ ring->coal_last_rx_bytes = ring->stats.rx_bytes;
+ ring->coal_last_jiffies = jiffies;
+}
+
+/**
+ * smooth_alg - smoothing algorithm for adjusting the coalesce parameter
+ * @new_param: new value
+ * @old_param: old value
+ **/
+static u32 smooth_alg(u32 new_param, u32 old_param)
+{
+ u32 gap = (new_param > old_param) ? new_param - old_param
+ : old_param - new_param;
+
+ if (gap > 8)
+ gap >>= 3;
+
+ if (new_param > old_param)
+ return old_param + gap;
+ else
+ return old_param - gap;
+}
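+/* Worked example: smooth_alg(64, 16) computes gap = 48, which is > 8 and
+ * becomes 48 >> 3 = 6; since the new value is larger, the result is
+ * 16 + 6 = 22, i.e. the parameter only moves an eighth of the way toward
+ * the new value.
+ */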
+
+/**
+ * hns_nic_adpt_coalesce - self-adapt the interrupt coalescing according to the rx rate
+ * @ring_data: pointer to hns_nic_ring_data
+ **/
+static void hns_nic_adpt_coalesce(struct hns_nic_ring_data *ring_data)
+{
+ struct hnae_ring *ring = ring_data->ring;
+ struct hnae_handle *handle = ring->q->handle;
+ u32 new_coal_param, old_coal_param = ring->coal_param;
+
+ if (ring->coal_rx_rate < HNS_LOWEST_LATENCY_RATE)
+ new_coal_param = HNAE_LOWEST_LATENCY_COAL_PARAM;
+ else if (ring->coal_rx_rate < HNS_LOW_LATENCY_RATE)
+ new_coal_param = HNAE_LOW_LATENCY_COAL_PARAM;
+ else
+ new_coal_param = HNAE_BULK_LATENCY_COAL_PARAM;
+
+ if (new_coal_param == old_coal_param &&
+ new_coal_param == handle->coal_param)
+ return;
+
+ new_coal_param = smooth_alg(new_coal_param, old_coal_param);
+ ring->coal_param = new_coal_param;
+
+ /**
+ * Because all rings on one port share a single coalesce parameter,
+ * a ring that has just calculated its own value cannot always write
+ * it to hardware immediately. The hardware is updated only when one
+ * of the following holds:
+ * 1. the current ring's coalesce parameter is larger than the one
+ * already programmed in hardware,
+ * 2. the ring that adapted last time wants to change it again, or
+ * 3. the last hardware update has timed out.
+ */
+ if (new_coal_param == handle->coal_param) {
+ handle->coal_last_jiffies = jiffies;
+ handle->coal_ring_idx = ring_data->queue_index;
+ } else if (new_coal_param > handle->coal_param ||
+ handle->coal_ring_idx == ring_data->queue_index ||
+ time_after(jiffies, handle->coal_last_jiffies + (HZ >> 4))) {
+ handle->dev->ops->set_coalesce_usecs(handle,
+ new_coal_param);
+ handle->dev->ops->set_coalesce_frames(handle,
+ 1, new_coal_param);
+ handle->coal_param = new_coal_param;
+ handle->coal_ring_idx = ring_data->queue_index;
+ handle->coal_last_jiffies = jiffies;
+ }
+}
+
+static int hns_nic_rx_poll_one(struct hns_nic_ring_data *ring_data,
+ int budget, void *v)
+{
+ struct hnae_ring *ring = ring_data->ring;
+ struct sk_buff *skb;
+ int num, bnum;
+#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
+ int recv_pkts, recv_bds, clean_count, err;
+ int unused_count = hns_desc_unused(ring);
+
+ num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
+ rmb(); /* make sure num is read before any descriptor data is touched */
+
+ recv_pkts = 0, recv_bds = 0, clean_count = 0;
+ num -= unused_count;
+
+ while (recv_pkts < budget && recv_bds < num) {
+ /* reuse or realloc buffers */
+ if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
+ hns_nic_alloc_rx_buffers(ring_data,
+ clean_count + unused_count);
+ clean_count = 0;
+ unused_count = hns_desc_unused(ring);
+ }
+
+ /* poll one pkt */
+ err = hns_nic_poll_rx_skb(ring_data, &skb, &bnum);
+ if (unlikely(!skb)) /* this fault cannot be repaired */
+ goto out;
+
+ recv_bds += bnum;
+ clean_count += bnum;
+ if (unlikely(err)) { /* skip over the errored packet */
+ recv_pkts++;
+ continue;
+ }
+
+ /* hand the packet up to the ip stack */
+ ((void (*)(struct hns_nic_ring_data *, struct sk_buff *))v)(
+ ring_data, skb);
+ recv_pkts++;
+ }
+
+out:
+ /* refill any remaining buffers before returning */
+ if (clean_count + unused_count > 0)
+ hns_nic_alloc_rx_buffers(ring_data,
+ clean_count + unused_count);
+
+ return recv_pkts;
+}
+
+static bool hns_nic_rx_fini_pro(struct hns_nic_ring_data *ring_data)
+{
+ struct hnae_ring *ring = ring_data->ring;
+ int num;
+ bool rx_stopped;
+
+ hns_update_rx_rate(ring);
+
+ /* workaround for a hardware bug */
+ ring_data->ring->q->handle->dev->ops->toggle_ring_irq(ring, 0);
+ num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
+
+ if (num <= hns_coal_rx_bdnum(ring)) {
+ if (ring->q->handle->coal_adapt_en)
+ hns_nic_adpt_coalesce(ring_data);
+
+ rx_stopped = true;
+ } else {
+ ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
+ ring_data->ring, 1);
+
+ rx_stopped = false;
+ }
+
+ return rx_stopped;
+}
+
+static bool hns_nic_rx_fini_pro_v2(struct hns_nic_ring_data *ring_data)
+{
+ struct hnae_ring *ring = ring_data->ring;
+ int num;
+
+ hns_update_rx_rate(ring);
+ num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
+
+ if (num <= hns_coal_rx_bdnum(ring)) {
+ if (ring->q->handle->coal_adapt_en)
+ hns_nic_adpt_coalesce(ring_data);
+
+ return true;
+ }
+
+ return false;
+}
+
+static inline void hns_nic_reclaim_one_desc(struct hnae_ring *ring,
+ int *bytes, int *pkts)
+{
+ struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
+
+ (*pkts) += (desc_cb->type == DESC_TYPE_SKB);
+ (*bytes) += desc_cb->length;
+ /* desc_cb will be cleaned after hnae_free_buffer_detach */
+ hnae_free_buffer_detach(ring, ring->next_to_clean);
+
+ ring_ptr_move_fw(ring, next_to_clean);
+}
+
+static int is_valid_clean_head(struct hnae_ring *ring, int h)
+{
+ int u = ring->next_to_use;
+ int c = ring->next_to_clean;
+
+ if (unlikely(h > ring->desc_num))
+ return 0;
+
+ assert(u > 0 && u < ring->desc_num);
+ assert(c > 0 && c < ring->desc_num);
+ assert(u != c && h != c); /* must be checked before calling this function */
+
+ return u > c ? (h > c && h <= u) : (h > c || h <= u);
+}
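+/* Example: with next_to_clean = 100 and next_to_use = 200 a head of 150 is
+ * valid (100 < 150 <= 200); in the wrapped case with next_to_use = 50 and
+ * next_to_clean = 900, heads above 900 or up to 50 are valid.
+ */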
+
+/* reclaim all desc in one budget
+ * return error or number of desc left
+ */
+static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
+ int budget, void *v)
+{
+ struct hnae_ring *ring = ring_data->ring;
+ struct net_device *ndev = ring_data->napi.dev;
+ struct netdev_queue *dev_queue;
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ int head;
+ int bytes, pkts;
+
+ head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
+ rmb(); /* make sure head is ready before touch any data */
+
+ if (is_ring_empty(ring) || head == ring->next_to_clean)
+ return 0; /* no data to poll */
+
+ if (!is_valid_clean_head(ring, head)) {
+ netdev_err(ndev, "wrong head (%d, %d-%d)\n", head,
+ ring->next_to_use, ring->next_to_clean);
+ ring->stats.io_err_cnt++;
+ return -EIO;
+ }
+
+ bytes = 0;
+ pkts = 0;
+ while (head != ring->next_to_clean) {
+ hns_nic_reclaim_one_desc(ring, &bytes, &pkts);
+ /* issue prefetch for next Tx descriptor */
+ prefetch(&ring->desc_cb[ring->next_to_clean]);
+ }
+ /* update tx ring statistics. */
+ ring->stats.tx_pkts += pkts;
+ ring->stats.tx_bytes += bytes;
+
+ dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
+ netdev_tx_completed_queue(dev_queue, pkts, bytes);
+
+ if (unlikely(priv->link && !netif_carrier_ok(ndev)))
+ netif_carrier_on(ndev);
+
+ if (unlikely(pkts && netif_carrier_ok(ndev) &&
+ (ring_space(ring) >= ring->max_desc_num_per_pkt * 2))) {
+ /* Make sure that anybody stopping the queue after this
+ * sees the new next_to_clean.
+ */
+ smp_mb();
+ if (netif_tx_queue_stopped(dev_queue) &&
+ !test_bit(NIC_STATE_DOWN, &priv->state)) {
+ netif_tx_wake_queue(dev_queue);
+ ring->stats.restart_queue++;
+ }
+ }
+ return 0;
+}
+
+static bool hns_nic_tx_fini_pro(struct hns_nic_ring_data *ring_data)
+{
+ struct hnae_ring *ring = ring_data->ring;
+ int head;
+
+ ring_data->ring->q->handle->dev->ops->toggle_ring_irq(ring, 0);
+
+ head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
+
+ if (head != ring->next_to_clean) {
+ ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
+ ring_data->ring, 1);
+
+ return false;
+ } else {
+ return true;
+ }
+}
+
+static bool hns_nic_tx_fini_pro_v2(struct hns_nic_ring_data *ring_data)
+{
+ struct hnae_ring *ring = ring_data->ring;
+ int head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
+
+ if (head == ring->next_to_clean)
+ return true;
+ else
+ return false;
+}
+
+static void hns_nic_tx_clr_all_bufs(struct hns_nic_ring_data *ring_data)
+{
+ struct hnae_ring *ring = ring_data->ring;
+ struct net_device *ndev = ring_data->napi.dev;
+ struct netdev_queue *dev_queue;
+ int head;
+ int bytes, pkts;
+
+ head = ring->next_to_use; /* ntu: ring position set by software */
+ bytes = 0;
+ pkts = 0;
+ while (head != ring->next_to_clean)
+ hns_nic_reclaim_one_desc(ring, &bytes, &pkts);
+
+ dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
+ netdev_tx_reset_queue(dev_queue);
+}
+
+static int hns_nic_common_poll(struct napi_struct *napi, int budget)
+{
+ int clean_complete = 0;
+ struct hns_nic_ring_data *ring_data =
+ container_of(napi, struct hns_nic_ring_data, napi);
+ struct hnae_ring *ring = ring_data->ring;
+
+ clean_complete += ring_data->poll_one(
+ ring_data, budget - clean_complete,
+ ring_data->ex_process);
+
+ if (clean_complete < budget) {
+ if (ring_data->fini_process(ring_data)) {
+ napi_complete(napi);
+ ring->q->handle->dev->ops->toggle_ring_irq(ring, 0);
+ } else {
+ return budget;
+ }
+ }
+
+ return clean_complete;
+}
+
+static irqreturn_t hns_irq_handle(int irq, void *dev)
+{
+ struct hns_nic_ring_data *ring_data = (struct hns_nic_ring_data *)dev;
+
+ ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
+ ring_data->ring, 1);
+ napi_schedule(&ring_data->napi);
+
+ return IRQ_HANDLED;
+}
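+/* Throughout this file, toggle_ring_irq(ring, 1) is used to mask the ring
+ * interrupt before NAPI is scheduled and toggle_ring_irq(ring, 0) to unmask
+ * it once polling completes; this is an observation from the call sites
+ * rather than a documented contract.
+ */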
+
+/**
+ *hns_nic_adjust_link - adjust the link mode according to the phy state or new parameters
+ *@ndev: net device
+ */
+static void hns_nic_adjust_link(struct net_device *ndev)
+{
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct hnae_handle *h = priv->ae_handle;
+ int state = 1;
+
+ /* If there is no phy, there is no need to adjust the link */
+ if (ndev->phydev) {
+ /* When the phy link is down, do nothing */
+ if (ndev->phydev->link == 0)
+ return;
+
+ if (h->dev->ops->need_adjust_link(h, ndev->phydev->speed,
+ ndev->phydev->duplex)) {
+ /* The Hi161X chip cannot change gmac speed and duplex
+ * while traffic is flowing. Delay 200ms to make sure
+ * there is no more data in the chip FIFO.
+ */
+ netif_carrier_off(ndev);
+ msleep(200);
+ h->dev->ops->adjust_link(h, ndev->phydev->speed,
+ ndev->phydev->duplex);
+ netif_carrier_on(ndev);
+ }
+ }
+
+ state = state && h->dev->ops->get_status(h);
+
+ if (state != priv->link) {
+ if (state) {
+ netif_carrier_on(ndev);
+ netif_tx_wake_all_queues(ndev);
+ netdev_info(ndev, "link up\n");
+ } else {
+ netif_carrier_off(ndev);
+ netdev_info(ndev, "link down\n");
+ }
+ priv->link = state;
+ }
+}
+
+/**
+ *hns_nic_init_phy - init phy
+ *@ndev: net device
+ *@h: ae handle
+ * Return 0 on success, negative on failure
+ */
+int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, };
+ struct phy_device *phy_dev = h->phy_dev;
+ int ret;
+
+ if (!h->phy_dev)
+ return 0;
+
+ ethtool_convert_legacy_u32_to_link_mode(supported, h->if_support);
+ linkmode_and(phy_dev->supported, phy_dev->supported, supported);
+ linkmode_copy(phy_dev->advertising, phy_dev->supported);
+
+ if (h->phy_if == PHY_INTERFACE_MODE_XGMII)
+ phy_dev->autoneg = false;
+
+ if (h->phy_if != PHY_INTERFACE_MODE_XGMII) {
+ phy_dev->dev_flags = 0;
+
+ ret = phy_connect_direct(ndev, phy_dev, hns_nic_adjust_link,
+ h->phy_if);
+ } else {
+ ret = phy_attach_direct(ndev, phy_dev, 0, h->phy_if);
+ }
+ if (unlikely(ret))
+ return -ENODEV;
+
+ phy_attached_info(phy_dev);
+
+ return 0;
+}
+
+static int hns_nic_ring_open(struct net_device *netdev, int idx)
+{
+ struct hns_nic_priv *priv = netdev_priv(netdev);
+ struct hnae_handle *h = priv->ae_handle;
+
+ napi_enable(&priv->ring_data[idx].napi);
+
+ enable_irq(priv->ring_data[idx].ring->irq);
+ h->dev->ops->toggle_ring_irq(priv->ring_data[idx].ring, 0);
+
+ return 0;
+}
+
+static int hns_nic_net_set_mac_address(struct net_device *ndev, void *p)
+{
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct hnae_handle *h = priv->ae_handle;
+ struct sockaddr *mac_addr = p;
+ int ret;
+
+ if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ ret = h->dev->ops->set_mac_addr(h, mac_addr->sa_data);
+ if (ret) {
+ netdev_err(ndev, "set_mac_address fail, ret=%d!\n", ret);
+ return ret;
+ }
+
+ eth_hw_addr_set(ndev, mac_addr->sa_data);
+
+ return 0;
+}
+
+static void hns_nic_update_stats(struct net_device *netdev)
+{
+ struct hns_nic_priv *priv = netdev_priv(netdev);
+ struct hnae_handle *h = priv->ae_handle;
+
+ h->dev->ops->update_stats(h, &netdev->stats);
+}
+
+/* set the mac address if it is configured, or leave it to the AE driver */
+static void hns_init_mac_addr(struct net_device *ndev)
+{
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+
+ if (device_get_ethdev_address(priv->dev, ndev)) {
+ eth_hw_addr_random(ndev);
+ dev_warn(priv->dev, "No valid mac, use random mac %pM",
+ ndev->dev_addr);
+ }
+}
+
+static void hns_nic_ring_close(struct net_device *netdev, int idx)
+{
+ struct hns_nic_priv *priv = netdev_priv(netdev);
+ struct hnae_handle *h = priv->ae_handle;
+
+ h->dev->ops->toggle_ring_irq(priv->ring_data[idx].ring, 1);
+ disable_irq(priv->ring_data[idx].ring->irq);
+
+ napi_disable(&priv->ring_data[idx].napi);
+}
+
+static int hns_nic_init_affinity_mask(int q_num, int ring_idx,
+ struct hnae_ring *ring, cpumask_t *mask)
+{
+ int cpu;
+
+ /* IRQ balancing differs between 16-core and 32-core systems.
+ * The cpu mask is derived from the ring index and from the ring
+ * flag, which indicates whether the ring is tx or rx.
+ */
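+ /* Example (assuming 8 queues on a 16-core system, so the else branch
+ * below is taken): tx ring 0 -> cpu 0, rx ring 0 -> cpu 1, tx ring 1 ->
+ * cpu 2, rx ring 1 -> cpu 3, and so on, giving each tx/rx pair an
+ * adjacent even/odd cpu pair.
+ */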
+ if (q_num == num_possible_cpus()) {
+ if (is_tx_ring(ring))
+ cpu = ring_idx;
+ else
+ cpu = ring_idx - q_num;
+ } else {
+ if (is_tx_ring(ring))
+ cpu = ring_idx * 2;
+ else
+ cpu = (ring_idx - q_num) * 2 + 1;
+ }
+
+ cpumask_clear(mask);
+ cpumask_set_cpu(cpu, mask);
+
+ return cpu;
+}
+
+static void hns_nic_free_irq(int q_num, struct hns_nic_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < q_num * 2; i++) {
+ if (priv->ring_data[i].ring->irq_init_flag == RCB_IRQ_INITED) {
+ irq_set_affinity_hint(priv->ring_data[i].ring->irq,
+ NULL);
+ free_irq(priv->ring_data[i].ring->irq,
+ &priv->ring_data[i]);
+ priv->ring_data[i].ring->irq_init_flag =
+ RCB_IRQ_NOT_INITED;
+ }
+ }
+}
+
+static int hns_nic_init_irq(struct hns_nic_priv *priv)
+{
+ struct hnae_handle *h = priv->ae_handle;
+ struct hns_nic_ring_data *rd;
+ int i;
+ int ret;
+ int cpu;
+
+ for (i = 0; i < h->q_num * 2; i++) {
+ rd = &priv->ring_data[i];
+
+ if (rd->ring->irq_init_flag == RCB_IRQ_INITED)
+ break;
+
+ snprintf(rd->ring->ring_name, RCB_RING_NAME_LEN,
+ "%s-%s%d", priv->netdev->name,
+ (is_tx_ring(rd->ring) ? "tx" : "rx"), rd->queue_index);
+
+ rd->ring->ring_name[RCB_RING_NAME_LEN - 1] = '\0';
+
+ irq_set_status_flags(rd->ring->irq, IRQ_NOAUTOEN);
+ ret = request_irq(rd->ring->irq,
+ hns_irq_handle, 0, rd->ring->ring_name, rd);
+ if (ret) {
+ netdev_err(priv->netdev, "request irq(%d) fail\n",
+ rd->ring->irq);
+ goto out_free_irq;
+ }
+
+ cpu = hns_nic_init_affinity_mask(h->q_num, i,
+ rd->ring, &rd->mask);
+
+ if (cpu_online(cpu))
+ irq_set_affinity_hint(rd->ring->irq,
+ &rd->mask);
+
+ rd->ring->irq_init_flag = RCB_IRQ_INITED;
+ }
+
+ return 0;
+
+out_free_irq:
+ hns_nic_free_irq(h->q_num, priv);
+ return ret;
+}
+
+static int hns_nic_net_up(struct net_device *ndev)
+{
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct hnae_handle *h = priv->ae_handle;
+ int i, j;
+ int ret;
+
+ if (!test_bit(NIC_STATE_DOWN, &priv->state))
+ return 0;
+
+ ret = hns_nic_init_irq(priv);
+ if (ret != 0) {
+ netdev_err(ndev, "hns init irq failed! ret=%d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < h->q_num * 2; i++) {
+ ret = hns_nic_ring_open(ndev, i);
+ if (ret)
+ goto out_has_some_queues;
+ }
+
+ ret = h->dev->ops->set_mac_addr(h, ndev->dev_addr);
+ if (ret)
+ goto out_set_mac_addr_err;
+
+ ret = h->dev->ops->start ? h->dev->ops->start(h) : 0;
+ if (ret)
+ goto out_start_err;
+
+ if (ndev->phydev)
+ phy_start(ndev->phydev);
+
+ clear_bit(NIC_STATE_DOWN, &priv->state);
+ (void)mod_timer(&priv->service_timer, jiffies + SERVICE_TIMER_HZ);
+
+ return 0;
+
+out_start_err:
+ netif_stop_queue(ndev);
+out_set_mac_addr_err:
+out_has_some_queues:
+ for (j = i - 1; j >= 0; j--)
+ hns_nic_ring_close(ndev, j);
+
+ hns_nic_free_irq(h->q_num, priv);
+ set_bit(NIC_STATE_DOWN, &priv->state);
+
+ return ret;
+}
+
+static void hns_nic_net_down(struct net_device *ndev)
+{
+ int i;
+ struct hnae_ae_ops *ops;
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+
+ if (test_and_set_bit(NIC_STATE_DOWN, &priv->state))
+ return;
+
+ (void)del_timer_sync(&priv->service_timer);
+ netif_tx_stop_all_queues(ndev);
+ netif_carrier_off(ndev);
+ netif_tx_disable(ndev);
+ priv->link = 0;
+
+ if (ndev->phydev)
+ phy_stop(ndev->phydev);
+
+ ops = priv->ae_handle->dev->ops;
+
+ if (ops->stop)
+ ops->stop(priv->ae_handle);
+
+ netif_tx_stop_all_queues(ndev);
+
+ for (i = priv->ae_handle->q_num - 1; i >= 0; i--) {
+ hns_nic_ring_close(ndev, i);
+ hns_nic_ring_close(ndev, i + priv->ae_handle->q_num);
+
+ /* clean tx buffers*/
+ hns_nic_tx_clr_all_bufs(priv->ring_data + i);
+ }
+}
+
+void hns_nic_net_reset(struct net_device *ndev)
+{
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct hnae_handle *handle = priv->ae_handle;
+
+ while (test_and_set_bit(NIC_STATE_RESETTING, &priv->state))
+ usleep_range(1000, 2000);
+
+ (void)hnae_reinit_handle(handle);
+
+ clear_bit(NIC_STATE_RESETTING, &priv->state);
+}
+
+void hns_nic_net_reinit(struct net_device *netdev)
+{
+ struct hns_nic_priv *priv = netdev_priv(netdev);
+ enum hnae_port_type type = priv->ae_handle->port_type;
+
+ netif_trans_update(priv->netdev);
+ while (test_and_set_bit(NIC_STATE_REINITING, &priv->state))
+ usleep_range(1000, 2000);
+
+ hns_nic_net_down(netdev);
+
+ /* Only do hns_nic_net_reset in debug mode
+ * because of hardware limitation.
+ */
+ if (type == HNAE_PORT_DEBUG)
+ hns_nic_net_reset(netdev);
+
+ (void)hns_nic_net_up(netdev);
+ clear_bit(NIC_STATE_REINITING, &priv->state);
+}
+
+static int hns_nic_net_open(struct net_device *ndev)
+{
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct hnae_handle *h = priv->ae_handle;
+ int ret;
+
+ if (test_bit(NIC_STATE_TESTING, &priv->state))
+ return -EBUSY;
+
+ priv->link = 0;
+ netif_carrier_off(ndev);
+
+ ret = netif_set_real_num_tx_queues(ndev, h->q_num);
+ if (ret < 0) {
+ netdev_err(ndev, "netif_set_real_num_tx_queues fail, ret=%d!\n",
+ ret);
+ return ret;
+ }
+
+ ret = netif_set_real_num_rx_queues(ndev, h->q_num);
+ if (ret < 0) {
+ netdev_err(ndev,
+ "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
+ return ret;
+ }
+
+ ret = hns_nic_net_up(ndev);
+ if (ret) {
+ netdev_err(ndev,
+ "hns net up fail, ret=%d!\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hns_nic_net_stop(struct net_device *ndev)
+{
+ hns_nic_net_down(ndev);
+
+ return 0;
+}
+
+static void hns_tx_timeout_reset(struct hns_nic_priv *priv);
+#define HNS_TX_TIMEO_LIMIT (40 * HZ)
+static void hns_nic_net_timeout(struct net_device *ndev, unsigned int txqueue)
+{
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+
+ if (ndev->watchdog_timeo < HNS_TX_TIMEO_LIMIT) {
+ ndev->watchdog_timeo *= 2;
+ netdev_info(ndev, "watchdog_timo changed to %d.\n",
+ ndev->watchdog_timeo);
+ } else {
+ ndev->watchdog_timeo = HNS_NIC_TX_TIMEOUT;
+ hns_tx_timeout_reset(priv);
+ }
+}
+
+static netdev_tx_t hns_nic_net_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+
+ assert(skb->queue_mapping < priv->ae_handle->q_num);
+
+ return hns_nic_net_xmit_hw(ndev, skb,
+ &tx_ring_data(priv, skb->queue_mapping));
+}
+
+static void hns_nic_drop_rx_fetch(struct hns_nic_ring_data *ring_data,
+ struct sk_buff *skb)
+{
+ dev_kfree_skb_any(skb);
+}
+
+#define HNS_LB_TX_RING 0
+static struct sk_buff *hns_assemble_skb(struct net_device *ndev)
+{
+ struct sk_buff *skb;
+ struct ethhdr *ethhdr;
+ int frame_len;
+
+ /* allocate test skb */
+ skb = alloc_skb(64, GFP_KERNEL);
+ if (!skb)
+ return NULL;
+
+ skb_put(skb, 64);
+ skb->dev = ndev;
+ memset(skb->data, 0xFF, skb->len);
+
+ /* must be a tcp/ip packet */
+ ethhdr = (struct ethhdr *)skb->data;
+ ethhdr->h_proto = htons(ETH_P_IP);
+
+ frame_len = skb->len & (~1ul);
+ memset(&skb->data[frame_len / 2], 0xAA,
+ frame_len / 2 - 1);
+
+ skb->queue_mapping = HNS_LB_TX_RING;
+
+ return skb;
+}
+
+static int hns_enable_serdes_lb(struct net_device *ndev)
+{
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct hnae_handle *h = priv->ae_handle;
+ struct hnae_ae_ops *ops = h->dev->ops;
+ int speed, duplex;
+ int ret;
+
+ ret = ops->set_loopback(h, MAC_INTERNALLOOP_SERDES, 1);
+ if (ret)
+ return ret;
+
+ ret = ops->start ? ops->start(h) : 0;
+ if (ret)
+ return ret;
+
+ /* adjust the link speed and duplex */
+ if (h->phy_if != PHY_INTERFACE_MODE_XGMII)
+ speed = 1000;
+ else
+ speed = 10000;
+ duplex = 1;
+
+ ops->adjust_link(h, speed, duplex);
+
+ /* wait h/w ready */
+ mdelay(300);
+
+ return 0;
+}
+
+static void hns_disable_serdes_lb(struct net_device *ndev)
+{
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct hnae_handle *h = priv->ae_handle;
+ struct hnae_ae_ops *ops = h->dev->ops;
+
+ ops->stop(h);
+ ops->set_loopback(h, MAC_INTERNALLOOP_SERDES, 0);
+}
+
+/**
+ *hns_nic_clear_all_rx_fetch - clear the descriptors the chip has already
+ *fetched. The function works as follows:
+ * 1. if an rx ring is found with a non-zero page_offset between head and
+ * tail, the chip has fetched the wrong descriptors for a ring whose
+ * buffer size is 4096.
+ * 2. enable the chip serdes loopback and point the rss indirection table
+ * at that ring.
+ * 3. construct 64-byte ip broadcast packets and wait until the associated
+ * rx ring has received them all, so that the chip fetches fresh
+ * descriptors.
+ * 4. restore the original state.
+ *
+ *@ndev: net device
+ */
+static int hns_nic_clear_all_rx_fetch(struct net_device *ndev)
+{
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct hnae_handle *h = priv->ae_handle;
+ struct hnae_ae_ops *ops = h->dev->ops;
+ struct hns_nic_ring_data *rd;
+ struct hnae_ring *ring;
+ struct sk_buff *skb;
+ u32 *org_indir;
+ u32 *cur_indir;
+ int indir_size;
+ int head, tail;
+ int fetch_num;
+ int i, j;
+ bool found;
+ int retry_times;
+ int ret = 0;
+
+ /* alloc indir memory */
+ indir_size = ops->get_rss_indir_size(h) * sizeof(*org_indir);
+ org_indir = kzalloc(indir_size, GFP_KERNEL);
+ if (!org_indir)
+ return -ENOMEM;
+
+ /* store the original indirection */
+ ops->get_rss(h, org_indir, NULL, NULL);
+
+ cur_indir = kzalloc(indir_size, GFP_KERNEL);
+ if (!cur_indir) {
+ ret = -ENOMEM;
+ goto cur_indir_alloc_err;
+ }
+
+ /* set loopback */
+ if (hns_enable_serdes_lb(ndev)) {
+ ret = -EINVAL;
+ goto enable_serdes_lb_err;
+ }
+
+ /* walk every rx ring and clear its fetched descriptors */
+ for (i = 0; i < h->q_num; i++) {
+ ring = &h->qs[i]->rx_ring;
+ head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
+ tail = readl_relaxed(ring->io_base + RCB_REG_TAIL);
+ found = false;
+ fetch_num = ring_dist(ring, head, tail);
+
+ while (head != tail) {
+ if (ring->desc_cb[head].page_offset != 0) {
+ found = true;
+ break;
+ }
+
+ head++;
+ if (head == ring->desc_num)
+ head = 0;
+ }
+
+ if (found) {
+ for (j = 0; j < indir_size / sizeof(*org_indir); j++)
+ cur_indir[j] = i;
+ ops->set_rss(h, cur_indir, NULL, 0);
+
+ for (j = 0; j < fetch_num; j++) {
+ /* alloc one skb and init */
+ skb = hns_assemble_skb(ndev);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ rd = &tx_ring_data(priv, skb->queue_mapping);
+ hns_nic_net_xmit_hw(ndev, skb, rd);
+
+ retry_times = 0;
+ while (retry_times++ < 10) {
+ mdelay(10);
+ /* clean rx */
+ rd = &rx_ring_data(priv, i);
+ if (rd->poll_one(rd, fetch_num,
+ hns_nic_drop_rx_fetch))
+ break;
+ }
+
+ retry_times = 0;
+ while (retry_times++ < 10) {
+ mdelay(10);
+ /* reclaim the packet sent on tx ring 0 */
+ rd = &tx_ring_data(priv,
+ HNS_LB_TX_RING);
+ if (rd->poll_one(rd, fetch_num, NULL))
+ break;
+ }
+ }
+ }
+ }
+
+out:
+ /* restore everything */
+ ops->set_rss(h, org_indir, NULL, 0);
+ hns_disable_serdes_lb(ndev);
+enable_serdes_lb_err:
+ kfree(cur_indir);
+cur_indir_alloc_err:
+ kfree(org_indir);
+
+ return ret;
+}
+
+static int hns_nic_change_mtu(struct net_device *ndev, int new_mtu)
+{
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct hnae_handle *h = priv->ae_handle;
+ bool if_running = netif_running(ndev);
+ int ret;
+
+ /* MTU < 68 is an error and causes problems on some kernels */
+ if (new_mtu < 68)
+ return -EINVAL;
+
+ /* MTU no change */
+ if (new_mtu == ndev->mtu)
+ return 0;
+
+ if (!h->dev->ops->set_mtu)
+ return -ENOTSUPP;
+
+ if (if_running) {
+ (void)hns_nic_net_stop(ndev);
+ msleep(100);
+ }
+
+ if (priv->enet_ver != AE_VERSION_1 &&
+ ndev->mtu <= BD_SIZE_2048_MAX_MTU &&
+ new_mtu > BD_SIZE_2048_MAX_MTU) {
+ /* update desc */
+ hnae_reinit_all_ring_desc(h);
+
+ /* clear the descriptors which the chip has already fetched */
+ ret = hns_nic_clear_all_rx_fetch(ndev);
+
+ /* the page offset must be consistent with the descriptors */
+ hnae_reinit_all_ring_page_off(h);
+
+ if (ret) {
+ netdev_err(ndev, "clear the fetched desc fail\n");
+ goto out;
+ }
+ }
+
+ ret = h->dev->ops->set_mtu(h, new_mtu);
+ if (ret) {
+ netdev_err(ndev, "set mtu fail, return value %d\n",
+ ret);
+ goto out;
+ }
+
+ /* finally, set new mtu to netdevice */
+ ndev->mtu = new_mtu;
+
+out:
+ if (if_running) {
+ if (hns_nic_net_open(ndev)) {
+ netdev_err(ndev, "hns net open fail\n");
+ ret = -EINVAL;
+ }
+ }
+
+ return ret;
+}
+
+static int hns_nic_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct hns_nic_priv *priv = netdev_priv(netdev);
+
+ switch (priv->enet_ver) {
+ case AE_VERSION_1:
+ if (features & (NETIF_F_TSO | NETIF_F_TSO6))
+ netdev_info(netdev, "enet v1 do not support tso!\n");
+ break;
+ default:
+ break;
+ }
+ netdev->features = features;
+ return 0;
+}
+
+static netdev_features_t hns_nic_fix_features(
+ struct net_device *netdev, netdev_features_t features)
+{
+ struct hns_nic_priv *priv = netdev_priv(netdev);
+
+ switch (priv->enet_ver) {
+ case AE_VERSION_1:
+ features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_HW_VLAN_CTAG_FILTER);
+ break;
+ default:
+ break;
+ }
+ return features;
+}
+
+static int hns_nic_uc_sync(struct net_device *netdev, const unsigned char *addr)
+{
+ struct hns_nic_priv *priv = netdev_priv(netdev);
+ struct hnae_handle *h = priv->ae_handle;
+
+ if (h->dev->ops->add_uc_addr)
+ return h->dev->ops->add_uc_addr(h, addr);
+
+ return 0;
+}
+
+static int hns_nic_uc_unsync(struct net_device *netdev,
+ const unsigned char *addr)
+{
+ struct hns_nic_priv *priv = netdev_priv(netdev);
+ struct hnae_handle *h = priv->ae_handle;
+
+ if (h->dev->ops->rm_uc_addr)
+ return h->dev->ops->rm_uc_addr(h, addr);
+
+ return 0;
+}
+
+/**
+ * hns_set_multicast_list - set multicast mac addresses
+ * @ndev: net device
+ *
+ * return void
+ */
+static void hns_set_multicast_list(struct net_device *ndev)
+{
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct hnae_handle *h = priv->ae_handle;
+ struct netdev_hw_addr *ha = NULL;
+
+ if (!h) {
+ netdev_err(ndev, "hnae handle is null\n");
+ return;
+ }
+
+ if (h->dev->ops->clr_mc_addr)
+ if (h->dev->ops->clr_mc_addr(h))
+ netdev_err(ndev, "clear multicast address fail\n");
+
+ if (h->dev->ops->set_mc_addr) {
+ netdev_for_each_mc_addr(ha, ndev)
+ if (h->dev->ops->set_mc_addr(h, ha->addr))
+ netdev_err(ndev, "set multicast fail\n");
+ }
+}
+
+static void hns_nic_set_rx_mode(struct net_device *ndev)
+{
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct hnae_handle *h = priv->ae_handle;
+
+ if (h->dev->ops->set_promisc_mode) {
+ if (ndev->flags & IFF_PROMISC)
+ h->dev->ops->set_promisc_mode(h, 1);
+ else
+ h->dev->ops->set_promisc_mode(h, 0);
+ }
+
+ hns_set_multicast_list(ndev);
+
+ if (__dev_uc_sync(ndev, hns_nic_uc_sync, hns_nic_uc_unsync))
+ netdev_err(ndev, "sync uc address fail\n");
+}
+
+static void hns_nic_get_stats64(struct net_device *ndev,
+ struct rtnl_link_stats64 *stats)
+{
+ int idx;
+ u64 tx_bytes = 0;
+ u64 rx_bytes = 0;
+ u64 tx_pkts = 0;
+ u64 rx_pkts = 0;
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct hnae_handle *h = priv->ae_handle;
+
+ for (idx = 0; idx < h->q_num; idx++) {
+ tx_bytes += h->qs[idx]->tx_ring.stats.tx_bytes;
+ tx_pkts += h->qs[idx]->tx_ring.stats.tx_pkts;
+ rx_bytes += h->qs[idx]->rx_ring.stats.rx_bytes;
+ rx_pkts += h->qs[idx]->rx_ring.stats.rx_pkts;
+ }
+
+ stats->tx_bytes = tx_bytes;
+ stats->tx_packets = tx_pkts;
+ stats->rx_bytes = rx_bytes;
+ stats->rx_packets = rx_pkts;
+
+ stats->rx_errors = ndev->stats.rx_errors;
+ stats->multicast = ndev->stats.multicast;
+ stats->rx_length_errors = ndev->stats.rx_length_errors;
+ stats->rx_crc_errors = ndev->stats.rx_crc_errors;
+ stats->rx_missed_errors = ndev->stats.rx_missed_errors;
+
+ stats->tx_errors = ndev->stats.tx_errors;
+ stats->rx_dropped = ndev->stats.rx_dropped;
+ stats->tx_dropped = ndev->stats.tx_dropped;
+ stats->collisions = ndev->stats.collisions;
+ stats->rx_over_errors = ndev->stats.rx_over_errors;
+ stats->rx_frame_errors = ndev->stats.rx_frame_errors;
+ stats->rx_fifo_errors = ndev->stats.rx_fifo_errors;
+ stats->tx_aborted_errors = ndev->stats.tx_aborted_errors;
+ stats->tx_carrier_errors = ndev->stats.tx_carrier_errors;
+ stats->tx_fifo_errors = ndev->stats.tx_fifo_errors;
+ stats->tx_heartbeat_errors = ndev->stats.tx_heartbeat_errors;
+ stats->tx_window_errors = ndev->stats.tx_window_errors;
+ stats->rx_compressed = ndev->stats.rx_compressed;
+ stats->tx_compressed = ndev->stats.tx_compressed;
+}
+
+static u16
+hns_nic_select_queue(struct net_device *ndev, struct sk_buff *skb,
+ struct net_device *sb_dev)
+{
+ struct ethhdr *eth_hdr = (struct ethhdr *)skb->data;
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+
+ /* fix the hardware broadcast/multicast packet queue loopback issue by always using queue 0 */
+ if (!AE_IS_VER1(priv->enet_ver) &&
+ is_multicast_ether_addr(eth_hdr->h_dest))
+ return 0;
+ else
+ return netdev_pick_tx(ndev, skb, NULL);
+}
+
+static const struct net_device_ops hns_nic_netdev_ops = {
+ .ndo_open = hns_nic_net_open,
+ .ndo_stop = hns_nic_net_stop,
+ .ndo_start_xmit = hns_nic_net_xmit,
+ .ndo_tx_timeout = hns_nic_net_timeout,
+ .ndo_set_mac_address = hns_nic_net_set_mac_address,
+ .ndo_change_mtu = hns_nic_change_mtu,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
+ .ndo_set_features = hns_nic_set_features,
+ .ndo_fix_features = hns_nic_fix_features,
+ .ndo_get_stats64 = hns_nic_get_stats64,
+ .ndo_set_rx_mode = hns_nic_set_rx_mode,
+ .ndo_select_queue = hns_nic_select_queue,
+};
+
+static void hns_nic_update_link_status(struct net_device *netdev)
+{
+ struct hns_nic_priv *priv = netdev_priv(netdev);
+
+ struct hnae_handle *h = priv->ae_handle;
+
+ if (h->phy_dev) {
+ if (h->phy_if != PHY_INTERFACE_MODE_XGMII)
+ return;
+
+ (void)genphy_read_status(h->phy_dev);
+ }
+ hns_nic_adjust_link(netdev);
+}
+
+/* for dumping key regs */
+static void hns_nic_dump(struct hns_nic_priv *priv)
+{
+ struct hnae_handle *h = priv->ae_handle;
+ struct hnae_ae_ops *ops = h->dev->ops;
+ u32 *data, reg_num, i;
+
+ if (ops->get_regs_len && ops->get_regs) {
+ reg_num = ops->get_regs_len(priv->ae_handle);
+ reg_num = (reg_num + 3ul) & ~3ul;
+ data = kcalloc(reg_num, sizeof(u32), GFP_KERNEL);
+ if (data) {
+ ops->get_regs(priv->ae_handle, data);
+ for (i = 0; i < reg_num; i += 4)
+ pr_info("0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ i, data[i], data[i + 1],
+ data[i + 2], data[i + 3]);
+ kfree(data);
+ }
+ }
+
+ for (i = 0; i < h->q_num; i++) {
+ pr_info("tx_queue%d_next_to_clean:%d\n",
+ i, h->qs[i]->tx_ring.next_to_clean);
+ pr_info("tx_queue%d_next_to_use:%d\n",
+ i, h->qs[i]->tx_ring.next_to_use);
+ pr_info("rx_queue%d_next_to_clean:%d\n",
+ i, h->qs[i]->rx_ring.next_to_clean);
+ pr_info("rx_queue%d_next_to_use:%d\n",
+ i, h->qs[i]->rx_ring.next_to_use);
+ }
+}
+
+/* for resetting subtask */
+static void hns_nic_reset_subtask(struct hns_nic_priv *priv)
+{
+ enum hnae_port_type type = priv->ae_handle->port_type;
+
+ if (!test_bit(NIC_STATE2_RESET_REQUESTED, &priv->state))
+ return;
+ clear_bit(NIC_STATE2_RESET_REQUESTED, &priv->state);
+
+ /* If we're already down, removing or resetting, just bail */
+ if (test_bit(NIC_STATE_DOWN, &priv->state) ||
+ test_bit(NIC_STATE_REMOVING, &priv->state) ||
+ test_bit(NIC_STATE_RESETTING, &priv->state))
+ return;
+
+ hns_nic_dump(priv);
+ netdev_info(priv->netdev, "try to reset %s port!\n",
+ (type == HNAE_PORT_DEBUG ? "debug" : "service"));
+
+ rtnl_lock();
+ /* put off any impending NetWatchDogTimeout */
+ netif_trans_update(priv->netdev);
+ hns_nic_net_reinit(priv->netdev);
+
+ rtnl_unlock();
+}
+
+/* mark the service task as complete */
+static void hns_nic_service_event_complete(struct hns_nic_priv *priv)
+{
+ WARN_ON(!test_bit(NIC_STATE_SERVICE_SCHED, &priv->state));
+ /* make sure all prior updates are visible before clearing the flag */
+ smp_mb__before_atomic();
+ clear_bit(NIC_STATE_SERVICE_SCHED, &priv->state);
+}
+
+static void hns_nic_service_task(struct work_struct *work)
+{
+ struct hns_nic_priv *priv
+ = container_of(work, struct hns_nic_priv, service_task);
+ struct hnae_handle *h = priv->ae_handle;
+
+ hns_nic_reset_subtask(priv);
+ hns_nic_update_link_status(priv->netdev);
+ h->dev->ops->update_led_status(h);
+ hns_nic_update_stats(priv->netdev);
+
+ hns_nic_service_event_complete(priv);
+}
+
+static void hns_nic_task_schedule(struct hns_nic_priv *priv)
+{
+ if (!test_bit(NIC_STATE_DOWN, &priv->state) &&
+ !test_bit(NIC_STATE_REMOVING, &priv->state) &&
+ !test_and_set_bit(NIC_STATE_SERVICE_SCHED, &priv->state))
+ (void)schedule_work(&priv->service_task);
+}
+
+static void hns_nic_service_timer(struct timer_list *t)
+{
+ struct hns_nic_priv *priv = from_timer(priv, t, service_timer);
+
+ (void)mod_timer(&priv->service_timer, jiffies + SERVICE_TIMER_HZ);
+
+ hns_nic_task_schedule(priv);
+}
+
+/**
+ * hns_tx_timeout_reset - initiate reset due to Tx timeout
+ * @priv: driver private struct
+ **/
+static void hns_tx_timeout_reset(struct hns_nic_priv *priv)
+{
+ /* Do the reset outside of interrupt context */
+ if (!test_bit(NIC_STATE_DOWN, &priv->state)) {
+ set_bit(NIC_STATE2_RESET_REQUESTED, &priv->state);
+ netdev_warn(priv->netdev,
+ "initiating reset due to tx timeout(%llu,0x%lx)\n",
+ priv->tx_timeout_count, priv->state);
+ priv->tx_timeout_count++;
+ hns_nic_task_schedule(priv);
+ }
+}
+
+static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
+{
+ struct hnae_handle *h = priv->ae_handle;
+ struct hns_nic_ring_data *rd;
+ bool is_ver1 = AE_IS_VER1(priv->enet_ver);
+ int i;
+
+ if (h->q_num > NIC_MAX_Q_PER_VF) {
+ netdev_err(priv->netdev, "too much queue (%d)\n", h->q_num);
+ return -EINVAL;
+ }
+
+ priv->ring_data = kzalloc(array3_size(h->q_num,
+ sizeof(*priv->ring_data), 2),
+ GFP_KERNEL);
+ if (!priv->ring_data)
+ return -ENOMEM;
+
+ for (i = 0; i < h->q_num; i++) {
+ rd = &priv->ring_data[i];
+ rd->queue_index = i;
+ rd->ring = &h->qs[i]->tx_ring;
+ rd->poll_one = hns_nic_tx_poll_one;
+ rd->fini_process = is_ver1 ? hns_nic_tx_fini_pro :
+ hns_nic_tx_fini_pro_v2;
+
+ netif_napi_add(priv->netdev, &rd->napi, hns_nic_common_poll);
+ rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
+ }
+ for (i = h->q_num; i < h->q_num * 2; i++) {
+ rd = &priv->ring_data[i];
+ rd->queue_index = i - h->q_num;
+ rd->ring = &h->qs[i - h->q_num]->rx_ring;
+ rd->poll_one = hns_nic_rx_poll_one;
+ rd->ex_process = hns_nic_rx_up_pro;
+ rd->fini_process = is_ver1 ? hns_nic_rx_fini_pro :
+ hns_nic_rx_fini_pro_v2;
+
+ netif_napi_add(priv->netdev, &rd->napi, hns_nic_common_poll);
+ rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
+ }
+
+ return 0;
+}
+
+static void hns_nic_uninit_ring_data(struct hns_nic_priv *priv)
+{
+ struct hnae_handle *h = priv->ae_handle;
+ int i;
+
+ for (i = 0; i < h->q_num * 2; i++) {
+ netif_napi_del(&priv->ring_data[i].napi);
+ if (priv->ring_data[i].ring->irq_init_flag == RCB_IRQ_INITED) {
+ (void)irq_set_affinity_hint(
+ priv->ring_data[i].ring->irq,
+ NULL);
+ free_irq(priv->ring_data[i].ring->irq,
+ &priv->ring_data[i]);
+ }
+
+ priv->ring_data[i].ring->irq_init_flag = RCB_IRQ_NOT_INITED;
+ }
+ kfree(priv->ring_data);
+}
+
+static void hns_nic_set_priv_ops(struct net_device *netdev)
+{
+ struct hns_nic_priv *priv = netdev_priv(netdev);
+ struct hnae_handle *h = priv->ae_handle;
+
+ if (AE_IS_VER1(priv->enet_ver)) {
+ priv->ops.fill_desc = fill_desc;
+ priv->ops.get_rxd_bnum = get_rx_desc_bnum;
+ priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
+ } else {
+ priv->ops.get_rxd_bnum = get_v2rx_desc_bnum;
+ priv->ops.fill_desc = fill_desc_v2;
+ priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx_v2;
+ netif_set_tso_max_size(netdev, 7 * 4096);
+ /* enable tso at init time;
+ * tso is then controlled per packet via the TSE bit in the bd
+ */
+ h->dev->ops->set_tso_stats(h, 1);
+ }
+}
+
+static int hns_nic_try_get_ae(struct net_device *ndev)
+{
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct hnae_handle *h;
+ int ret;
+
+ h = hnae_get_handle(&priv->netdev->dev,
+ priv->fwnode, priv->port_id, NULL);
+ if (IS_ERR_OR_NULL(h)) {
+ ret = -ENODEV;
+ dev_dbg(priv->dev, "has not handle, register notifier!\n");
+ goto out;
+ }
+ priv->ae_handle = h;
+
+ ret = hns_nic_init_phy(ndev, h);
+ if (ret) {
+ dev_err(priv->dev, "probe phy device fail!\n");
+ goto out_init_phy;
+ }
+
+ ret = hns_nic_init_ring_data(priv);
+ if (ret) {
+ ret = -ENOMEM;
+ goto out_init_ring_data;
+ }
+
+ hns_nic_set_priv_ops(ndev);
+
+ ret = register_netdev(ndev);
+ if (ret) {
+ dev_err(priv->dev, "probe register netdev fail!\n");
+ goto out_reg_ndev_fail;
+ }
+ return 0;
+
+out_reg_ndev_fail:
+ hns_nic_uninit_ring_data(priv);
+ priv->ring_data = NULL;
+out_init_phy:
+out_init_ring_data:
+ hnae_put_handle(priv->ae_handle);
+ priv->ae_handle = NULL;
+out:
+ return ret;
+}
+
+static int hns_nic_notifier_action(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct hns_nic_priv *priv =
+ container_of(nb, struct hns_nic_priv, notifier_block);
+
+ assert(action == HNAE_AE_REGISTER);
+
+ if (!hns_nic_try_get_ae(priv->netdev)) {
+ hnae_unregister_notifier(&priv->notifier_block);
+ priv->notifier_block.notifier_call = NULL;
+ }
+ return 0;
+}
+
+static int hns_nic_dev_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct net_device *ndev;
+ struct hns_nic_priv *priv;
+ u32 port_id;
+ int ret;
+
+ ndev = alloc_etherdev_mq(sizeof(struct hns_nic_priv), NIC_MAX_Q_PER_VF);
+ if (!ndev)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ndev);
+
+ priv = netdev_priv(ndev);
+ priv->dev = dev;
+ priv->netdev = ndev;
+
+ if (dev_of_node(dev)) {
+ struct device_node *ae_node;
+
+ if (of_device_is_compatible(dev->of_node,
+ "hisilicon,hns-nic-v1"))
+ priv->enet_ver = AE_VERSION_1;
+ else
+ priv->enet_ver = AE_VERSION_2;
+
+ ae_node = of_parse_phandle(dev->of_node, "ae-handle", 0);
+ if (!ae_node) {
+ ret = -ENODEV;
+ dev_err(dev, "not find ae-handle\n");
+ goto out_read_prop_fail;
+ }
+ priv->fwnode = &ae_node->fwnode;
+ } else if (is_acpi_node(dev->fwnode)) {
+ struct fwnode_reference_args args;
+
+ if (acpi_dev_found(hns_enet_acpi_match[0].id))
+ priv->enet_ver = AE_VERSION_1;
+ else if (acpi_dev_found(hns_enet_acpi_match[1].id))
+ priv->enet_ver = AE_VERSION_2;
+ else {
+ ret = -ENXIO;
+ goto out_read_prop_fail;
+ }
+
+ /* try to find port-idx-in-ae first */
+ ret = acpi_node_get_property_reference(dev->fwnode,
+ "ae-handle", 0, &args);
+ if (ret) {
+ dev_err(dev, "not find ae-handle\n");
+ goto out_read_prop_fail;
+ }
+ if (!is_acpi_device_node(args.fwnode)) {
+ ret = -EINVAL;
+ goto out_read_prop_fail;
+ }
+ priv->fwnode = args.fwnode;
+ } else {
+ dev_err(dev, "cannot read cfg data from OF or acpi\n");
+ ret = -ENXIO;
+ goto out_read_prop_fail;
+ }
+
+ ret = device_property_read_u32(dev, "port-idx-in-ae", &port_id);
+ if (ret) {
+ /* only for compatibility with old bindings */
+ ret = device_property_read_u32(dev, "port-id", &port_id);
+ if (ret)
+ goto out_read_prop_fail;
+ /* for an old dts, we need to calculate the port offset */
+ port_id = port_id < HNS_SRV_OFFSET ? port_id + HNS_DEBUG_OFFSET
+ : port_id - HNS_SRV_OFFSET;
+ }
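+ /* For example, with the legacy "port-id" binding the remapping above
+ * sends ids 0 and 1 (below HNS_SRV_OFFSET) to 6 and 7, presumably the
+ * debug ports given HNS_DEBUG_OFFSET, and ids 2..7 to service ports
+ * 0..5.
+ */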
+ priv->port_id = port_id;
+
+ hns_init_mac_addr(ndev);
+
+ ndev->watchdog_timeo = HNS_NIC_TX_TIMEOUT;
+ ndev->priv_flags |= IFF_UNICAST_FLT;
+ ndev->netdev_ops = &hns_nic_netdev_ops;
+ hns_ethtool_set_ops(ndev);
+
+ ndev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
+ NETIF_F_GRO;
+ ndev->vlan_features |=
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;
+ ndev->vlan_features |= NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;
+
+ /* MTU range: 68 - 9578 (v1) or 9706 (v2) */
+ ndev->min_mtu = MAC_MIN_MTU;
+ switch (priv->enet_ver) {
+ case AE_VERSION_2:
+ ndev->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_NTUPLE;
+ ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
+ NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6;
+ ndev->vlan_features |= NETIF_F_TSO | NETIF_F_TSO6;
+ ndev->max_mtu = MAC_MAX_MTU_V2 -
+ (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
+ break;
+ default:
+ ndev->max_mtu = MAC_MAX_MTU -
+ (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
+ break;
+ }
+
+ SET_NETDEV_DEV(ndev, dev);
+
+ if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
+ dev_dbg(dev, "set mask to 64bit\n");
+ else
+ dev_err(dev, "set mask to 64bit fail!\n");
+
+ /* carrier off reporting is important to ethtool even BEFORE open */
+ netif_carrier_off(ndev);
+
+ timer_setup(&priv->service_timer, hns_nic_service_timer, 0);
+ INIT_WORK(&priv->service_task, hns_nic_service_task);
+
+ set_bit(NIC_STATE_SERVICE_INITED, &priv->state);
+ clear_bit(NIC_STATE_SERVICE_SCHED, &priv->state);
+ set_bit(NIC_STATE_DOWN, &priv->state);
+
+ if (hns_nic_try_get_ae(priv->netdev)) {
+ priv->notifier_block.notifier_call = hns_nic_notifier_action;
+ ret = hnae_register_notifier(&priv->notifier_block);
+ if (ret) {
+ dev_err(dev, "register notifier fail!\n");
+ goto out_notify_fail;
+ }
+ dev_dbg(dev, "has not handle, register notifier!\n");
+ }
+
+ return 0;
+
+out_notify_fail:
+ (void)cancel_work_sync(&priv->service_task);
+out_read_prop_fail:
+ /* safe for ACPI FW */
+ of_node_put(to_of_node(priv->fwnode));
+ free_netdev(ndev);
+ return ret;
+}
+
+static int hns_nic_dev_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+
+ if (ndev->reg_state != NETREG_UNINITIALIZED)
+ unregister_netdev(ndev);
+
+ if (priv->ring_data)
+ hns_nic_uninit_ring_data(priv);
+ priv->ring_data = NULL;
+
+ if (ndev->phydev)
+ phy_disconnect(ndev->phydev);
+
+ if (!IS_ERR_OR_NULL(priv->ae_handle))
+ hnae_put_handle(priv->ae_handle);
+ priv->ae_handle = NULL;
+ if (priv->notifier_block.notifier_call)
+ hnae_unregister_notifier(&priv->notifier_block);
+ priv->notifier_block.notifier_call = NULL;
+
+ set_bit(NIC_STATE_REMOVING, &priv->state);
+ (void)cancel_work_sync(&priv->service_task);
+
+ /* safe for ACPI FW */
+ of_node_put(to_of_node(priv->fwnode));
+
+ free_netdev(ndev);
+ return 0;
+}
+
+static const struct of_device_id hns_enet_of_match[] = {
+ {.compatible = "hisilicon,hns-nic-v1",},
+ {.compatible = "hisilicon,hns-nic-v2",},
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, hns_enet_of_match);
+
+static struct platform_driver hns_nic_dev_driver = {
+ .driver = {
+ .name = "hns-nic",
+ .of_match_table = hns_enet_of_match,
+ .acpi_match_table = ACPI_PTR(hns_enet_acpi_match),
+ },
+ .probe = hns_nic_dev_probe,
+ .remove = hns_nic_dev_remove,
+};
+
+module_platform_driver(hns_nic_dev_driver);
+
+MODULE_DESCRIPTION("HISILICON HNS Ethernet driver");
+MODULE_AUTHOR("Hisilicon, Inc.");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:hns-nic");
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.h b/drivers/net/ethernet/hisilicon/hns/hns_enet.h
new file mode 100644
index 0000000000..3f3ee032f6
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ */
+
+#ifndef __HNS_ENET_H
+#define __HNS_ENET_H
+
+#include <linux/netdevice.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+
+#include "hnae.h"
+
+#define HNS_DEBUG_OFFSET 6
+#define HNS_SRV_OFFSET 2
+
+enum hns_nic_state {
+ NIC_STATE_TESTING = 0,
+ NIC_STATE_RESETTING,
+ NIC_STATE_REINITING,
+ NIC_STATE_DOWN,
+ NIC_STATE_DISABLED,
+ NIC_STATE_REMOVING,
+ NIC_STATE_SERVICE_INITED,
+ NIC_STATE_SERVICE_SCHED,
+ NIC_STATE2_RESET_REQUESTED,
+ NIC_STATE_MAX
+};
+
+struct hns_nic_ring_data {
+ struct hnae_ring *ring;
+ struct napi_struct napi;
+ cpumask_t mask; /* affinity mask */
+ u32 queue_index;
+ int (*poll_one)(struct hns_nic_ring_data *, int, void *);
+ void (*ex_process)(struct hns_nic_ring_data *, struct sk_buff *);
+ bool (*fini_process)(struct hns_nic_ring_data *);
+};
+
+/* abstracts the differences between the two hardware versions */
+struct hns_nic_ops {
+ void (*fill_desc)(struct hnae_ring *ring, void *priv,
+ int size, dma_addr_t dma, int frag_end,
+ int buf_num, enum hns_desc_type type, int mtu,
+ bool is_gso);
+ int (*maybe_stop_tx)(struct sk_buff **out_skb,
+ int *bnum, struct hnae_ring *ring);
+ void (*get_rxd_bnum)(u32 bnum_flag, int *out_bnum);
+};
+
+struct hns_nic_priv {
+ const struct fwnode_handle *fwnode;
+ u32 enet_ver;
+ u32 port_id;
+ int phy_mode;
+ int phy_led_val;
+ struct net_device *netdev;
+ struct device *dev;
+ struct hnae_handle *ae_handle;
+
+ struct hns_nic_ops ops;
+
+ /* the cb for the nic to manage the ring buffers; the first half of
+ * the array is for tx rings and the second half for rx rings
+ */
+ struct hns_nic_ring_data *ring_data;
+
+ /* The most recently read link state */
+ int link;
+ u64 tx_timeout_count;
+
+ unsigned long state;
+
+ struct timer_list service_timer;
+
+ struct work_struct service_task;
+
+ struct notifier_block notifier_block;
+};
+
+#define tx_ring_data(priv, idx) ((priv)->ring_data[idx])
+#define rx_ring_data(priv, idx) \
+ ((priv)->ring_data[(priv)->ae_handle->q_num + (idx)])
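+/* Illustrative example: with q_num == 4 the first four ring_data entries
+ * are the tx rings and the next four the rx rings, so rx_ring_data(priv, 2)
+ * resolves to priv->ring_data[6].
+ */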
+
+void hns_ethtool_set_ops(struct net_device *ndev);
+void hns_nic_net_reset(struct net_device *ndev);
+void hns_nic_net_reinit(struct net_device *netdev);
+int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h);
+netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
+ struct sk_buff *skb,
+ struct hns_nic_ring_data *ring_data);
+
+#endif /* __HNS_ENET_H */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
new file mode 100644
index 0000000000..b54f3706fb
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -0,0 +1,1283 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include "hns_enet.h"
+
+#define HNS_PHY_PAGE_MDIX 0
+#define HNS_PHY_PAGE_LED 3
+#define HNS_PHY_PAGE_COPPER 0
+
+#define HNS_PHY_PAGE_REG 22 /* Page Selection Reg. */
+#define HNS_PHY_CSC_REG 16 /* Copper Specific Control Register */
+#define HNS_PHY_CSS_REG 17 /* Copper Specific Status Register */
+#define HNS_LED_FC_REG 16 /* LED Function Control Reg. */
+
+#define HNS_LED_FORCE_ON 9
+#define HNS_LED_FORCE_OFF 8
+
+#define HNS_CHIP_VERSION 660
+#define HNS_NET_STATS_CNT 26
+
+#define PHY_MDIX_CTRL_S (5)
+#define PHY_MDIX_CTRL_M (3 << PHY_MDIX_CTRL_S)
+
+#define PHY_MDIX_STATUS_B (6)
+#define PHY_SPEED_DUP_RESOLVE_B (11)
+
+/**
+ *hns_nic_get_link - get current link status
+ *@net_dev: net_device
+ *return - the current link status, nonzero when the link is up
+ */
+static u32 hns_nic_get_link(struct net_device *net_dev)
+{
+ struct hns_nic_priv *priv = netdev_priv(net_dev);
+ u32 link_stat = priv->link;
+ struct hnae_handle *h;
+
+ h = priv->ae_handle;
+
+ if (net_dev->phydev) {
+ if (!genphy_read_status(net_dev->phydev))
+ link_stat = net_dev->phydev->link;
+ else
+ link_stat = 0;
+ }
+
+ if (h->dev && h->dev->ops && h->dev->ops->get_status)
+ link_stat = link_stat && h->dev->ops->get_status(h);
+ else
+ link_stat = 0;
+
+ return link_stat;
+}
+
+static void hns_get_mdix_mode(struct net_device *net_dev,
+ struct ethtool_link_ksettings *cmd)
+{
+ int mdix_ctrl, mdix, retval, is_resolved;
+ struct phy_device *phy_dev = net_dev->phydev;
+
+ if (!phy_dev || !phy_dev->mdio.bus) {
+ cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID;
+ cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
+ return;
+ }
+
+ phy_write(phy_dev, HNS_PHY_PAGE_REG, HNS_PHY_PAGE_MDIX);
+
+ retval = phy_read(phy_dev, HNS_PHY_CSC_REG);
+ mdix_ctrl = hnae_get_field(retval, PHY_MDIX_CTRL_M, PHY_MDIX_CTRL_S);
+
+ retval = phy_read(phy_dev, HNS_PHY_CSS_REG);
+ mdix = hnae_get_bit(retval, PHY_MDIX_STATUS_B);
+ is_resolved = hnae_get_bit(retval, PHY_SPEED_DUP_RESOLVE_B);
+
+ phy_write(phy_dev, HNS_PHY_PAGE_REG, HNS_PHY_PAGE_COPPER);
+
+ switch (mdix_ctrl) {
+ case 0x0:
+ cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI;
+ break;
+ case 0x1:
+ cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_X;
+ break;
+ case 0x3:
+ cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
+ break;
+ default:
+ cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID;
+ break;
+ }
+
+ if (!is_resolved)
+ cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
+ else if (mdix)
+ cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
+ else
+ cmd->base.eth_tp_mdix = ETH_TP_MDI;
+}
+
+/**
+ * hns_nic_get_link_ksettings - implement ethtool get link ksettings
+ * @net_dev: net_device
+ * @cmd: ethtool_link_ksettings
+ * Return 0 on success, negative on failure
+ */
+static int hns_nic_get_link_ksettings(struct net_device *net_dev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct hns_nic_priv *priv = netdev_priv(net_dev);
+ struct hnae_handle *h;
+ u32 link_stat;
+ int ret;
+ u8 duplex;
+ u16 speed;
+ u32 supported, advertising;
+
+ if (!priv || !priv->ae_handle)
+ return -ESRCH;
+
+ h = priv->ae_handle;
+ if (!h->dev || !h->dev->ops || !h->dev->ops->get_info)
+ return -ESRCH;
+
+ ret = h->dev->ops->get_info(h, NULL, &speed, &duplex);
+ if (ret < 0) {
+ netdev_err(net_dev, "%s get_info error!\n", __func__);
+ return -EINVAL;
+ }
+
+ ethtool_convert_link_mode_to_legacy_u32(&supported,
+ cmd->link_modes.supported);
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
+
+ /* When there is no phy, autoneg is off. */
+ cmd->base.autoneg = false;
+ cmd->base.speed = speed;
+ cmd->base.duplex = duplex;
+
+ if (net_dev->phydev)
+ phy_ethtool_ksettings_get(net_dev->phydev, cmd);
+
+ link_stat = hns_nic_get_link(net_dev);
+ if (!link_stat) {
+ cmd->base.speed = (u32)SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
+ }
+
+ if (cmd->base.autoneg)
+ advertising |= ADVERTISED_Autoneg;
+
+ supported |= h->if_support;
+ if (h->phy_if == PHY_INTERFACE_MODE_SGMII) {
+ supported |= SUPPORTED_TP;
+ advertising |= ADVERTISED_1000baseT_Full;
+ } else if (h->phy_if == PHY_INTERFACE_MODE_XGMII) {
+ supported |= SUPPORTED_FIBRE;
+ advertising |= ADVERTISED_10000baseKR_Full;
+ }
+
+ switch (h->media_type) {
+ case HNAE_MEDIA_TYPE_FIBER:
+ cmd->base.port = PORT_FIBRE;
+ break;
+ case HNAE_MEDIA_TYPE_COPPER:
+ cmd->base.port = PORT_TP;
+ break;
+ case HNAE_MEDIA_TYPE_UNKNOWN:
+ default:
+ break;
+ }
+
+ if (!(AE_IS_VER1(priv->enet_ver) && h->port_type == HNAE_PORT_DEBUG))
+ supported |= SUPPORTED_Pause;
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
+
+ cmd->base.mdio_support = ETH_MDIO_SUPPORTS_C45 | ETH_MDIO_SUPPORTS_C22;
+ hns_get_mdix_mode(net_dev, cmd);
+
+ return 0;
+}
+
+/**
+ * hns_nic_set_link_ksettings - implement ethtool set link ksettings
+ * @net_dev: net_device
+ * @cmd: ethtool_link_ksettings
+ * Return 0 on success, negative on failure
+ */
+static int hns_nic_set_link_ksettings(struct net_device *net_dev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct hns_nic_priv *priv = netdev_priv(net_dev);
+ struct hnae_handle *h;
+ u32 speed;
+
+ if (!netif_running(net_dev))
+ return -ESRCH;
+
+ if (!priv || !priv->ae_handle || !priv->ae_handle->dev ||
+ !priv->ae_handle->dev->ops)
+ return -ENODEV;
+
+ h = priv->ae_handle;
+ speed = cmd->base.speed;
+
+ if (h->phy_if == PHY_INTERFACE_MODE_XGMII) {
+ if (cmd->base.autoneg == AUTONEG_ENABLE ||
+ speed != SPEED_10000 ||
+ cmd->base.duplex != DUPLEX_FULL)
+ return -EINVAL;
+ } else if (h->phy_if == PHY_INTERFACE_MODE_SGMII) {
+ if (!net_dev->phydev && cmd->base.autoneg == AUTONEG_ENABLE)
+ return -EINVAL;
+
+ if (speed == SPEED_1000 && cmd->base.duplex == DUPLEX_HALF)
+ return -EINVAL;
+ if (net_dev->phydev)
+ return phy_ethtool_ksettings_set(net_dev->phydev, cmd);
+
+ if ((speed != SPEED_10 && speed != SPEED_100 &&
+ speed != SPEED_1000) || (cmd->base.duplex != DUPLEX_HALF &&
+ cmd->base.duplex != DUPLEX_FULL))
+ return -EINVAL;
+ } else {
+ netdev_err(net_dev, "Not supported!");
+ return -ENOTSUPP;
+ }
+
+ if (h->dev->ops->adjust_link) {
+ netif_carrier_off(net_dev);
+ h->dev->ops->adjust_link(h, (int)speed, cmd->base.duplex);
+ netif_carrier_on(net_dev);
+ return 0;
+ }
+
+ netdev_err(net_dev, "Not supported!");
+ return -ENOTSUPP;
+}
+
+static const char hns_nic_test_strs[][ETH_GSTRING_LEN] = {
+ "Mac Loopback test",
+ "Serdes Loopback test",
+ "Phy Loopback test"
+};
+
+static int hns_nic_config_phy_loopback(struct phy_device *phy_dev, u8 en)
+{
+ int err;
+
+ if (en) {
+ /* PHY loopback is done in the offline state, so the PHY must be
+ * resumed to power up the device.
+ */
+ err = phy_resume(phy_dev);
+ if (err)
+ goto out;
+
+ err = phy_loopback(phy_dev, true);
+ } else {
+ err = phy_loopback(phy_dev, false);
+ if (err)
+ goto out;
+
+ err = phy_suspend(phy_dev);
+ }
+
+out:
+ return err;
+}
+
+static int __lb_setup(struct net_device *ndev,
+ enum hnae_loop loop)
+{
+ int ret = 0;
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct phy_device *phy_dev = ndev->phydev;
+ struct hnae_handle *h = priv->ae_handle;
+
+ switch (loop) {
+ case MAC_INTERNALLOOP_PHY:
+ ret = hns_nic_config_phy_loopback(phy_dev, 0x1);
+ if (!ret)
+ ret = h->dev->ops->set_loopback(h, loop, 0x1);
+ break;
+ case MAC_INTERNALLOOP_MAC:
+ if ((h->dev->ops->set_loopback) &&
+ (priv->ae_handle->phy_if != PHY_INTERFACE_MODE_XGMII))
+ ret = h->dev->ops->set_loopback(h, loop, 0x1);
+ break;
+ case MAC_INTERNALLOOP_SERDES:
+ if (h->dev->ops->set_loopback)
+ ret = h->dev->ops->set_loopback(h, loop, 0x1);
+ break;
+ case MAC_LOOP_PHY_NONE:
+ ret = hns_nic_config_phy_loopback(phy_dev, 0x0);
+ fallthrough;
+ case MAC_LOOP_NONE:
+ if (!ret && h->dev->ops->set_loopback) {
+ if (priv->ae_handle->phy_if != PHY_INTERFACE_MODE_XGMII)
+ ret = h->dev->ops->set_loopback(h,
+ MAC_INTERNALLOOP_MAC, 0x0);
+
+ if (!ret)
+ ret = h->dev->ops->set_loopback(h,
+ MAC_INTERNALLOOP_SERDES, 0x0);
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ if (!ret) {
+ if (loop == MAC_LOOP_NONE)
+ h->dev->ops->set_promisc_mode(
+ h, ndev->flags & IFF_PROMISC);
+ else
+ h->dev->ops->set_promisc_mode(h, 1);
+ }
+ return ret;
+}
+
+static int __lb_up(struct net_device *ndev,
+ enum hnae_loop loop_mode)
+{
+#define NIC_LB_TEST_WAIT_PHY_LINK_TIME 300
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct hnae_handle *h = priv->ae_handle;
+ int speed, duplex;
+ int ret;
+
+ hns_nic_net_reset(ndev);
+
+ ret = __lb_setup(ndev, loop_mode);
+ if (ret)
+ return ret;
+
+ msleep(200);
+
+ ret = h->dev->ops->start ? h->dev->ops->start(h) : 0;
+ if (ret)
+ return ret;
+
+ /* adjust link speed and duplex */
+ if (priv->ae_handle->phy_if != PHY_INTERFACE_MODE_XGMII)
+ speed = 1000;
+ else
+ speed = 10000;
+ duplex = 1;
+
+ h->dev->ops->adjust_link(h, speed, duplex);
+
+ /* wait for the link adjustment to finish and the PHY to become ready */
+ msleep(NIC_LB_TEST_WAIT_PHY_LINK_TIME);
+
+ return 0;
+}
+
+static void __lb_other_process(struct hns_nic_ring_data *ring_data,
+ struct sk_buff *skb)
+{
+ struct net_device *ndev;
+ struct hns_nic_priv *priv;
+ struct hnae_ring *ring;
+ struct netdev_queue *dev_queue;
+ struct sk_buff *new_skb;
+ unsigned int frame_size;
+ int check_ok;
+ u32 i;
+ char buff[33]; /* 32B data and the last character '\0' */
+
+ if (!ring_data) { /* just build the test frame */
+ ndev = skb->dev;
+ priv = netdev_priv(ndev);
+
+ frame_size = skb->len;
+ memset(skb->data, 0xFF, frame_size);
+ if ((!AE_IS_VER1(priv->enet_ver)) &&
+ (priv->ae_handle->port_type == HNAE_PORT_SERVICE)) {
+ memcpy(skb->data, ndev->dev_addr, 6);
+ skb->data[5] += 0x1f;
+ }
+
+ frame_size &= ~1ul;
+ memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
+ memset(&skb->data[frame_size / 2 + 10], 0xBE,
+ frame_size / 2 - 11);
+ memset(&skb->data[frame_size / 2 + 12], 0xAF,
+ frame_size / 2 - 13);
+ return;
+ }
+
+ ring = ring_data->ring;
+ ndev = ring_data->napi.dev;
+ if (is_tx_ring(ring)) { /* reset the tx queue */
+ dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
+ netdev_tx_reset_queue(dev_queue);
+ return;
+ }
+
+ frame_size = skb->len;
+ frame_size &= ~1ul;
+ /* linearize frames that span multiple buffers */
+ new_skb = skb_copy(skb, GFP_ATOMIC);
+ dev_kfree_skb_any(skb);
+ if (!new_skb) {
+ netdev_err(ndev, "skb alloc failed\n");
+ return;
+ }
+ skb = new_skb;
+
+ check_ok = 0;
+ if (*(skb->data + 10) == 0xFF) { /* check the received test frame */
+ if ((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
+ (*(skb->data + frame_size / 2 + 12) == 0xAF))
+ check_ok = 1;
+ }
+
+ if (check_ok) {
+ ndev->stats.rx_packets++;
+ ndev->stats.rx_bytes += skb->len;
+ } else {
+ ndev->stats.rx_frame_errors++;
+ for (i = 0; i < skb->len; i++) {
+ snprintf(buff + i % 16 * 2, 3, /* trailing '\0' */
+ "%02x", *(skb->data + i));
+ if ((i % 16 == 15) || (i == skb->len - 1))
+ pr_info("%s\n", buff);
+ }
+ }
+ dev_kfree_skb_any(skb);
+}
+
+static int __lb_clean_rings(struct hns_nic_priv *priv,
+ int ringid0, int ringid1, int budget)
+{
+ int i, ret;
+ struct hns_nic_ring_data *ring_data;
+ struct net_device *ndev = priv->netdev;
+ unsigned long rx_packets = ndev->stats.rx_packets;
+ unsigned long rx_bytes = ndev->stats.rx_bytes;
+ unsigned long rx_frame_errors = ndev->stats.rx_frame_errors;
+
+ for (i = ringid0; i <= ringid1; i++) {
+ ring_data = &priv->ring_data[i];
+ (void)ring_data->poll_one(ring_data,
+ budget, __lb_other_process);
+ }
+ ret = (int)(ndev->stats.rx_packets - rx_packets);
+ ndev->stats.rx_packets = rx_packets;
+ ndev->stats.rx_bytes = rx_bytes;
+ ndev->stats.rx_frame_errors = rx_frame_errors;
+ return ret;
+}
+
+/**
+ * __lb_run_test - run loopback test
+ * @ndev: net device
+ * @loop_mode: loopback mode
+ */
+static int __lb_run_test(struct net_device *ndev,
+ enum hnae_loop loop_mode)
+{
+#define NIC_LB_TEST_PKT_NUM_PER_CYCLE 1
+#define NIC_LB_TEST_RING_ID 0
+#define NIC_LB_TEST_FRAME_SIZE 128
+/* nic loopback test err */
+#define NIC_LB_TEST_NO_MEM_ERR 1
+#define NIC_LB_TEST_TX_CNT_ERR 2
+#define NIC_LB_TEST_RX_CNT_ERR 3
+
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct hnae_handle *h = priv->ae_handle;
+ int i, j, lc, good_cnt, ret_val = 0;
+ unsigned int size;
+ netdev_tx_t tx_ret_val;
+ struct sk_buff *skb;
+
+ size = NIC_LB_TEST_FRAME_SIZE;
+ /* allocate test skb */
+ skb = alloc_skb(size, GFP_KERNEL);
+ if (!skb)
+ return NIC_LB_TEST_NO_MEM_ERR;
+
+ /* place data into test skb */
+ (void)skb_put(skb, size);
+ skb->dev = ndev;
+ __lb_other_process(NULL, skb);
+ skb->queue_mapping = NIC_LB_TEST_RING_ID;
+
+ lc = 1;
+ for (j = 0; j < lc; j++) {
+ /* reset count of good packets */
+ good_cnt = 0;
+ /* place the test packets on the transmit queue */
+ for (i = 0; i < NIC_LB_TEST_PKT_NUM_PER_CYCLE; i++) {
+ (void)skb_get(skb);
+
+ tx_ret_val = (netdev_tx_t)hns_nic_net_xmit_hw(
+ ndev, skb,
+ &tx_ring_data(priv, skb->queue_mapping));
+ if (tx_ret_val == NETDEV_TX_OK)
+ good_cnt++;
+ else
+ break;
+ }
+ if (good_cnt != NIC_LB_TEST_PKT_NUM_PER_CYCLE) {
+ ret_val = NIC_LB_TEST_TX_CNT_ERR;
+ dev_err(priv->dev, "%s sent fail, cnt=0x%x, budget=0x%x\n",
+ hns_nic_test_strs[loop_mode], good_cnt,
+ NIC_LB_TEST_PKT_NUM_PER_CYCLE);
+ break;
+ }
+
+ /* allow 100 milliseconds for packets to go from Tx to Rx */
+ msleep(100);
+
+ good_cnt = __lb_clean_rings(priv,
+ h->q_num, h->q_num * 2 - 1,
+ NIC_LB_TEST_PKT_NUM_PER_CYCLE);
+ if (good_cnt != NIC_LB_TEST_PKT_NUM_PER_CYCLE) {
+ ret_val = NIC_LB_TEST_RX_CNT_ERR;
+ dev_err(priv->dev, "%s recv fail, cnt=0x%x, budget=0x%x\n",
+ hns_nic_test_strs[loop_mode], good_cnt,
+ NIC_LB_TEST_PKT_NUM_PER_CYCLE);
+ break;
+ }
+ (void)__lb_clean_rings(priv,
+ NIC_LB_TEST_RING_ID, NIC_LB_TEST_RING_ID,
+ NIC_LB_TEST_PKT_NUM_PER_CYCLE);
+ }
+
+ /* free the original skb */
+ kfree_skb(skb);
+
+ return ret_val;
+}
+
+static int __lb_down(struct net_device *ndev, enum hnae_loop loop)
+{
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct hnae_handle *h = priv->ae_handle;
+ int ret;
+
+ if (loop == MAC_INTERNALLOOP_PHY)
+ ret = __lb_setup(ndev, MAC_LOOP_PHY_NONE);
+ else
+ ret = __lb_setup(ndev, MAC_LOOP_NONE);
+ if (ret)
+ netdev_err(ndev, "%s: __lb_setup return error(%d)!\n",
+ __func__,
+ ret);
+
+ if (h->dev->ops->stop)
+ h->dev->ops->stop(h);
+
+ usleep_range(10000, 20000);
+ (void)__lb_clean_rings(priv, 0, h->q_num - 1, 256);
+
+ hns_nic_net_reset(ndev);
+
+ return 0;
+}
+
+/**
+ * hns_nic_self_test - self test
+ * @ndev: net device
+ * @eth_test: test cmd
+ * @data: test result
+ */
+static void hns_nic_self_test(struct net_device *ndev,
+ struct ethtool_test *eth_test, u64 *data)
+{
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ bool if_running = netif_running(ndev);
+#define SELF_TEST_TPYE_NUM 3
+ int st_param[SELF_TEST_TPYE_NUM][2];
+ int i;
+ int test_index = 0;
+
+ st_param[0][0] = MAC_INTERNALLOOP_MAC; /* XGE does not support MAC loopback */
+ st_param[0][1] = (priv->ae_handle->phy_if != PHY_INTERFACE_MODE_XGMII);
+ st_param[1][0] = MAC_INTERNALLOOP_SERDES;
+ st_param[1][1] = 1; /* serdes always exists */
+ st_param[2][0] = MAC_INTERNALLOOP_PHY; /* only supported with a PHY */
+ st_param[2][1] = ((!!(priv->ae_handle->phy_dev)) &&
+ (priv->ae_handle->phy_if != PHY_INTERFACE_MODE_XGMII));
+
+ if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
+ set_bit(NIC_STATE_TESTING, &priv->state);
+
+ if (if_running)
+ dev_close(ndev);
+
+ for (i = 0; i < SELF_TEST_TPYE_NUM; i++) {
+ if (!st_param[i][1])
+ continue; /* NEXT testing */
+
+ data[test_index] = __lb_up(ndev,
+ (enum hnae_loop)st_param[i][0]);
+ if (!data[test_index]) {
+ data[test_index] = __lb_run_test(
+ ndev, (enum hnae_loop)st_param[i][0]);
+ (void)__lb_down(ndev,
+ (enum hnae_loop)st_param[i][0]);
+ }
+
+ if (data[test_index])
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ test_index++;
+ }
+
+ hns_nic_net_reset(priv->netdev);
+
+ clear_bit(NIC_STATE_TESTING, &priv->state);
+
+ if (if_running)
+ (void)dev_open(ndev, NULL);
+ }
+ /* Online tests aren't run; pass by default */
+
+ (void)msleep_interruptible(4 * 1000);
+}
+
+/**
+ * hns_nic_get_drvinfo - get net driver info
+ * @net_dev: net device
+ * @drvinfo: driver info
+ */
+static void hns_nic_get_drvinfo(struct net_device *net_dev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct hns_nic_priv *priv = netdev_priv(net_dev);
+
+ strscpy(drvinfo->version, HNAE_DRIVER_VERSION,
+ sizeof(drvinfo->version));
+
+ strscpy(drvinfo->driver, HNAE_DRIVER_NAME, sizeof(drvinfo->driver));
+
+ strscpy(drvinfo->bus_info, priv->dev->bus->name,
+ sizeof(drvinfo->bus_info));
+
+ strscpy(drvinfo->fw_version, "N/A", ETHTOOL_FWVERS_LEN);
+ drvinfo->eedump_len = 0;
+}
+
+/**
+ * hns_get_ringparam - get ring parameter
+ * @net_dev: net device
+ * @param: ethtool parameter
+ * @kernel_param: ethtool external parameter
+ * @extack: netlink extended ACK report struct
+ */
+static void hns_get_ringparam(struct net_device *net_dev,
+ struct ethtool_ringparam *param,
+ struct kernel_ethtool_ringparam *kernel_param,
+ struct netlink_ext_ack *extack)
+{
+ struct hns_nic_priv *priv = netdev_priv(net_dev);
+ struct hnae_ae_ops *ops;
+ struct hnae_queue *queue;
+ u32 uplimit = 0;
+
+ queue = priv->ae_handle->qs[0];
+ ops = priv->ae_handle->dev->ops;
+
+ if (ops->get_ring_bdnum_limit)
+ ops->get_ring_bdnum_limit(queue, &uplimit);
+
+ param->rx_max_pending = uplimit;
+ param->tx_max_pending = uplimit;
+ param->rx_pending = queue->rx_ring.desc_num;
+ param->tx_pending = queue->tx_ring.desc_num;
+}
+
+/**
+ * hns_get_pauseparam - get pause parameter
+ * @net_dev: net device
+ * @param: pause parameter
+ */
+static void hns_get_pauseparam(struct net_device *net_dev,
+ struct ethtool_pauseparam *param)
+{
+ struct hns_nic_priv *priv = netdev_priv(net_dev);
+ struct hnae_ae_ops *ops;
+
+ ops = priv->ae_handle->dev->ops;
+
+ if (ops->get_pauseparam)
+ ops->get_pauseparam(priv->ae_handle, &param->autoneg,
+ &param->rx_pause, &param->tx_pause);
+}
+
+/**
+ * hns_set_pauseparam - set pause parameter
+ * @net_dev: net device
+ * @param: pause parameter
+ *
+ * Return 0 on success, negative on failure
+ */
+static int hns_set_pauseparam(struct net_device *net_dev,
+ struct ethtool_pauseparam *param)
+{
+ struct hns_nic_priv *priv = netdev_priv(net_dev);
+ struct hnae_handle *h;
+ struct hnae_ae_ops *ops;
+
+ h = priv->ae_handle;
+ ops = h->dev->ops;
+
+ if (!ops->set_pauseparam)
+ return -ESRCH;
+
+ return ops->set_pauseparam(priv->ae_handle, param->autoneg,
+ param->rx_pause, param->tx_pause);
+}
+
+/**
+ * hns_get_coalesce - get coalesce info.
+ * @net_dev: net device
+ * @ec: coalesce info.
+ * @kernel_coal: ethtool CQE mode setting structure
+ * @extack: extack for reporting error messages
+ *
+ * Return 0 on success, negative on failure.
+ */
+static int hns_get_coalesce(struct net_device *net_dev,
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct hns_nic_priv *priv = netdev_priv(net_dev);
+ struct hnae_ae_ops *ops;
+
+ ops = priv->ae_handle->dev->ops;
+
+ ec->use_adaptive_rx_coalesce = priv->ae_handle->coal_adapt_en;
+ ec->use_adaptive_tx_coalesce = priv->ae_handle->coal_adapt_en;
+
+ if ((!ops->get_coalesce_usecs) ||
+ (!ops->get_max_coalesced_frames))
+ return -ESRCH;
+
+ ops->get_coalesce_usecs(priv->ae_handle,
+ &ec->tx_coalesce_usecs,
+ &ec->rx_coalesce_usecs);
+
+ ops->get_max_coalesced_frames(
+ priv->ae_handle,
+ &ec->tx_max_coalesced_frames,
+ &ec->rx_max_coalesced_frames);
+
+ ops->get_coalesce_range(priv->ae_handle,
+ &ec->tx_max_coalesced_frames_low,
+ &ec->rx_max_coalesced_frames_low,
+ &ec->tx_max_coalesced_frames_high,
+ &ec->rx_max_coalesced_frames_high,
+ &ec->tx_coalesce_usecs_low,
+ &ec->rx_coalesce_usecs_low,
+ &ec->tx_coalesce_usecs_high,
+ &ec->rx_coalesce_usecs_high);
+
+ return 0;
+}
+
+/**
+ * hns_set_coalesce - set coalesce info.
+ * @net_dev: net device
+ * @ec: coalesce info.
+ * @kernel_coal: ethtool CQE mode setting structure
+ * @extack: extack for reporting error messages
+ *
+ * Return 0 on success, negative on failure.
+ */
+static int hns_set_coalesce(struct net_device *net_dev,
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct hns_nic_priv *priv = netdev_priv(net_dev);
+ struct hnae_ae_ops *ops;
+ int rc1, rc2;
+
+ ops = priv->ae_handle->dev->ops;
+
+ if (ec->tx_coalesce_usecs != ec->rx_coalesce_usecs)
+ return -EINVAL;
+
+ if ((!ops->set_coalesce_usecs) ||
+ (!ops->set_coalesce_frames))
+ return -ESRCH;
+
+ if (ec->use_adaptive_rx_coalesce != priv->ae_handle->coal_adapt_en)
+ priv->ae_handle->coal_adapt_en = ec->use_adaptive_rx_coalesce;
+
+ rc1 = ops->set_coalesce_usecs(priv->ae_handle,
+ ec->rx_coalesce_usecs);
+
+ rc2 = ops->set_coalesce_frames(priv->ae_handle,
+ ec->tx_max_coalesced_frames,
+ ec->rx_max_coalesced_frames);
+
+ if (rc1 || rc2)
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * hns_get_channels - get channel info.
+ * @net_dev: net device
+ * @ch: channel info.
+ */
+static void
+hns_get_channels(struct net_device *net_dev, struct ethtool_channels *ch)
+{
+ struct hns_nic_priv *priv = netdev_priv(net_dev);
+
+ ch->max_rx = priv->ae_handle->q_num;
+ ch->max_tx = priv->ae_handle->q_num;
+
+ ch->rx_count = priv->ae_handle->q_num;
+ ch->tx_count = priv->ae_handle->q_num;
+}
+
+/**
+ * hns_get_ethtool_stats - get detail statistics.
+ * @netdev: net device
+ * @stats: statistics info.
+ * @data: statistics data.
+ */
+static void hns_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ u64 *p = data;
+ struct hns_nic_priv *priv = netdev_priv(netdev);
+ struct hnae_handle *h = priv->ae_handle;
+ const struct rtnl_link_stats64 *net_stats;
+ struct rtnl_link_stats64 temp;
+
+ if (!h->dev->ops->get_stats || !h->dev->ops->update_stats) {
+ netdev_err(netdev, "get_stats or update_stats is null!\n");
+ return;
+ }
+
+ h->dev->ops->update_stats(h, &netdev->stats);
+
+ net_stats = dev_get_stats(netdev, &temp);
+
+ /* get netdev statistics */
+ p[0] = net_stats->rx_packets;
+ p[1] = net_stats->tx_packets;
+ p[2] = net_stats->rx_bytes;
+ p[3] = net_stats->tx_bytes;
+ p[4] = net_stats->rx_errors;
+ p[5] = net_stats->tx_errors;
+ p[6] = net_stats->rx_dropped;
+ p[7] = net_stats->tx_dropped;
+ p[8] = net_stats->multicast;
+ p[9] = net_stats->collisions;
+ p[10] = net_stats->rx_over_errors;
+ p[11] = net_stats->rx_crc_errors;
+ p[12] = net_stats->rx_frame_errors;
+ p[13] = net_stats->rx_fifo_errors;
+ p[14] = net_stats->rx_missed_errors;
+ p[15] = net_stats->tx_aborted_errors;
+ p[16] = net_stats->tx_carrier_errors;
+ p[17] = net_stats->tx_fifo_errors;
+ p[18] = net_stats->tx_heartbeat_errors;
+ p[19] = net_stats->rx_length_errors;
+ p[20] = net_stats->tx_window_errors;
+ p[21] = net_stats->rx_compressed;
+ p[22] = net_stats->tx_compressed;
+
+ p[23] = 0; /* was netdev->rx_dropped.counter */
+ p[24] = 0; /* was netdev->tx_dropped.counter */
+
+ p[25] = priv->tx_timeout_count;
+
+ /* get driver statistics */
+ h->dev->ops->get_stats(h, &p[26]);
+}
+
+/**
+ * hns_get_strings - return a set of strings that describe the requested objects
+ * @netdev: net device
+ * @stringset: string set ID.
+ * @data: objects data.
+ */
+static void hns_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
+{
+ struct hns_nic_priv *priv = netdev_priv(netdev);
+ struct hnae_handle *h = priv->ae_handle;
+ u8 *buff = data;
+
+ if (!h->dev->ops->get_strings) {
+ netdev_err(netdev, "h->dev->ops->get_strings is null!\n");
+ return;
+ }
+
+ if (stringset == ETH_SS_TEST) {
+ if (priv->ae_handle->phy_if != PHY_INTERFACE_MODE_XGMII)
+ ethtool_sprintf(&buff,
+ hns_nic_test_strs[MAC_INTERNALLOOP_MAC]);
+ ethtool_sprintf(&buff,
+ hns_nic_test_strs[MAC_INTERNALLOOP_SERDES]);
+ if ((netdev->phydev) && (!netdev->phydev->is_c45))
+ ethtool_sprintf(&buff,
+ hns_nic_test_strs[MAC_INTERNALLOOP_PHY]);
+
+ } else {
+ ethtool_sprintf(&buff, "rx_packets");
+ ethtool_sprintf(&buff, "tx_packets");
+ ethtool_sprintf(&buff, "rx_bytes");
+ ethtool_sprintf(&buff, "tx_bytes");
+ ethtool_sprintf(&buff, "rx_errors");
+ ethtool_sprintf(&buff, "tx_errors");
+ ethtool_sprintf(&buff, "rx_dropped");
+ ethtool_sprintf(&buff, "tx_dropped");
+ ethtool_sprintf(&buff, "multicast");
+ ethtool_sprintf(&buff, "collisions");
+ ethtool_sprintf(&buff, "rx_over_errors");
+ ethtool_sprintf(&buff, "rx_crc_errors");
+ ethtool_sprintf(&buff, "rx_frame_errors");
+ ethtool_sprintf(&buff, "rx_fifo_errors");
+ ethtool_sprintf(&buff, "rx_missed_errors");
+ ethtool_sprintf(&buff, "tx_aborted_errors");
+ ethtool_sprintf(&buff, "tx_carrier_errors");
+ ethtool_sprintf(&buff, "tx_fifo_errors");
+ ethtool_sprintf(&buff, "tx_heartbeat_errors");
+ ethtool_sprintf(&buff, "rx_length_errors");
+ ethtool_sprintf(&buff, "tx_window_errors");
+ ethtool_sprintf(&buff, "rx_compressed");
+ ethtool_sprintf(&buff, "tx_compressed");
+ ethtool_sprintf(&buff, "netdev_rx_dropped");
+ ethtool_sprintf(&buff, "netdev_tx_dropped");
+
+ ethtool_sprintf(&buff, "netdev_tx_timeout");
+
+ h->dev->ops->get_strings(h, stringset, buff);
+ }
+}
+
+/**
+ * hns_get_sset_count - get string set count returned by nic_get_strings
+ * @netdev: net device
+ * @stringset: string set index, 0: self test string; 1: statistics string.
+ *
+ * Return string set count.
+ */
+static int hns_get_sset_count(struct net_device *netdev, int stringset)
+{
+ struct hns_nic_priv *priv = netdev_priv(netdev);
+ struct hnae_handle *h = priv->ae_handle;
+ struct hnae_ae_ops *ops = h->dev->ops;
+
+ if (!ops->get_sset_count) {
+ netdev_err(netdev, "get_sset_count is null!\n");
+ return -EOPNOTSUPP;
+ }
+ if (stringset == ETH_SS_TEST) {
+ u32 cnt = (sizeof(hns_nic_test_strs) / ETH_GSTRING_LEN);
+
+ if (priv->ae_handle->phy_if == PHY_INTERFACE_MODE_XGMII)
+ cnt--;
+
+ if ((!netdev->phydev) || (netdev->phydev->is_c45))
+ cnt--;
+
+ return cnt;
+ } else if (stringset == ETH_SS_STATS) {
+ return (HNS_NET_STATS_CNT + ops->get_sset_count(h, stringset));
+ } else {
+ return -EOPNOTSUPP;
+ }
+}
+
+/**
+ * hns_phy_led_set - set phy LED status.
+ * @netdev: net device
+ * @value: LED state.
+ *
+ * Return 0 on success, negative on failure.
+ */
+static int hns_phy_led_set(struct net_device *netdev, int value)
+{
+ int retval;
+ struct phy_device *phy_dev = netdev->phydev;
+
+ retval = phy_write(phy_dev, HNS_PHY_PAGE_REG, HNS_PHY_PAGE_LED);
+ retval |= phy_write(phy_dev, HNS_LED_FC_REG, value);
+ retval |= phy_write(phy_dev, HNS_PHY_PAGE_REG, HNS_PHY_PAGE_COPPER);
+ if (retval) {
+ netdev_err(netdev, "mdiobus_write fail !\n");
+ return retval;
+ }
+ return 0;
+}
+
+/**
+ * hns_set_phys_id - set phy identify LED.
+ * @netdev: net device
+ * @state: LED state.
+ *
+ * Return 0 on success, negative on failure.
+ */
+static int
+hns_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state)
+{
+ struct hns_nic_priv *priv = netdev_priv(netdev);
+ struct hnae_handle *h = priv->ae_handle;
+ struct phy_device *phy_dev = netdev->phydev;
+ int ret;
+
+ if (phy_dev)
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ ret = phy_write(phy_dev, HNS_PHY_PAGE_REG,
+ HNS_PHY_PAGE_LED);
+ if (ret)
+ return ret;
+
+ priv->phy_led_val = phy_read(phy_dev, HNS_LED_FC_REG);
+
+ ret = phy_write(phy_dev, HNS_PHY_PAGE_REG,
+ HNS_PHY_PAGE_COPPER);
+ if (ret)
+ return ret;
+ return 2;
+ case ETHTOOL_ID_ON:
+ ret = hns_phy_led_set(netdev, HNS_LED_FORCE_ON);
+ if (ret)
+ return ret;
+ break;
+ case ETHTOOL_ID_OFF:
+ ret = hns_phy_led_set(netdev, HNS_LED_FORCE_OFF);
+ if (ret)
+ return ret;
+ break;
+ case ETHTOOL_ID_INACTIVE:
+ ret = phy_write(phy_dev, HNS_PHY_PAGE_REG,
+ HNS_PHY_PAGE_LED);
+ if (ret)
+ return ret;
+
+ ret = phy_write(phy_dev, HNS_LED_FC_REG,
+ priv->phy_led_val);
+ if (ret)
+ return ret;
+
+ ret = phy_write(phy_dev, HNS_PHY_PAGE_REG,
+ HNS_PHY_PAGE_COPPER);
+ if (ret)
+ return ret;
+ break;
+ default:
+ return -EINVAL;
+ }
+ else
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ return h->dev->ops->set_led_id(h, HNAE_LED_ACTIVE);
+ case ETHTOOL_ID_ON:
+ return h->dev->ops->set_led_id(h, HNAE_LED_ON);
+ case ETHTOOL_ID_OFF:
+ return h->dev->ops->set_led_id(h, HNAE_LED_OFF);
+ case ETHTOOL_ID_INACTIVE:
+ return h->dev->ops->set_led_id(h, HNAE_LED_INACTIVE);
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * hns_get_regs - get net device register
+ * @net_dev: net device
+ * @cmd: ethtool cmd
+ * @data: register data
+ */
+static void hns_get_regs(struct net_device *net_dev, struct ethtool_regs *cmd,
+ void *data)
+{
+ struct hns_nic_priv *priv = netdev_priv(net_dev);
+ struct hnae_ae_ops *ops;
+
+ ops = priv->ae_handle->dev->ops;
+
+ cmd->version = HNS_CHIP_VERSION;
+ if (!ops->get_regs) {
+ netdev_err(net_dev, "ops->get_regs is null!\n");
+ return;
+ }
+ ops->get_regs(priv->ae_handle, data);
+}
+
+/**
+ * hns_get_regs_len - get total register len.
+ * @net_dev: net device
+ *
+ * Return total register len.
+ */
+static int hns_get_regs_len(struct net_device *net_dev)
+{
+ u32 reg_num;
+ struct hns_nic_priv *priv = netdev_priv(net_dev);
+ struct hnae_ae_ops *ops;
+
+ ops = priv->ae_handle->dev->ops;
+ if (!ops->get_regs_len) {
+ netdev_err(net_dev, "ops->get_regs_len is null!\n");
+ return -EOPNOTSUPP;
+ }
+
+ reg_num = ops->get_regs_len(priv->ae_handle);
+ if (reg_num > 0)
+ return reg_num * sizeof(u32);
+ else
+ return reg_num; /* error code */
+}
+
+/**
+ * hns_nic_nway_reset - nway reset
+ * @netdev: net device
+ *
+ * Return 0 on success, negative on failure
+ */
+static int hns_nic_nway_reset(struct net_device *netdev)
+{
+ struct phy_device *phy = netdev->phydev;
+
+ if (!netif_running(netdev))
+ return 0;
+
+ if (!phy)
+ return -EOPNOTSUPP;
+
+ if (phy->autoneg != AUTONEG_ENABLE)
+ return -EINVAL;
+
+ return genphy_restart_aneg(phy);
+}
+
+static u32
+hns_get_rss_key_size(struct net_device *netdev)
+{
+ struct hns_nic_priv *priv = netdev_priv(netdev);
+ struct hnae_ae_ops *ops;
+
+ if (AE_IS_VER1(priv->enet_ver)) {
+ netdev_err(netdev,
+ "RSS feature is not supported on this hardware\n");
+ return 0;
+ }
+
+ ops = priv->ae_handle->dev->ops;
+ return ops->get_rss_key_size(priv->ae_handle);
+}
+
+static u32
+hns_get_rss_indir_size(struct net_device *netdev)
+{
+ struct hns_nic_priv *priv = netdev_priv(netdev);
+ struct hnae_ae_ops *ops;
+
+ if (AE_IS_VER1(priv->enet_ver)) {
+ netdev_err(netdev,
+ "RSS feature is not supported on this hardware\n");
+ return 0;
+ }
+
+ ops = priv->ae_handle->dev->ops;
+ return ops->get_rss_indir_size(priv->ae_handle);
+}
+
+static int
+hns_get_rss(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc)
+{
+ struct hns_nic_priv *priv = netdev_priv(netdev);
+ struct hnae_ae_ops *ops;
+
+ if (AE_IS_VER1(priv->enet_ver)) {
+ netdev_err(netdev,
+ "RSS feature is not supported on this hardware\n");
+ return -EOPNOTSUPP;
+ }
+
+ ops = priv->ae_handle->dev->ops;
+
+ if (!indir)
+ return 0;
+
+ return ops->get_rss(priv->ae_handle, indir, key, hfunc);
+}
+
+static int
+hns_set_rss(struct net_device *netdev, const u32 *indir, const u8 *key,
+ const u8 hfunc)
+{
+ struct hns_nic_priv *priv = netdev_priv(netdev);
+ struct hnae_ae_ops *ops;
+
+ if (AE_IS_VER1(priv->enet_ver)) {
+ netdev_err(netdev,
+ "RSS feature is not supported on this hardware\n");
+ return -EOPNOTSUPP;
+ }
+
+ ops = priv->ae_handle->dev->ops;
+
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) {
+ netdev_err(netdev, "Invalid hfunc!\n");
+ return -EOPNOTSUPP;
+ }
+
+ return ops->set_rss(priv->ae_handle, indir, key, hfunc);
+}
+
+static int hns_get_rxnfc(struct net_device *netdev,
+ struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct hns_nic_priv *priv = netdev_priv(netdev);
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = priv->ae_handle->q_num;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static const struct ethtool_ops hns_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_USE_ADAPTIVE |
+ ETHTOOL_COALESCE_USECS_LOW_HIGH |
+ ETHTOOL_COALESCE_MAX_FRAMES_LOW_HIGH,
+ .get_drvinfo = hns_nic_get_drvinfo,
+ .get_link = hns_nic_get_link,
+ .get_ringparam = hns_get_ringparam,
+ .get_pauseparam = hns_get_pauseparam,
+ .set_pauseparam = hns_set_pauseparam,
+ .get_coalesce = hns_get_coalesce,
+ .set_coalesce = hns_set_coalesce,
+ .get_channels = hns_get_channels,
+ .self_test = hns_nic_self_test,
+ .get_strings = hns_get_strings,
+ .get_sset_count = hns_get_sset_count,
+ .get_ethtool_stats = hns_get_ethtool_stats,
+ .set_phys_id = hns_set_phys_id,
+ .get_regs_len = hns_get_regs_len,
+ .get_regs = hns_get_regs,
+ .nway_reset = hns_nic_nway_reset,
+ .get_rxfh_key_size = hns_get_rss_key_size,
+ .get_rxfh_indir_size = hns_get_rss_indir_size,
+ .get_rxfh = hns_get_rss,
+ .set_rxfh = hns_set_rss,
+ .get_rxnfc = hns_get_rxnfc,
+ .get_link_ksettings = hns_nic_get_link_ksettings,
+ .set_link_ksettings = hns_nic_set_link_ksettings,
+};
+
+void hns_ethtool_set_ops(struct net_device *ndev)
+{
+ ndev->ethtool_ops = &hns_ethtool_ops;
+}
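
For reference, the self-test path above (__lb_run_test/__lb_other_process) works by
transmitting a frame filled with a fixed byte pattern and then checking that pattern on
the frames drained back from the receive rings. A standalone sketch of the same pattern
logic, with hypothetical helper names and outside the driver, might look like this:

    /* Build and verify the loopback test pattern used by __lb_other_process():
     * fill with 0xFF, overwrite the second half with 0xAA, then stamp 0xBE and
     * 0xAF markers that the receive side checks.
     */
    #include <string.h>

    static void build_lb_frame(unsigned char *data, unsigned int frame_size)
    {
            memset(data, 0xFF, frame_size);
            frame_size &= ~1ul;
            memset(&data[frame_size / 2], 0xAA, frame_size / 2 - 1);
            memset(&data[frame_size / 2 + 10], 0xBE, frame_size / 2 - 11);
            memset(&data[frame_size / 2 + 12], 0xAF, frame_size / 2 - 13);
    }

    static int check_lb_frame(const unsigned char *data, unsigned int frame_size)
    {
            frame_size &= ~1ul;
            return data[10] == 0xFF &&
                   data[frame_size / 2 + 10] == 0xBE &&
                   data[frame_size / 2 + 12] == 0xAF;
    }
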
diff --git a/drivers/net/ethernet/hisilicon/hns3/Makefile b/drivers/net/ethernet/hisilicon/hns3/Makefile
new file mode 100644
index 0000000000..e214bfaece
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/Makefile
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Makefile for the HISILICON network device drivers.
+#
+
+ccflags-y += -I$(srctree)/$(src)
+ccflags-y += -I$(srctree)/drivers/net/ethernet/hisilicon/hns3/hns3pf
+ccflags-y += -I$(srctree)/drivers/net/ethernet/hisilicon/hns3/hns3vf
+ccflags-y += -I$(srctree)/drivers/net/ethernet/hisilicon/hns3/hns3_common
+
+obj-$(CONFIG_HNS3) += hnae3.o
+
+obj-$(CONFIG_HNS3_ENET) += hns3.o
+hns3-objs = hns3_enet.o hns3_ethtool.o hns3_debugfs.o
+
+hns3-$(CONFIG_HNS3_DCB) += hns3_dcbnl.o
+
+obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o
+
+hclgevf-objs = hns3vf/hclgevf_main.o hns3vf/hclgevf_mbx.o hns3vf/hclgevf_devlink.o hns3vf/hclgevf_regs.o \
+ hns3_common/hclge_comm_cmd.o hns3_common/hclge_comm_rss.o hns3_common/hclge_comm_tqp_stats.o
+
+obj-$(CONFIG_HNS3_HCLGE) += hclge.o
+hclge-objs = hns3pf/hclge_main.o hns3pf/hclge_mdio.o hns3pf/hclge_tm.o hns3pf/hclge_regs.o \
+ hns3pf/hclge_mbx.o hns3pf/hclge_err.o hns3pf/hclge_debugfs.o hns3pf/hclge_ptp.o hns3pf/hclge_devlink.o \
+ hns3_common/hclge_comm_cmd.o hns3_common/hclge_comm_rss.o hns3_common/hclge_comm_tqp_stats.o
+
+
+hclge-$(CONFIG_HNS3_DCB) += hns3pf/hclge_dcb.o
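
The Makefile above splits the HNS3 family into the hnae3 framework module, the hns3
ENET module and the PF/VF (hclge/hclgevf) modules, each gated by its own Kconfig
symbol. A plausible .config fragment enabling all of them as modules (the exact
dependencies are defined by the corresponding Kconfig entries, which are not shown
here):

    CONFIG_HNS3=m
    CONFIG_HNS3_HCLGE=m
    CONFIG_HNS3_DCB=y
    CONFIG_HNS3_HCLGEVF=m
    CONFIG_HNS3_ENET=m
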
diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
new file mode 100644
index 0000000000..abcd7877f7
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
@@ -0,0 +1,256 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2016-2017 Hisilicon Limited. */
+
+#ifndef __HCLGE_MBX_H
+#define __HCLGE_MBX_H
+#include <linux/init.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+enum HCLGE_MBX_OPCODE {
+ HCLGE_MBX_RESET = 0x01, /* (VF -> PF) assert reset */
+ HCLGE_MBX_ASSERTING_RESET, /* (PF -> VF) PF is asserting reset */
+ HCLGE_MBX_SET_UNICAST, /* (VF -> PF) set UC addr */
+ HCLGE_MBX_SET_MULTICAST, /* (VF -> PF) set MC addr */
+ HCLGE_MBX_SET_VLAN, /* (VF -> PF) set VLAN */
+ HCLGE_MBX_MAP_RING_TO_VECTOR, /* (VF -> PF) map ring-to-vector */
+ HCLGE_MBX_UNMAP_RING_TO_VECTOR, /* (VF -> PF) unmap ring-to-vector */
+ HCLGE_MBX_SET_PROMISC_MODE, /* (VF -> PF) set promiscuous mode */
+ HCLGE_MBX_SET_MACVLAN, /* (VF -> PF) set unicast filter */
+ HCLGE_MBX_API_NEGOTIATE, /* (VF -> PF) negotiate API version */
+ HCLGE_MBX_GET_QINFO, /* (VF -> PF) get queue config */
+ HCLGE_MBX_GET_QDEPTH, /* (VF -> PF) get queue depth */
+ HCLGE_MBX_GET_BASIC_INFO, /* (VF -> PF) get basic info */
+ HCLGE_MBX_GET_RETA, /* (VF -> PF) get RETA */
+ HCLGE_MBX_GET_RSS_KEY, /* (VF -> PF) get RSS key */
+ HCLGE_MBX_GET_MAC_ADDR, /* (VF -> PF) get MAC addr */
+ HCLGE_MBX_PF_VF_RESP, /* (PF -> VF) generate response to VF */
+ HCLGE_MBX_GET_BDNUM, /* (VF -> PF) get BD num */
+ HCLGE_MBX_GET_BUFSIZE, /* (VF -> PF) get buffer size */
+ HCLGE_MBX_GET_STREAMID, /* (VF -> PF) get stream id */
+ HCLGE_MBX_SET_AESTART, /* (VF -> PF) start ae */
+ HCLGE_MBX_SET_TSOSTATS, /* (VF -> PF) get tso stats */
+ HCLGE_MBX_LINK_STAT_CHANGE, /* (PF -> VF) link status has changed */
+ HCLGE_MBX_GET_BASE_CONFIG, /* (VF -> PF) get config */
+ HCLGE_MBX_BIND_FUNC_QUEUE, /* (VF -> PF) bind function and queue */
+ HCLGE_MBX_GET_LINK_STATUS, /* (VF -> PF) get link status */
+ HCLGE_MBX_QUEUE_RESET, /* (VF -> PF) reset queue */
+ HCLGE_MBX_KEEP_ALIVE, /* (VF -> PF) send keep alive cmd */
+ HCLGE_MBX_SET_ALIVE, /* (VF -> PF) set alive state */
+ HCLGE_MBX_SET_MTU, /* (VF -> PF) set mtu */
+ HCLGE_MBX_GET_QID_IN_PF, /* (VF -> PF) get queue id in pf */
+ HCLGE_MBX_LINK_STAT_MODE, /* (PF -> VF) link mode has changed */
+ HCLGE_MBX_GET_LINK_MODE, /* (VF -> PF) get the link mode of pf */
+ HCLGE_MBX_PUSH_VLAN_INFO, /* (PF -> VF) push port base vlan */
+ HCLGE_MBX_GET_MEDIA_TYPE, /* (VF -> PF) get media type */
+ HCLGE_MBX_PUSH_PROMISC_INFO, /* (PF -> VF) push vf promisc info */
+ HCLGE_MBX_VF_UNINIT, /* (VF -> PF) vf is uninitializing */
+ HCLGE_MBX_HANDLE_VF_TBL, /* (VF -> PF) store/clear hw table */
+ HCLGE_MBX_GET_RING_VECTOR_MAP, /* (VF -> PF) get ring-to-vector map */
+
+ HCLGE_MBX_GET_VF_FLR_STATUS = 200, /* (M7 -> PF) get vf flr status */
+ HCLGE_MBX_PUSH_LINK_STATUS, /* (M7 -> PF) get port link status */
+ HCLGE_MBX_NCSI_ERROR, /* (M7 -> PF) receive a NCSI error */
+};
+
+/* below are per-VF mac-vlan subcodes */
+enum hclge_mbx_mac_vlan_subcode {
+ HCLGE_MBX_MAC_VLAN_UC_MODIFY = 0, /* modify UC mac addr */
+ HCLGE_MBX_MAC_VLAN_UC_ADD, /* add a new UC mac addr */
+ HCLGE_MBX_MAC_VLAN_UC_REMOVE, /* remove a UC mac addr */
+ HCLGE_MBX_MAC_VLAN_MC_MODIFY, /* modify MC mac addr */
+ HCLGE_MBX_MAC_VLAN_MC_ADD, /* add new MC mac addr */
+ HCLGE_MBX_MAC_VLAN_MC_REMOVE, /* remove MC mac addr */
+};
+
+/* below are per-VF vlan cfg subcodes */
+enum hclge_mbx_vlan_cfg_subcode {
+ HCLGE_MBX_VLAN_FILTER = 0, /* set vlan filter */
+ HCLGE_MBX_VLAN_TX_OFF_CFG, /* set tx side vlan offload */
+ HCLGE_MBX_VLAN_RX_OFF_CFG, /* set rx side vlan offload */
+ HCLGE_MBX_PORT_BASE_VLAN_CFG, /* set port based vlan configuration */
+ HCLGE_MBX_GET_PORT_BASE_VLAN_STATE, /* get port based vlan state */
+ HCLGE_MBX_ENABLE_VLAN_FILTER,
+};
+
+enum hclge_mbx_tbl_cfg_subcode {
+ HCLGE_MBX_VPORT_LIST_CLEAR,
+};
+
+#define HCLGE_MBX_MAX_MSG_SIZE 14
+#define HCLGE_MBX_MAX_RESP_DATA_SIZE 8U
+#define HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM 4
+
+#define HCLGE_RESET_SCHED_TIMEOUT (3 * HZ)
+#define HCLGE_MBX_SCHED_TIMEOUT (HZ / 2)
+
+struct hclge_ring_chain_param {
+ u8 ring_type;
+ u8 tqp_index;
+ u8 int_gl_index;
+};
+
+struct hclge_basic_info {
+ u8 hw_tc_map;
+ u8 rsv;
+ __le16 mbx_api_version;
+ __le32 pf_caps;
+};
+
+struct hclgevf_mbx_resp_status {
+ struct mutex mbx_mutex; /* protects against contending sync cmd resp */
+ u32 origin_mbx_msg;
+ bool received_resp;
+ int resp_status;
+ u16 match_id;
+ u8 additional_info[HCLGE_MBX_MAX_RESP_DATA_SIZE];
+};
+
+struct hclge_respond_to_vf_msg {
+ int status;
+ u8 data[HCLGE_MBX_MAX_RESP_DATA_SIZE];
+ u16 len;
+};
+
+struct hclge_vf_to_pf_msg {
+ u8 code;
+ union {
+ struct {
+ u8 subcode;
+ u8 data[HCLGE_MBX_MAX_MSG_SIZE];
+ };
+ struct {
+ u8 en_bc;
+ u8 en_uc;
+ u8 en_mc;
+ u8 en_limit_promisc;
+ };
+ struct {
+ u8 vector_id;
+ u8 ring_num;
+ struct hclge_ring_chain_param
+ param[HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM];
+ };
+ };
+};
+
+struct hclge_pf_to_vf_msg {
+ __le16 code;
+ union {
+ /* used for mbx response */
+ struct {
+ __le16 vf_mbx_msg_code;
+ __le16 vf_mbx_msg_subcode;
+ __le16 resp_status;
+ u8 resp_data[HCLGE_MBX_MAX_RESP_DATA_SIZE];
+ };
+ /* used for general mbx */
+ struct {
+ u8 msg_data[HCLGE_MBX_MAX_MSG_SIZE];
+ };
+ };
+};
+
+struct hclge_mbx_vf_to_pf_cmd {
+ u8 rsv;
+ u8 mbx_src_vfid; /* Auto filled by IMP */
+ u8 mbx_need_resp;
+ u8 rsv1[1];
+ u8 msg_len;
+ u8 rsv2;
+ __le16 match_id;
+ struct hclge_vf_to_pf_msg msg;
+};
+
+#define HCLGE_MBX_NEED_RESP_B 0
+
+struct hclge_mbx_pf_to_vf_cmd {
+ u8 dest_vfid;
+ u8 rsv[3];
+ u8 msg_len;
+ u8 rsv1;
+ __le16 match_id;
+ struct hclge_pf_to_vf_msg msg;
+};
+
+struct hclge_vf_rst_cmd {
+ u8 dest_vfid;
+ u8 vf_rst;
+ u8 rsv[22];
+};
+
+#pragma pack(1)
+struct hclge_mbx_link_status {
+ __le16 link_status;
+ __le32 speed;
+ __le16 duplex;
+ u8 flag;
+};
+
+struct hclge_mbx_link_mode {
+ __le16 idx;
+ __le64 link_mode;
+};
+
+struct hclge_mbx_port_base_vlan {
+ __le16 state;
+ __le16 vlan_proto;
+ __le16 qos;
+ __le16 vlan_tag;
+};
+
+struct hclge_mbx_vf_queue_info {
+ __le16 num_tqps;
+ __le16 rss_size;
+ __le16 rx_buf_len;
+};
+
+struct hclge_mbx_vf_queue_depth {
+ __le16 num_tx_desc;
+ __le16 num_rx_desc;
+};
+
+struct hclge_mbx_vlan_filter {
+ u8 is_kill;
+ __le16 vlan_id;
+ __le16 proto;
+};
+
+struct hclge_mbx_mtu_info {
+ __le32 mtu;
+};
+
+#pragma pack()
+
+/* used by VF to store the received Async responses from PF */
+struct hclgevf_mbx_arq_ring {
+#define HCLGE_MBX_MAX_ARQ_MSG_SIZE 8
+#define HCLGE_MBX_MAX_ARQ_MSG_NUM 1024
+ struct hclgevf_dev *hdev;
+ u32 head;
+ u32 tail;
+ atomic_t count;
+ __le16 msg_q[HCLGE_MBX_MAX_ARQ_MSG_NUM][HCLGE_MBX_MAX_ARQ_MSG_SIZE];
+};
+
+struct hclge_dev;
+
+#define HCLGE_MBX_OPCODE_MAX 256
+struct hclge_mbx_ops_param {
+ struct hclge_vport *vport;
+ struct hclge_mbx_vf_to_pf_cmd *req;
+ struct hclge_respond_to_vf_msg *resp_msg;
+};
+
+typedef int (*hclge_mbx_ops_fn)(struct hclge_mbx_ops_param *param);
+
+#define hclge_mbx_ring_ptr_move_crq(crq) \
+ (crq->next_to_use = (crq->next_to_use + 1) % crq->desc_num)
+#define hclge_mbx_tail_ptr_move_arq(arq) \
+ (arq.tail = (arq.tail + 1) % HCLGE_MBX_MAX_ARQ_MSG_NUM)
+#define hclge_mbx_head_ptr_move_arq(arq) \
+ (arq.head = (arq.head + 1) % HCLGE_MBX_MAX_ARQ_MSG_NUM)
+
+/* PF immediately pushes link status to VFs when the link status changes */
+#define HCLGE_MBX_PUSH_LINK_STATUS_EN BIT(0)
+#endif
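
The hclgevf_mbx_arq_ring above is a fixed-size circular buffer of up to
HCLGE_MBX_MAX_ARQ_MSG_NUM asynchronous messages; the head/tail macros simply advance
an index modulo that size (the real ring tracks the fill level with an atomic_t). A
standalone illustration of the same index arithmetic, with names local to this sketch
and not part of the driver:

    /* head/tail wrap around a fixed ring, mirroring the
     * hclge_mbx_{head,tail}_ptr_move_arq macros.
     */
    #define ARQ_MSG_NUM 1024        /* mirrors HCLGE_MBX_MAX_ARQ_MSG_NUM */

    struct arq_idx {
            unsigned int head;
            unsigned int tail;
            unsigned int count;
    };

    static void arq_produce(struct arq_idx *a)
    {
            a->tail = (a->tail + 1) % ARQ_MSG_NUM;  /* tail_ptr_move */
            a->count++;
    }

    static void arq_consume(struct arq_idx *a)
    {
            a->head = (a->head + 1) % ARQ_MSG_NUM;  /* head_ptr_move */
            a->count--;
    }
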
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
new file mode 100644
index 0000000000..67b0bf310d
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
@@ -0,0 +1,392 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+
+#include "hnae3.h"
+
+static LIST_HEAD(hnae3_ae_algo_list);
+static LIST_HEAD(hnae3_client_list);
+static LIST_HEAD(hnae3_ae_dev_list);
+
+void hnae3_unregister_ae_algo_prepare(struct hnae3_ae_algo *ae_algo)
+{
+ const struct pci_device_id *pci_id;
+ struct hnae3_ae_dev *ae_dev;
+
+ if (!ae_algo)
+ return;
+
+ list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) {
+ if (!hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))
+ continue;
+
+ pci_id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev);
+ if (!pci_id)
+ continue;
+ if (IS_ENABLED(CONFIG_PCI_IOV))
+ pci_disable_sriov(ae_dev->pdev);
+ }
+}
+EXPORT_SYMBOL(hnae3_unregister_ae_algo_prepare);
+
+/* We keep things simple and use a single lock for all the lists.
+ * This is non-critical code, so other updates, if they happen
+ * in parallel, can wait.
+ */
+static DEFINE_MUTEX(hnae3_common_lock);
+
+static bool hnae3_client_match(enum hnae3_client_type client_type)
+{
+ if (client_type == HNAE3_CLIENT_KNIC ||
+ client_type == HNAE3_CLIENT_ROCE)
+ return true;
+
+ return false;
+}
+
+void hnae3_set_client_init_flag(struct hnae3_client *client,
+ struct hnae3_ae_dev *ae_dev,
+ unsigned int inited)
+{
+ if (!client || !ae_dev)
+ return;
+
+ switch (client->type) {
+ case HNAE3_CLIENT_KNIC:
+ hnae3_set_bit(ae_dev->flag, HNAE3_KNIC_CLIENT_INITED_B, inited);
+ break;
+ case HNAE3_CLIENT_ROCE:
+ hnae3_set_bit(ae_dev->flag, HNAE3_ROCE_CLIENT_INITED_B, inited);
+ break;
+ default:
+ break;
+ }
+}
+EXPORT_SYMBOL(hnae3_set_client_init_flag);
+
+static int hnae3_get_client_init_flag(struct hnae3_client *client,
+ struct hnae3_ae_dev *ae_dev)
+{
+ int inited = 0;
+
+ switch (client->type) {
+ case HNAE3_CLIENT_KNIC:
+ inited = hnae3_get_bit(ae_dev->flag,
+ HNAE3_KNIC_CLIENT_INITED_B);
+ break;
+ case HNAE3_CLIENT_ROCE:
+ inited = hnae3_get_bit(ae_dev->flag,
+ HNAE3_ROCE_CLIENT_INITED_B);
+ break;
+ default:
+ break;
+ }
+
+ return inited;
+}
+
+static int hnae3_init_client_instance(struct hnae3_client *client,
+ struct hnae3_ae_dev *ae_dev)
+{
+ int ret;
+
+ /* check if this client matches the type of ae_dev */
+ if (!(hnae3_client_match(client->type) &&
+ hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))) {
+ return 0;
+ }
+
+ ret = ae_dev->ops->init_client_instance(client, ae_dev);
+ if (ret)
+ dev_err(&ae_dev->pdev->dev,
+ "fail to instantiate client, ret = %d\n", ret);
+
+ return ret;
+}
+
+static void hnae3_uninit_client_instance(struct hnae3_client *client,
+ struct hnae3_ae_dev *ae_dev)
+{
+ /* check if this client matches the type of ae_dev */
+ if (!(hnae3_client_match(client->type) &&
+ hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B)))
+ return;
+
+ if (hnae3_get_client_init_flag(client, ae_dev)) {
+ ae_dev->ops->uninit_client_instance(client, ae_dev);
+
+ hnae3_set_client_init_flag(client, ae_dev, 0);
+ }
+}
+
+int hnae3_register_client(struct hnae3_client *client)
+{
+ struct hnae3_client *client_tmp;
+ struct hnae3_ae_dev *ae_dev;
+
+ if (!client)
+ return -ENODEV;
+
+ mutex_lock(&hnae3_common_lock);
+ /* one system should only have one client for every type */
+ list_for_each_entry(client_tmp, &hnae3_client_list, node) {
+ if (client_tmp->type == client->type)
+ goto exit;
+ }
+
+ list_add_tail(&client->node, &hnae3_client_list);
+
+ /* initialize the client on every matched port */
+ list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) {
+ /* if the client could not be initialized on the current port,
+ * for any reason, move on to the next available port
+ */
+ int ret = hnae3_init_client_instance(client, ae_dev);
+ if (ret)
+ dev_err(&ae_dev->pdev->dev,
+ "match and instantiation failed for port, ret = %d\n",
+ ret);
+ }
+
+exit:
+ mutex_unlock(&hnae3_common_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(hnae3_register_client);
+
+void hnae3_unregister_client(struct hnae3_client *client)
+{
+ struct hnae3_client *client_tmp;
+ struct hnae3_ae_dev *ae_dev;
+ bool existed = false;
+
+ if (!client)
+ return;
+
+ mutex_lock(&hnae3_common_lock);
+ /* one system should only have one client for every type */
+ list_for_each_entry(client_tmp, &hnae3_client_list, node) {
+ if (client_tmp->type == client->type) {
+ existed = true;
+ break;
+ }
+ }
+
+ if (!existed) {
+ mutex_unlock(&hnae3_common_lock);
+ pr_err("client %s does not exist!\n", client->name);
+ return;
+ }
+
+ /* un-initialize the client on every matched port */
+ list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) {
+ hnae3_uninit_client_instance(client, ae_dev);
+ }
+
+ list_del(&client->node);
+ mutex_unlock(&hnae3_common_lock);
+}
+EXPORT_SYMBOL(hnae3_unregister_client);
+
+/* hnae3_register_ae_algo - register an AE algorithm with the hnae3 framework
+ * @ae_algo: AE algorithm
+ * NOTE: duplicate names are not checked
+ */
+void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo)
+{
+ const struct pci_device_id *id;
+ struct hnae3_ae_dev *ae_dev;
+ struct hnae3_client *client;
+ int ret;
+
+ if (!ae_algo)
+ return;
+
+ mutex_lock(&hnae3_common_lock);
+
+ list_add_tail(&ae_algo->node, &hnae3_ae_algo_list);
+
+ /* Check if this algo/ops matches the list of ae_devs */
+ list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) {
+ id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev);
+ if (!id)
+ continue;
+
+ if (!ae_algo->ops) {
+ dev_err(&ae_dev->pdev->dev, "ae_algo ops are null\n");
+ continue;
+ }
+ ae_dev->ops = ae_algo->ops;
+
+ ret = ae_algo->ops->init_ae_dev(ae_dev);
+ if (ret) {
+ dev_err(&ae_dev->pdev->dev,
+ "init ae_dev error, ret = %d\n", ret);
+ continue;
+ }
+
+ /* ae_dev init should set flag */
+ hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1);
+
+ /* check the client list for a match with this ae_dev type and
+ * initialize the matched client instances
+ */
+ list_for_each_entry(client, &hnae3_client_list, node) {
+ ret = hnae3_init_client_instance(client, ae_dev);
+ if (ret)
+ dev_err(&ae_dev->pdev->dev,
+ "match and instantiation failed, ret = %d\n",
+ ret);
+ }
+ }
+
+ mutex_unlock(&hnae3_common_lock);
+}
+EXPORT_SYMBOL(hnae3_register_ae_algo);
+
+/* hnae3_unregister_ae_algo - unregisters an AE algorithm
+ * @ae_algo: the AE algorithm to unregister
+ */
+void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo)
+{
+ const struct pci_device_id *id;
+ struct hnae3_ae_dev *ae_dev;
+ struct hnae3_client *client;
+
+ if (!ae_algo)
+ return;
+
+ mutex_lock(&hnae3_common_lock);
+ /* Check if there are any matching ae_devs */
+ list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) {
+ if (!hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))
+ continue;
+
+ id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev);
+ if (!id)
+ continue;
+
+ /* check the client list for a match with this ae_dev type and
+ * un-initialize the matched client instances
+ */
+ list_for_each_entry(client, &hnae3_client_list, node)
+ hnae3_uninit_client_instance(client, ae_dev);
+
+ ae_algo->ops->uninit_ae_dev(ae_dev);
+ hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
+ ae_dev->ops = NULL;
+ }
+
+ list_del(&ae_algo->node);
+ mutex_unlock(&hnae3_common_lock);
+}
+EXPORT_SYMBOL(hnae3_unregister_ae_algo);
+
+/* hnae3_register_ae_dev - registers an AE device with the hnae3 framework
+ * @ae_dev: the AE device
+ * NOTE: duplicate names are not checked
+ */
+int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
+{
+ const struct pci_device_id *id;
+ struct hnae3_ae_algo *ae_algo;
+ struct hnae3_client *client;
+ int ret;
+
+ if (!ae_dev)
+ return -ENODEV;
+
+ mutex_lock(&hnae3_common_lock);
+
+ list_add_tail(&ae_dev->node, &hnae3_ae_dev_list);
+
+ /* Check if there is a matching ae_algo */
+ list_for_each_entry(ae_algo, &hnae3_ae_algo_list, node) {
+ id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev);
+ if (!id)
+ continue;
+
+ if (!ae_algo->ops) {
+ dev_err(&ae_dev->pdev->dev, "ae_algo ops are null\n");
+ ret = -EOPNOTSUPP;
+ goto out_err;
+ }
+ ae_dev->ops = ae_algo->ops;
+
+ ret = ae_dev->ops->init_ae_dev(ae_dev);
+ if (ret) {
+ dev_err(&ae_dev->pdev->dev,
+ "init ae_dev error, ret = %d\n", ret);
+ goto out_err;
+ }
+
+ /* ae_dev init should set flag */
+ hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1);
+ break;
+ }
+
+ /* check the client list for a match with this ae_dev type and
+ * initialize the matched client instances
+ */
+ list_for_each_entry(client, &hnae3_client_list, node) {
+ ret = hnae3_init_client_instance(client, ae_dev);
+ if (ret)
+ dev_err(&ae_dev->pdev->dev,
+ "match and instantiation failed, ret = %d\n",
+ ret);
+ }
+
+ mutex_unlock(&hnae3_common_lock);
+
+ return 0;
+
+out_err:
+ list_del(&ae_dev->node);
+ mutex_unlock(&hnae3_common_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(hnae3_register_ae_dev);
+
+/* hnae3_unregister_ae_dev - unregisters an AE device
+ * @ae_dev: the AE device to unregister
+ */
+void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev)
+{
+ const struct pci_device_id *id;
+ struct hnae3_ae_algo *ae_algo;
+ struct hnae3_client *client;
+
+ if (!ae_dev)
+ return;
+
+ mutex_lock(&hnae3_common_lock);
+ /* Check if there is a matching ae_algo */
+ list_for_each_entry(ae_algo, &hnae3_ae_algo_list, node) {
+ if (!hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))
+ continue;
+
+ id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev);
+ if (!id)
+ continue;
+
+ list_for_each_entry(client, &hnae3_client_list, node)
+ hnae3_uninit_client_instance(client, ae_dev);
+
+ ae_algo->ops->uninit_ae_dev(ae_dev);
+ hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
+ ae_dev->ops = NULL;
+ }
+
+ list_del(&ae_dev->node);
+ mutex_unlock(&hnae3_common_lock);
+}
+EXPORT_SYMBOL(hnae3_unregister_ae_dev);
+
+MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("HNAE3(Hisilicon Network Acceleration Engine) Framework");
+MODULE_VERSION(HNAE3_MOD_VERSION);
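
The registration functions above are the whole public surface of the framework: an AE
driver registers its algo/device, and upper-layer modules register clients whose
init_instance/uninit_instance ops are invoked on every matched device. A rough sketch
of such a client module follows, assuming struct hnae3_client carries a name, a type,
a list node and an ops pointer (hnae3.c itself only touches name, type and node);
error handling, the remaining client ops and the usual module boilerplate are omitted:

    static int example_init_instance(struct hnae3_handle *handle)
    {
            /* allocate per-handle resources (netdev, queues, ...) */
            return 0;
    }

    static void example_uninit_instance(struct hnae3_handle *handle, bool reset)
    {
            /* release whatever init_instance set up */
    }

    static const struct hnae3_client_ops example_client_ops = {
            .init_instance = example_init_instance,
            .uninit_instance = example_uninit_instance,
    };

    static struct hnae3_client example_client = {
            .name = "example_knic",
            .type = HNAE3_CLIENT_KNIC,
            .ops = &example_client_ops,
    };

    static int __init example_init(void)
    {
            return hnae3_register_client(&example_client);
    }

    static void __exit example_exit(void)
    {
            hnae3_unregister_client(&example_client);
    }

    module_init(example_init);
    module_exit(example_exit);
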
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
new file mode 100644
index 0000000000..aaf1f42624
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -0,0 +1,949 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+// Copyright (c) 2016-2017 Hisilicon Limited.
+
+#ifndef __HNAE3_H
+#define __HNAE3_H
+
+/* Names used in this framework:
+ * ae handle (handle):
+ * a set of queues provided by AE
+ * ring buffer queue (rbq):
+ * the channel between upper layer and the AE, can do tx and rx
+ * ring:
+ * a tx or rx channel within a rbq
+ * ring description (desc):
+ * an element in the ring with packet information
+ * buffer:
+ * a memory region referred by desc with the full packet payload
+ *
+ * "num" means a static number set as a parameter, "count" mean a dynamic
+ * number set while running
+ * "cb" means control block
+ */
+
+#include <linux/acpi.h>
+#include <linux/dcbnl.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/ethtool.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/pkt_sched.h>
+#include <linux/types.h>
+#include <linux/bitmap.h>
+#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
+
+#define HNAE3_MOD_VERSION "1.0"
+
+#define HNAE3_MIN_VECTOR_NUM 2 /* first one for misc, another for IO */
+
+/* Device version */
+#define HNAE3_DEVICE_VERSION_V1 0x00020
+#define HNAE3_DEVICE_VERSION_V2 0x00021
+#define HNAE3_DEVICE_VERSION_V3 0x00030
+
+#define HNAE3_PCI_REVISION_BIT_SIZE 8
+
+/* Device IDs */
+#define HNAE3_DEV_ID_GE 0xA220
+#define HNAE3_DEV_ID_25GE 0xA221
+#define HNAE3_DEV_ID_25GE_RDMA 0xA222
+#define HNAE3_DEV_ID_25GE_RDMA_MACSEC 0xA223
+#define HNAE3_DEV_ID_50GE_RDMA 0xA224
+#define HNAE3_DEV_ID_50GE_RDMA_MACSEC 0xA225
+#define HNAE3_DEV_ID_100G_RDMA_MACSEC 0xA226
+#define HNAE3_DEV_ID_200G_RDMA 0xA228
+#define HNAE3_DEV_ID_VF 0xA22E
+#define HNAE3_DEV_ID_RDMA_DCB_PFC_VF 0xA22F
+
+#define HNAE3_CLASS_NAME_SIZE 16
+
+#define HNAE3_DEV_INITED_B 0x0
+#define HNAE3_DEV_SUPPORT_ROCE_B 0x1
+#define HNAE3_DEV_SUPPORT_DCB_B 0x2
+#define HNAE3_KNIC_CLIENT_INITED_B 0x3
+#define HNAE3_UNIC_CLIENT_INITED_B 0x4
+#define HNAE3_ROCE_CLIENT_INITED_B 0x5
+
+#define HNAE3_DEV_SUPPORT_ROCE_DCB_BITS (BIT(HNAE3_DEV_SUPPORT_DCB_B) | \
+ BIT(HNAE3_DEV_SUPPORT_ROCE_B))
+
+#define hnae3_dev_roce_supported(hdev) \
+ hnae3_get_bit((hdev)->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)
+
+#define hnae3_dev_dcb_supported(hdev) \
+ hnae3_get_bit((hdev)->ae_dev->flag, HNAE3_DEV_SUPPORT_DCB_B)
+
+enum HNAE3_DEV_CAP_BITS {
+ HNAE3_DEV_SUPPORT_FD_B,
+ HNAE3_DEV_SUPPORT_GRO_B,
+ HNAE3_DEV_SUPPORT_FEC_B,
+ HNAE3_DEV_SUPPORT_UDP_GSO_B,
+ HNAE3_DEV_SUPPORT_QB_B,
+ HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B,
+ HNAE3_DEV_SUPPORT_PTP_B,
+ HNAE3_DEV_SUPPORT_INT_QL_B,
+ HNAE3_DEV_SUPPORT_HW_TX_CSUM_B,
+ HNAE3_DEV_SUPPORT_TX_PUSH_B,
+ HNAE3_DEV_SUPPORT_PHY_IMP_B,
+ HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B,
+ HNAE3_DEV_SUPPORT_HW_PAD_B,
+ HNAE3_DEV_SUPPORT_STASH_B,
+ HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B,
+ HNAE3_DEV_SUPPORT_PAUSE_B,
+ HNAE3_DEV_SUPPORT_RAS_IMP_B,
+ HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B,
+ HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B,
+ HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B,
+ HNAE3_DEV_SUPPORT_MC_MAC_MNG_B,
+ HNAE3_DEV_SUPPORT_CQ_B,
+ HNAE3_DEV_SUPPORT_FEC_STATS_B,
+ HNAE3_DEV_SUPPORT_LANE_NUM_B,
+ HNAE3_DEV_SUPPORT_WOL_B,
+ HNAE3_DEV_SUPPORT_TM_FLUSH_B,
+};
+
+#define hnae3_ae_dev_fd_supported(ae_dev) \
+ test_bit(HNAE3_DEV_SUPPORT_FD_B, (ae_dev)->caps)
+
+#define hnae3_ae_dev_gro_supported(ae_dev) \
+ test_bit(HNAE3_DEV_SUPPORT_GRO_B, (ae_dev)->caps)
+
+#define hnae3_dev_fec_supported(hdev) \
+ test_bit(HNAE3_DEV_SUPPORT_FEC_B, (hdev)->ae_dev->caps)
+
+#define hnae3_dev_udp_gso_supported(hdev) \
+ test_bit(HNAE3_DEV_SUPPORT_UDP_GSO_B, (hdev)->ae_dev->caps)
+
+#define hnae3_dev_qb_supported(hdev) \
+ test_bit(HNAE3_DEV_SUPPORT_QB_B, (hdev)->ae_dev->caps)
+
+#define hnae3_dev_fd_forward_tc_supported(hdev) \
+ test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, (hdev)->ae_dev->caps)
+
+#define hnae3_dev_ptp_supported(hdev) \
+ test_bit(HNAE3_DEV_SUPPORT_PTP_B, (hdev)->ae_dev->caps)
+
+#define hnae3_dev_int_ql_supported(hdev) \
+ test_bit(HNAE3_DEV_SUPPORT_INT_QL_B, (hdev)->ae_dev->caps)
+
+#define hnae3_dev_hw_csum_supported(hdev) \
+ test_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, (hdev)->ae_dev->caps)
+
+#define hnae3_dev_tx_push_supported(hdev) \
+ test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, (hdev)->ae_dev->caps)
+
+#define hnae3_dev_phy_imp_supported(hdev) \
+ test_bit(HNAE3_DEV_SUPPORT_PHY_IMP_B, (hdev)->ae_dev->caps)
+
+#define hnae3_dev_ras_imp_supported(hdev) \
+ test_bit(HNAE3_DEV_SUPPORT_RAS_IMP_B, (hdev)->ae_dev->caps)
+
+#define hnae3_dev_tqp_txrx_indep_supported(hdev) \
+ test_bit(HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B, (hdev)->ae_dev->caps)
+
+#define hnae3_dev_hw_pad_supported(hdev) \
+ test_bit(HNAE3_DEV_SUPPORT_HW_PAD_B, (hdev)->ae_dev->caps)
+
+#define hnae3_dev_stash_supported(hdev) \
+ test_bit(HNAE3_DEV_SUPPORT_STASH_B, (hdev)->ae_dev->caps)
+
+#define hnae3_dev_pause_supported(hdev) \
+ test_bit(HNAE3_DEV_SUPPORT_PAUSE_B, (hdev)->ae_dev->caps)
+
+#define hnae3_ae_dev_tqp_txrx_indep_supported(ae_dev) \
+ test_bit(HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B, (ae_dev)->caps)
+
+#define hnae3_ae_dev_rxd_adv_layout_supported(ae_dev) \
+ test_bit(HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B, (ae_dev)->caps)
+
+#define hnae3_ae_dev_mc_mac_mng_supported(ae_dev) \
+ test_bit(HNAE3_DEV_SUPPORT_MC_MAC_MNG_B, (ae_dev)->caps)
+
+#define hnae3_ae_dev_cq_supported(ae_dev) \
+ test_bit(HNAE3_DEV_SUPPORT_CQ_B, (ae_dev)->caps)
+
+#define hnae3_ae_dev_fec_stats_supported(ae_dev) \
+ test_bit(HNAE3_DEV_SUPPORT_FEC_STATS_B, (ae_dev)->caps)
+
+#define hnae3_ae_dev_lane_num_supported(ae_dev) \
+ test_bit(HNAE3_DEV_SUPPORT_LANE_NUM_B, (ae_dev)->caps)
+
+#define hnae3_ae_dev_wol_supported(ae_dev) \
+ test_bit(HNAE3_DEV_SUPPORT_WOL_B, (ae_dev)->caps)
+
+#define hnae3_ae_dev_tm_flush_supported(hdev) \
+ test_bit(HNAE3_DEV_SUPPORT_TM_FLUSH_B, (hdev)->ae_dev->caps)
+
+enum HNAE3_PF_CAP_BITS {
+ HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B = 0,
+};
+#define ring_ptr_move_fw(ring, p) \
+ ((ring)->p = ((ring)->p + 1) % (ring)->desc_num)
+#define ring_ptr_move_bw(ring, p) \
+ ((ring)->p = ((ring)->p - 1 + (ring)->desc_num) % (ring)->desc_num)
+
+struct hnae3_handle;
+
+struct hnae3_queue {
+ void __iomem *io_base;
+ void __iomem *mem_base;
+ struct hnae3_ae_algo *ae_algo;
+ struct hnae3_handle *handle;
+ int tqp_index; /* index in a handle */
+ u32 buf_size; /* size for hnae_desc->addr, preset by AE */
+ u16 tx_desc_num; /* total number of tx desc */
+ u16 rx_desc_num; /* total number of rx desc */
+};
+
+struct hns3_mac_stats {
+ u64 tx_pause_cnt;
+ u64 rx_pause_cnt;
+};
+
+/* hnae3 loop mode */
+enum hnae3_loop {
+ HNAE3_LOOP_EXTERNAL,
+ HNAE3_LOOP_APP,
+ HNAE3_LOOP_SERIAL_SERDES,
+ HNAE3_LOOP_PARALLEL_SERDES,
+ HNAE3_LOOP_PHY,
+ HNAE3_LOOP_NONE,
+};
+
+enum hnae3_client_type {
+ HNAE3_CLIENT_KNIC,
+ HNAE3_CLIENT_ROCE,
+};
+
+/* mac media type */
+enum hnae3_media_type {
+ HNAE3_MEDIA_TYPE_UNKNOWN,
+ HNAE3_MEDIA_TYPE_FIBER,
+ HNAE3_MEDIA_TYPE_COPPER,
+ HNAE3_MEDIA_TYPE_BACKPLANE,
+ HNAE3_MEDIA_TYPE_NONE,
+};
+
+/* must be consistent with definition in firmware */
+enum hnae3_module_type {
+ HNAE3_MODULE_TYPE_UNKNOWN = 0x00,
+ HNAE3_MODULE_TYPE_FIBRE_LR = 0x01,
+ HNAE3_MODULE_TYPE_FIBRE_SR = 0x02,
+ HNAE3_MODULE_TYPE_AOC = 0x03,
+ HNAE3_MODULE_TYPE_CR = 0x04,
+ HNAE3_MODULE_TYPE_KR = 0x05,
+ HNAE3_MODULE_TYPE_TP = 0x06,
+};
+
+enum hnae3_fec_mode {
+ HNAE3_FEC_AUTO = 0,
+ HNAE3_FEC_BASER,
+ HNAE3_FEC_RS,
+ HNAE3_FEC_LLRS,
+ HNAE3_FEC_NONE,
+ HNAE3_FEC_USER_DEF,
+};
+
+enum hnae3_reset_notify_type {
+ HNAE3_UP_CLIENT,
+ HNAE3_DOWN_CLIENT,
+ HNAE3_INIT_CLIENT,
+ HNAE3_UNINIT_CLIENT,
+};
+
+enum hnae3_hw_error_type {
+ HNAE3_PPU_POISON_ERROR,
+ HNAE3_CMDQ_ECC_ERROR,
+ HNAE3_IMP_RD_POISON_ERROR,
+ HNAE3_ROCEE_AXI_RESP_ERROR,
+};
+
+enum hnae3_reset_type {
+ HNAE3_VF_RESET,
+ HNAE3_VF_FUNC_RESET,
+ HNAE3_VF_PF_FUNC_RESET,
+ HNAE3_VF_FULL_RESET,
+ HNAE3_FLR_RESET,
+ HNAE3_FUNC_RESET,
+ HNAE3_GLOBAL_RESET,
+ HNAE3_IMP_RESET,
+ HNAE3_NONE_RESET,
+ HNAE3_MAX_RESET,
+};
+
+enum hnae3_port_base_vlan_state {
+ HNAE3_PORT_BASE_VLAN_DISABLE,
+ HNAE3_PORT_BASE_VLAN_ENABLE,
+ HNAE3_PORT_BASE_VLAN_MODIFY,
+ HNAE3_PORT_BASE_VLAN_NOCHANGE,
+};
+
+enum hnae3_dbg_cmd {
+ HNAE3_DBG_CMD_TM_NODES,
+ HNAE3_DBG_CMD_TM_PRI,
+ HNAE3_DBG_CMD_TM_QSET,
+ HNAE3_DBG_CMD_TM_MAP,
+ HNAE3_DBG_CMD_TM_PG,
+ HNAE3_DBG_CMD_TM_PORT,
+ HNAE3_DBG_CMD_TC_SCH_INFO,
+ HNAE3_DBG_CMD_QOS_PAUSE_CFG,
+ HNAE3_DBG_CMD_QOS_PRI_MAP,
+ HNAE3_DBG_CMD_QOS_DSCP_MAP,
+ HNAE3_DBG_CMD_QOS_BUF_CFG,
+ HNAE3_DBG_CMD_DEV_INFO,
+ HNAE3_DBG_CMD_TX_BD,
+ HNAE3_DBG_CMD_RX_BD,
+ HNAE3_DBG_CMD_MAC_UC,
+ HNAE3_DBG_CMD_MAC_MC,
+ HNAE3_DBG_CMD_MNG_TBL,
+ HNAE3_DBG_CMD_LOOPBACK,
+ HNAE3_DBG_CMD_PTP_INFO,
+ HNAE3_DBG_CMD_INTERRUPT_INFO,
+ HNAE3_DBG_CMD_RESET_INFO,
+ HNAE3_DBG_CMD_IMP_INFO,
+ HNAE3_DBG_CMD_NCL_CONFIG,
+ HNAE3_DBG_CMD_REG_BIOS_COMMON,
+ HNAE3_DBG_CMD_REG_SSU,
+ HNAE3_DBG_CMD_REG_IGU_EGU,
+ HNAE3_DBG_CMD_REG_RPU,
+ HNAE3_DBG_CMD_REG_NCSI,
+ HNAE3_DBG_CMD_REG_RTC,
+ HNAE3_DBG_CMD_REG_PPP,
+ HNAE3_DBG_CMD_REG_RCB,
+ HNAE3_DBG_CMD_REG_TQP,
+ HNAE3_DBG_CMD_REG_MAC,
+ HNAE3_DBG_CMD_REG_DCB,
+ HNAE3_DBG_CMD_VLAN_CONFIG,
+ HNAE3_DBG_CMD_QUEUE_MAP,
+ HNAE3_DBG_CMD_RX_QUEUE_INFO,
+ HNAE3_DBG_CMD_TX_QUEUE_INFO,
+ HNAE3_DBG_CMD_FD_TCAM,
+ HNAE3_DBG_CMD_FD_COUNTER,
+ HNAE3_DBG_CMD_MAC_TNL_STATUS,
+ HNAE3_DBG_CMD_SERV_INFO,
+ HNAE3_DBG_CMD_UMV_INFO,
+ HNAE3_DBG_CMD_PAGE_POOL_INFO,
+ HNAE3_DBG_CMD_COAL_INFO,
+ HNAE3_DBG_CMD_UNKNOWN,
+};
+
+enum hnae3_tc_map_mode {
+ HNAE3_TC_MAP_MODE_PRIO,
+ HNAE3_TC_MAP_MODE_DSCP,
+};
+
+struct hnae3_vector_info {
+ u8 __iomem *io_addr;
+ int vector;
+};
+
+#define HNAE3_RING_TYPE_B 0
+#define HNAE3_RING_TYPE_TX 0
+#define HNAE3_RING_TYPE_RX 1
+#define HNAE3_RING_GL_IDX_S 0
+#define HNAE3_RING_GL_IDX_M GENMASK(1, 0)
+#define HNAE3_RING_GL_RX 0
+#define HNAE3_RING_GL_TX 1
+
+#define HNAE3_FW_VERSION_BYTE3_SHIFT 24
+#define HNAE3_FW_VERSION_BYTE3_MASK GENMASK(31, 24)
+#define HNAE3_FW_VERSION_BYTE2_SHIFT 16
+#define HNAE3_FW_VERSION_BYTE2_MASK GENMASK(23, 16)
+#define HNAE3_FW_VERSION_BYTE1_SHIFT 8
+#define HNAE3_FW_VERSION_BYTE1_MASK GENMASK(15, 8)
+#define HNAE3_FW_VERSION_BYTE0_SHIFT 0
+#define HNAE3_FW_VERSION_BYTE0_MASK GENMASK(7, 0)
+
+struct hnae3_ring_chain_node {
+ struct hnae3_ring_chain_node *next;
+ u32 tqp_index;
+ u32 flag;
+ u32 int_gl_idx;
+};
+
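+/* Note: the HNAE3_IS_TX_RING() check below relies on HNAE3_RING_TYPE_TX
+ * being 0, so a cleared HNAE3_RING_TYPE_B bit in node->flag marks a TX ring.
+ */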
+#define HNAE3_IS_TX_RING(node) \
+ (((node)->flag & 1 << HNAE3_RING_TYPE_B) == HNAE3_RING_TYPE_TX)
+
+/* device specification info from firmware */
+struct hnae3_dev_specs {
+ u32 mac_entry_num; /* number of mac-vlan table entries */
+ u32 mng_entry_num; /* number of manager table entries */
+ u32 max_tm_rate;
+ u16 rss_ind_tbl_size;
+ u16 rss_key_size;
+ u16 int_ql_max; /* max value of interrupt coalesce based on INT_QL */
+ u16 max_int_gl; /* max value of interrupt coalesce based on INT_GL */
+ u8 max_non_tso_bd_num; /* max BD number of one non-TSO packet */
+ u16 max_frm_size;
+ u16 max_qset_num;
+ u16 umv_size;
+ u16 mc_mac_size;
+ u32 mac_stats_num;
+ u8 tnl_num;
+};
+
+struct hnae3_client_ops {
+ int (*init_instance)(struct hnae3_handle *handle);
+ void (*uninit_instance)(struct hnae3_handle *handle, bool reset);
+ void (*link_status_change)(struct hnae3_handle *handle, bool state);
+ int (*reset_notify)(struct hnae3_handle *handle,
+ enum hnae3_reset_notify_type type);
+ void (*process_hw_error)(struct hnae3_handle *handle,
+ enum hnae3_hw_error_type);
+};
+
+#define HNAE3_CLIENT_NAME_LENGTH 16
+struct hnae3_client {
+ char name[HNAE3_CLIENT_NAME_LENGTH];
+ unsigned long state;
+ enum hnae3_client_type type;
+ const struct hnae3_client_ops *ops;
+ struct list_head node;
+};
+
+#define HNAE3_DEV_CAPS_MAX_NUM 96
+struct hnae3_ae_dev {
+ struct pci_dev *pdev;
+ const struct hnae3_ae_ops *ops;
+ struct list_head node;
+ u32 flag;
+ unsigned long hw_err_reset_req;
+ struct hnae3_dev_specs dev_specs;
+ u32 dev_version;
+ DECLARE_BITMAP(caps, HNAE3_DEV_CAPS_MAX_NUM);
+ void *priv;
+};
+
+/* This struct defines the operation on the handle.
+ *
+ * init_ae_dev(): (mandatory)
+ * Get PF configuration from pci_dev and initialize PF hardware
+ * uninit_ae_dev()
+ * Disable PF device and release PF resource
+ * register_client
+ * Register client to ae_dev
+ * unregister_client()
+ * Unregister client from ae_dev
+ * start()
+ * Enable the hardware
+ * stop()
+ * Disable the hardware
+ * start_client()
+ * Inform the hclge that client has been started
+ * stop_client()
+ * Inform the hclge that client has been stopped
+ * get_status()
+ * Get the carrier state of the back channel of the handle, 1 for ok, 0 for
+ * non-ok
+ * get_ksettings_an_result()
+ * Get negotiation status, speed and duplex
+ * get_media_type()
+ * Get media type of MAC
+ * check_port_speed()
+ * Check whether the target speed is supported
+ * adjust_link()
+ * Adjust link status
+ * set_loopback()
+ * Set loopback
+ * set_promisc_mode
+ * Set promisc mode
+ * request_update_promisc_mode
+ * request to hclge(vf) to update promisc mode
+ * set_mtu()
+ * set mtu
+ * get_pauseparam()
+ * get tx and rx pause frame use
+ * set_pauseparam()
+ * set tx and rx pause frame use
+ * set_autoneg()
+ * set autonegotiation of pause frame use
+ * get_autoneg()
+ * get autonegotiation of pause frame use
+ * restart_autoneg()
+ * restart autonegotiation
+ * halt_autoneg()
+ * halt/resume autonegotiation when autonegotiation on
+ * get_coalesce_usecs()
+ * get usecs to delay a TX interrupt after a packet is sent
+ * get_rx_max_coalesced_frames()
+ * get maximum number of packets to be sent before a TX interrupt.
+ * set_coalesce_usecs()
+ * set usecs to delay a TX interrupt after a packet is sent
+ * set_coalesce_frames()
+ * set maximum number of packets to be sent before a TX interrupt.
+ * get_mac_addr()
+ * get mac address
+ * set_mac_addr()
+ * set mac address
+ * add_uc_addr
+ * Add unicast addr to mac table
+ * rm_uc_addr
+ * Remove unicast addr from mac table
+ * set_mc_addr()
+ * Set multicast address
+ * add_mc_addr
+ * Add multicast address to mac table
+ * rm_mc_addr
+ * Remove multicast address from mac table
+ * update_stats()
+ * Update old network device statistics
+ * get_mac_stats()
+ * get mac pause statistics including tx_cnt and rx_cnt
+ * get_ethtool_stats()
+ * Get ethtool network device statistics
+ * get_strings()
+ * Get a set of strings that describe the requested objects
+ * get_sset_count()
+ * Get number of strings that @get_strings will write
+ * update_led_status()
+ * Update the led status
+ * set_led_id()
+ * Set led id
+ * get_regs()
+ * Get regs dump
+ * get_regs_len()
+ * Get the len of the regs dump
+ * get_rss_key_size()
+ * Get rss key size
+ * get_rss()
+ * Get rss table
+ * set_rss()
+ * Set rss table
+ * get_tc_size()
+ * Get tc size of handle
+ * get_vector()
+ * Get vector number and vector information
+ * put_vector()
+ * Put the vector in hdev
+ * map_ring_to_vector()
+ * Map rings to vector
+ * unmap_ring_from_vector()
+ * Unmap rings from vector
+ * reset_queue()
+ * Reset queue
+ * get_fw_version()
+ * Get firmware version
+ * get_mdix_mode()
+ * Get media type of phy
+ * enable_vlan_filter()
+ * Enable vlan filter
+ * set_vlan_filter()
+ * Set vlan filter config of Ports
+ * set_vf_vlan_filter()
+ * Set vlan filter config of vf
+ * enable_hw_strip_rxvtag()
+ * Enable/disable hardware vlan tag stripping for received packets
+ * set_gro_en
+ * Enable/disable HW GRO
+ * add_arfs_entry
+ * Check the 5-tuples of flow, and create flow director rule
+ * get_vf_config
+ * Get the VF configuration setting by the host
+ * set_vf_link_state
+ * Set VF link status
+ * set_vf_spoofchk
+ * Enable/disable spoof check for specified vf
+ * set_vf_trust
+ * Enable/disable trust for specified vf; if the vf is trusted, then
+ * it can enable promisc mode
+ * set_vf_rate
+ * Set the max tx rate of specified vf.
+ * set_vf_mac
+ * Configure the default MAC for specified VF
+ * get_module_eeprom
+ * Get the optical module eeprom info.
+ * add_cls_flower
+ * Add clsflower rule
+ * del_cls_flower
+ * Delete clsflower rule
+ * cls_flower_active
+ * Check if any cls flower rule exists
+ * dbg_read_cmd
+ * Execute debugfs read command.
+ * set_tx_hwts_info
+ * Save information for 1588 tx packet
+ * get_rx_hwts
+ * Get 1588 rx hwstamp
+ * get_ts_info
+ * Get phc info
+ * clean_vf_config
+ * Clean residual vf info after disabling sriov
+ * get_wol
+ * Get wake on lan info
+ * set_wol
+ * Config wake on lan
+ */
+struct hnae3_ae_ops {
+ int (*init_ae_dev)(struct hnae3_ae_dev *ae_dev);
+ void (*uninit_ae_dev)(struct hnae3_ae_dev *ae_dev);
+ void (*reset_prepare)(struct hnae3_ae_dev *ae_dev,
+ enum hnae3_reset_type rst_type);
+ void (*reset_done)(struct hnae3_ae_dev *ae_dev);
+ int (*init_client_instance)(struct hnae3_client *client,
+ struct hnae3_ae_dev *ae_dev);
+ void (*uninit_client_instance)(struct hnae3_client *client,
+ struct hnae3_ae_dev *ae_dev);
+ int (*start)(struct hnae3_handle *handle);
+ void (*stop)(struct hnae3_handle *handle);
+ int (*client_start)(struct hnae3_handle *handle);
+ void (*client_stop)(struct hnae3_handle *handle);
+ int (*get_status)(struct hnae3_handle *handle);
+ void (*get_ksettings_an_result)(struct hnae3_handle *handle,
+ u8 *auto_neg, u32 *speed, u8 *duplex,
+ u32 *lane_num);
+
+ int (*cfg_mac_speed_dup_h)(struct hnae3_handle *handle, int speed,
+ u8 duplex, u8 lane_num);
+
+ void (*get_media_type)(struct hnae3_handle *handle, u8 *media_type,
+ u8 *module_type);
+ int (*check_port_speed)(struct hnae3_handle *handle, u32 speed);
+ void (*get_fec_stats)(struct hnae3_handle *handle,
+ struct ethtool_fec_stats *fec_stats);
+ void (*get_fec)(struct hnae3_handle *handle, u8 *fec_ability,
+ u8 *fec_mode);
+ int (*set_fec)(struct hnae3_handle *handle, u32 fec_mode);
+ void (*adjust_link)(struct hnae3_handle *handle, int speed, int duplex);
+ int (*set_loopback)(struct hnae3_handle *handle,
+ enum hnae3_loop loop_mode, bool en);
+
+ int (*set_promisc_mode)(struct hnae3_handle *handle, bool en_uc_pmc,
+ bool en_mc_pmc);
+ void (*request_update_promisc_mode)(struct hnae3_handle *handle);
+ int (*set_mtu)(struct hnae3_handle *handle, int new_mtu);
+
+ void (*get_pauseparam)(struct hnae3_handle *handle,
+ u32 *auto_neg, u32 *rx_en, u32 *tx_en);
+ int (*set_pauseparam)(struct hnae3_handle *handle,
+ u32 auto_neg, u32 rx_en, u32 tx_en);
+
+ int (*set_autoneg)(struct hnae3_handle *handle, bool enable);
+ int (*get_autoneg)(struct hnae3_handle *handle);
+ int (*restart_autoneg)(struct hnae3_handle *handle);
+ int (*halt_autoneg)(struct hnae3_handle *handle, bool halt);
+
+ void (*get_coalesce_usecs)(struct hnae3_handle *handle,
+ u32 *tx_usecs, u32 *rx_usecs);
+ void (*get_rx_max_coalesced_frames)(struct hnae3_handle *handle,
+ u32 *tx_frames, u32 *rx_frames);
+ int (*set_coalesce_usecs)(struct hnae3_handle *handle, u32 timeout);
+ int (*set_coalesce_frames)(struct hnae3_handle *handle,
+ u32 coalesce_frames);
+ void (*get_coalesce_range)(struct hnae3_handle *handle,
+ u32 *tx_frames_low, u32 *rx_frames_low,
+ u32 *tx_frames_high, u32 *rx_frames_high,
+ u32 *tx_usecs_low, u32 *rx_usecs_low,
+ u32 *tx_usecs_high, u32 *rx_usecs_high);
+
+ void (*get_mac_addr)(struct hnae3_handle *handle, u8 *p);
+ int (*set_mac_addr)(struct hnae3_handle *handle, const void *p,
+ bool is_first);
+ int (*do_ioctl)(struct hnae3_handle *handle,
+ struct ifreq *ifr, int cmd);
+ int (*add_uc_addr)(struct hnae3_handle *handle,
+ const unsigned char *addr);
+ int (*rm_uc_addr)(struct hnae3_handle *handle,
+ const unsigned char *addr);
+ int (*set_mc_addr)(struct hnae3_handle *handle, void *addr);
+ int (*add_mc_addr)(struct hnae3_handle *handle,
+ const unsigned char *addr);
+ int (*rm_mc_addr)(struct hnae3_handle *handle,
+ const unsigned char *addr);
+ void (*set_tso_stats)(struct hnae3_handle *handle, int enable);
+ void (*update_stats)(struct hnae3_handle *handle);
+ void (*get_stats)(struct hnae3_handle *handle, u64 *data);
+ void (*get_mac_stats)(struct hnae3_handle *handle,
+ struct hns3_mac_stats *mac_stats);
+ void (*get_strings)(struct hnae3_handle *handle,
+ u32 stringset, u8 *data);
+ int (*get_sset_count)(struct hnae3_handle *handle, int stringset);
+
+ void (*get_regs)(struct hnae3_handle *handle, u32 *version,
+ void *data);
+ int (*get_regs_len)(struct hnae3_handle *handle);
+
+ u32 (*get_rss_key_size)(struct hnae3_handle *handle);
+ int (*get_rss)(struct hnae3_handle *handle, u32 *indir, u8 *key,
+ u8 *hfunc);
+ int (*set_rss)(struct hnae3_handle *handle, const u32 *indir,
+ const u8 *key, const u8 hfunc);
+ int (*set_rss_tuple)(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *cmd);
+ int (*get_rss_tuple)(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *cmd);
+
+ int (*get_tc_size)(struct hnae3_handle *handle);
+
+ int (*get_vector)(struct hnae3_handle *handle, u16 vector_num,
+ struct hnae3_vector_info *vector_info);
+ int (*put_vector)(struct hnae3_handle *handle, int vector_num);
+ int (*map_ring_to_vector)(struct hnae3_handle *handle,
+ int vector_num,
+ struct hnae3_ring_chain_node *vr_chain);
+ int (*unmap_ring_from_vector)(struct hnae3_handle *handle,
+ int vector_num,
+ struct hnae3_ring_chain_node *vr_chain);
+
+ int (*reset_queue)(struct hnae3_handle *handle);
+ u32 (*get_fw_version)(struct hnae3_handle *handle);
+ void (*get_mdix_mode)(struct hnae3_handle *handle,
+ u8 *tp_mdix_ctrl, u8 *tp_mdix);
+
+ int (*enable_vlan_filter)(struct hnae3_handle *handle, bool enable);
+ int (*set_vlan_filter)(struct hnae3_handle *handle, __be16 proto,
+ u16 vlan_id, bool is_kill);
+ int (*set_vf_vlan_filter)(struct hnae3_handle *handle, int vfid,
+ u16 vlan, u8 qos, __be16 proto);
+ int (*enable_hw_strip_rxvtag)(struct hnae3_handle *handle, bool enable);
+ void (*reset_event)(struct pci_dev *pdev, struct hnae3_handle *handle);
+ enum hnae3_reset_type (*get_reset_level)(struct hnae3_ae_dev *ae_dev,
+ unsigned long *addr);
+ void (*set_default_reset_request)(struct hnae3_ae_dev *ae_dev,
+ enum hnae3_reset_type rst_type);
+ void (*get_channels)(struct hnae3_handle *handle,
+ struct ethtool_channels *ch);
+ void (*get_tqps_and_rss_info)(struct hnae3_handle *h,
+ u16 *alloc_tqps, u16 *max_rss_size);
+ int (*set_channels)(struct hnae3_handle *handle, u32 new_tqps_num,
+ bool rxfh_configured);
+ void (*get_flowctrl_adv)(struct hnae3_handle *handle,
+ u32 *flowctrl_adv);
+ int (*set_led_id)(struct hnae3_handle *handle,
+ enum ethtool_phys_id_state status);
+ void (*get_link_mode)(struct hnae3_handle *handle,
+ unsigned long *supported,
+ unsigned long *advertising);
+ int (*add_fd_entry)(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *cmd);
+ int (*del_fd_entry)(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *cmd);
+ int (*get_fd_rule_cnt)(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *cmd);
+ int (*get_fd_rule_info)(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *cmd);
+ int (*get_fd_all_rules)(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *cmd, u32 *rule_locs);
+ void (*enable_fd)(struct hnae3_handle *handle, bool enable);
+ int (*add_arfs_entry)(struct hnae3_handle *handle, u16 queue_id,
+ u16 flow_id, struct flow_keys *fkeys);
+ int (*dbg_read_cmd)(struct hnae3_handle *handle, enum hnae3_dbg_cmd cmd,
+ char *buf, int len);
+ pci_ers_result_t (*handle_hw_ras_error)(struct hnae3_ae_dev *ae_dev);
+ bool (*get_hw_reset_stat)(struct hnae3_handle *handle);
+ bool (*ae_dev_resetting)(struct hnae3_handle *handle);
+ unsigned long (*ae_dev_reset_cnt)(struct hnae3_handle *handle);
+ int (*set_gro_en)(struct hnae3_handle *handle, bool enable);
+ u16 (*get_global_queue_id)(struct hnae3_handle *handle, u16 queue_id);
+ void (*set_timer_task)(struct hnae3_handle *handle, bool enable);
+ int (*mac_connect_phy)(struct hnae3_handle *handle);
+ void (*mac_disconnect_phy)(struct hnae3_handle *handle);
+ int (*get_vf_config)(struct hnae3_handle *handle, int vf,
+ struct ifla_vf_info *ivf);
+ int (*set_vf_link_state)(struct hnae3_handle *handle, int vf,
+ int link_state);
+ int (*set_vf_spoofchk)(struct hnae3_handle *handle, int vf,
+ bool enable);
+ int (*set_vf_trust)(struct hnae3_handle *handle, int vf, bool enable);
+ int (*set_vf_rate)(struct hnae3_handle *handle, int vf,
+ int min_tx_rate, int max_tx_rate, bool force);
+ int (*set_vf_mac)(struct hnae3_handle *handle, int vf, u8 *p);
+ int (*get_module_eeprom)(struct hnae3_handle *handle, u32 offset,
+ u32 len, u8 *data);
+ bool (*get_cmdq_stat)(struct hnae3_handle *handle);
+ int (*add_cls_flower)(struct hnae3_handle *handle,
+ struct flow_cls_offload *cls_flower, int tc);
+ int (*del_cls_flower)(struct hnae3_handle *handle,
+ struct flow_cls_offload *cls_flower);
+ bool (*cls_flower_active)(struct hnae3_handle *handle);
+ int (*get_phy_link_ksettings)(struct hnae3_handle *handle,
+ struct ethtool_link_ksettings *cmd);
+ int (*set_phy_link_ksettings)(struct hnae3_handle *handle,
+ const struct ethtool_link_ksettings *cmd);
+ bool (*set_tx_hwts_info)(struct hnae3_handle *handle,
+ struct sk_buff *skb);
+ void (*get_rx_hwts)(struct hnae3_handle *handle, struct sk_buff *skb,
+ u32 nsec, u32 sec);
+ int (*get_ts_info)(struct hnae3_handle *handle,
+ struct ethtool_ts_info *info);
+ int (*get_link_diagnosis_info)(struct hnae3_handle *handle,
+ u32 *status_code);
+ void (*clean_vf_config)(struct hnae3_ae_dev *ae_dev, int num_vfs);
+ int (*get_dscp_prio)(struct hnae3_handle *handle, u8 dscp,
+ u8 *tc_map_mode, u8 *priority);
+ void (*get_wol)(struct hnae3_handle *handle,
+ struct ethtool_wolinfo *wol);
+ int (*set_wol)(struct hnae3_handle *handle,
+ struct ethtool_wolinfo *wol);
+};
+
+struct hnae3_dcb_ops {
+ /* IEEE 802.1Qaz std */
+ int (*ieee_getets)(struct hnae3_handle *, struct ieee_ets *);
+ int (*ieee_setets)(struct hnae3_handle *, struct ieee_ets *);
+ int (*ieee_getpfc)(struct hnae3_handle *, struct ieee_pfc *);
+ int (*ieee_setpfc)(struct hnae3_handle *, struct ieee_pfc *);
+ int (*ieee_setapp)(struct hnae3_handle *h, struct dcb_app *app);
+ int (*ieee_delapp)(struct hnae3_handle *h, struct dcb_app *app);
+
+ /* DCBX configuration */
+ u8 (*getdcbx)(struct hnae3_handle *);
+ u8 (*setdcbx)(struct hnae3_handle *, u8);
+
+ int (*setup_tc)(struct hnae3_handle *handle,
+ struct tc_mqprio_qopt_offload *mqprio_qopt);
+};
+
+struct hnae3_ae_algo {
+ const struct hnae3_ae_ops *ops;
+ struct list_head node;
+ const struct pci_device_id *pdev_id_table;
+};
+
+#define HNAE3_INT_NAME_LEN 32
+#define HNAE3_ITR_COUNTDOWN_START 100
+
+#define HNAE3_MAX_TC 8
+#define HNAE3_MAX_USER_PRIO 8
+struct hnae3_tc_info {
+ u8 prio_tc[HNAE3_MAX_USER_PRIO]; /* TC indexed by prio */
+ u16 tqp_count[HNAE3_MAX_TC];
+ u16 tqp_offset[HNAE3_MAX_TC];
+ u8 max_tc; /* Total number of TCs */
+ u8 num_tc; /* Total number of enabled TCs */
+ bool mqprio_active;
+ bool dcb_ets_active;
+};
+
+#define HNAE3_MAX_DSCP 64
+#define HNAE3_PRIO_ID_INVALID 0xff
+struct hnae3_knic_private_info {
+ struct net_device *netdev; /* Set by KNIC client when init instance */
+ u16 rss_size; /* Allocated RSS queues */
+ u16 req_rss_size;
+ u16 rx_buf_len;
+ u16 num_tx_desc;
+ u16 num_rx_desc;
+ u32 tx_spare_buf_size;
+
+ struct hnae3_tc_info tc_info;
+ u8 tc_map_mode;
+ u8 dscp_app_cnt;
+ u8 dscp_prio[HNAE3_MAX_DSCP];
+
+ u16 num_tqps; /* total number of TQPs in this handle */
+ struct hnae3_queue **tqp; /* array base of all TQPs in this instance */
+ const struct hnae3_dcb_ops *dcb_ops;
+
+ u16 int_rl_setting;
+ void __iomem *io_base;
+};
+
+struct hnae3_roce_private_info {
+ struct net_device *netdev;
+ void __iomem *roce_io_base;
+ void __iomem *roce_mem_base;
+ int base_vector;
+ int num_vectors;
+
+ /* The attributes below are defined for the RoCE client; hnae3 gives
+ * them initial values, and the RoCE client can modify and use
+ * them.
+ */
+ unsigned long reset_state;
+ unsigned long instance_state;
+ unsigned long state;
+};
+
+#define HNAE3_SUPPORT_APP_LOOPBACK BIT(0)
+#define HNAE3_SUPPORT_PHY_LOOPBACK BIT(1)
+#define HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK BIT(2)
+#define HNAE3_SUPPORT_VF BIT(3)
+#define HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK BIT(4)
+#define HNAE3_SUPPORT_EXTERNAL_LOOPBACK BIT(5)
+
+#define HNAE3_USER_UPE BIT(0) /* unicast promisc enabled by user */
+#define HNAE3_USER_MPE BIT(1) /* multicast promisc enabled by user */
+#define HNAE3_BPE BIT(2) /* broadcast promisc enable */
+#define HNAE3_OVERFLOW_UPE BIT(3) /* unicast mac vlan overflow */
+#define HNAE3_OVERFLOW_MPE BIT(4) /* multicast mac vlan overflow */
+#define HNAE3_UPE (HNAE3_USER_UPE | HNAE3_OVERFLOW_UPE)
+#define HNAE3_MPE (HNAE3_USER_MPE | HNAE3_OVERFLOW_MPE)
+
+enum hnae3_pflag {
+ HNAE3_PFLAG_LIMIT_PROMISC,
+ HNAE3_PFLAG_MAX
+};
+
+struct hnae3_handle {
+ struct hnae3_client *client;
+ struct pci_dev *pdev;
+ void *priv;
+ struct hnae3_ae_algo *ae_algo; /* the class who provides this handle */
+ u64 flags; /* Indicate the capabilities for this handle */
+
+ union {
+ struct net_device *netdev; /* first member */
+ struct hnae3_knic_private_info kinfo;
+ struct hnae3_roce_private_info rinfo;
+ };
+
+ u32 numa_node_mask; /* for multi-chip support */
+
+ enum hnae3_port_base_vlan_state port_base_vlan_state;
+
+ u8 netdev_flags;
+ struct dentry *hnae3_dbgfs;
+ /* protects concurrent contention between debugfs commands */
+ struct mutex dbgfs_lock;
+ char **dbgfs_buf;
+
+ /* Network interface message level enabled bits */
+ u32 msg_enable;
+
+ unsigned long supported_pflags;
+ unsigned long priv_flags;
+};
+
+#define hnae3_set_field(origin, mask, shift, val) \
+ do { \
+ (origin) &= (~(mask)); \
+ (origin) |= ((val) << (shift)) & (mask); \
+ } while (0)
+#define hnae3_get_field(origin, mask, shift) (((origin) & (mask)) >> (shift))
+
+#define hnae3_set_bit(origin, shift, val) \
+ hnae3_set_field(origin, 0x1 << (shift), shift, val)
+#define hnae3_get_bit(origin, shift) \
+ hnae3_get_field(origin, 0x1 << (shift), shift)
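+/* Usage sketch (illustrative): hnae3_get_field() extracts a masked field,
+ * for example the high firmware version byte:
+ *   hnae3_get_field(fw_version, HNAE3_FW_VERSION_BYTE3_MASK,
+ *                   HNAE3_FW_VERSION_BYTE3_SHIFT);
+ * hnae3_set_bit()/hnae3_get_bit() do the same for a single bit, building
+ * the one-bit mask from the shift.
+ */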
+
+#define HNAE3_FORMAT_MAC_ADDR_LEN 18
+#define HNAE3_FORMAT_MAC_ADDR_OFFSET_0 0
+#define HNAE3_FORMAT_MAC_ADDR_OFFSET_4 4
+#define HNAE3_FORMAT_MAC_ADDR_OFFSET_5 5
+
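+/* Illustrative example: hnae3_format_mac_addr() below keeps only the first
+ * byte and the last two bytes, so 00:1b:21:3c:9d:0f is logged as
+ * "00:**:**:**:9d:0f".
+ */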
+static inline void hnae3_format_mac_addr(char *format_mac_addr,
+ const u8 *mac_addr)
+{
+ snprintf(format_mac_addr, HNAE3_FORMAT_MAC_ADDR_LEN, "%02x:**:**:**:%02x:%02x",
+ mac_addr[HNAE3_FORMAT_MAC_ADDR_OFFSET_0],
+ mac_addr[HNAE3_FORMAT_MAC_ADDR_OFFSET_4],
+ mac_addr[HNAE3_FORMAT_MAC_ADDR_OFFSET_5]);
+}
+
+int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev);
+void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev);
+
+void hnae3_unregister_ae_algo_prepare(struct hnae3_ae_algo *ae_algo);
+void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo);
+void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo);
+
+void hnae3_unregister_client(struct hnae3_client *client);
+int hnae3_register_client(struct hnae3_client *client);
+
+void hnae3_set_client_init_flag(struct hnae3_client *client,
+ struct hnae3_ae_dev *ae_dev,
+ unsigned int inited);
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
new file mode 100644
index 0000000000..dcecb23daa
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
@@ -0,0 +1,654 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2021-2021 Hisilicon Limited.
+
+#include "hnae3.h"
+#include "hclge_comm_cmd.h"
+
+static void hclge_comm_cmd_config_regs(struct hclge_comm_hw *hw,
+ struct hclge_comm_cmq_ring *ring)
+{
+ dma_addr_t dma = ring->desc_dma_addr;
+ u32 reg_val;
+
+ if (ring->ring_type == HCLGE_COMM_TYPE_CSQ) {
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG,
+ lower_32_bits(dma));
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG,
+ upper_32_bits(dma));
+ reg_val = hclge_comm_read_dev(hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG);
+ reg_val &= HCLGE_COMM_NIC_SW_RST_RDY;
+ reg_val |= ring->desc_num >> HCLGE_COMM_NIC_CMQ_DESC_NUM_S;
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG, reg_val);
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_HEAD_REG, 0);
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_TAIL_REG, 0);
+ } else {
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG,
+ lower_32_bits(dma));
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG,
+ upper_32_bits(dma));
+ reg_val = ring->desc_num >> HCLGE_COMM_NIC_CMQ_DESC_NUM_S;
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_DEPTH_REG, reg_val);
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_HEAD_REG, 0);
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_TAIL_REG, 0);
+ }
+}
+
+void hclge_comm_cmd_init_regs(struct hclge_comm_hw *hw)
+{
+ hclge_comm_cmd_config_regs(hw, &hw->cmq.csq);
+ hclge_comm_cmd_config_regs(hw, &hw->cmq.crq);
+}
+
+void hclge_comm_cmd_reuse_desc(struct hclge_desc *desc, bool is_read)
+{
+ desc->flag = cpu_to_le16(HCLGE_COMM_CMD_FLAG_NO_INTR |
+ HCLGE_COMM_CMD_FLAG_IN);
+ if (is_read)
+ desc->flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_WR);
+ else
+ desc->flag &= cpu_to_le16(~HCLGE_COMM_CMD_FLAG_WR);
+}
+
+static void hclge_comm_set_default_capability(struct hnae3_ae_dev *ae_dev,
+ bool is_pf)
+{
+ set_bit(HNAE3_DEV_SUPPORT_GRO_B, ae_dev->caps);
+ if (is_pf) {
+ set_bit(HNAE3_DEV_SUPPORT_FD_B, ae_dev->caps);
+ set_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps);
+ set_bit(HNAE3_DEV_SUPPORT_PAUSE_B, ae_dev->caps);
+ }
+}
+
+void hclge_comm_cmd_setup_basic_desc(struct hclge_desc *desc,
+ enum hclge_opcode_type opcode,
+ bool is_read)
+{
+ memset((void *)desc, 0, sizeof(struct hclge_desc));
+ desc->opcode = cpu_to_le16(opcode);
+ desc->flag = cpu_to_le16(HCLGE_COMM_CMD_FLAG_NO_INTR |
+ HCLGE_COMM_CMD_FLAG_IN);
+
+ if (is_read)
+ desc->flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_WR);
+}
+
+int hclge_comm_firmware_compat_config(struct hnae3_ae_dev *ae_dev,
+ struct hclge_comm_hw *hw, bool en)
+{
+ struct hclge_comm_firmware_compat_cmd *req;
+ struct hclge_desc desc;
+ u32 compat = 0;
+
+ hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_IMP_COMPAT_CFG, false);
+
+ if (en) {
+ req = (struct hclge_comm_firmware_compat_cmd *)desc.data;
+
+ hnae3_set_bit(compat, HCLGE_COMM_LINK_EVENT_REPORT_EN_B, 1);
+ hnae3_set_bit(compat, HCLGE_COMM_NCSI_ERROR_REPORT_EN_B, 1);
+ if (hclge_comm_dev_phy_imp_supported(ae_dev))
+ hnae3_set_bit(compat, HCLGE_COMM_PHY_IMP_EN_B, 1);
+ hnae3_set_bit(compat, HCLGE_COMM_MAC_STATS_EXT_EN_B, 1);
+ hnae3_set_bit(compat, HCLGE_COMM_SYNC_RX_RING_HEAD_EN_B, 1);
+ hnae3_set_bit(compat, HCLGE_COMM_LLRS_FEC_EN_B, 1);
+
+ req->compat = cpu_to_le32(compat);
+ }
+
+ return hclge_comm_cmd_send(hw, &desc, 1);
+}
+
+void hclge_comm_free_cmd_desc(struct hclge_comm_cmq_ring *ring)
+{
+ int size = ring->desc_num * sizeof(struct hclge_desc);
+
+ if (!ring->desc)
+ return;
+
+ dma_free_coherent(&ring->pdev->dev, size,
+ ring->desc, ring->desc_dma_addr);
+ ring->desc = NULL;
+}
+
+static int hclge_comm_alloc_cmd_desc(struct hclge_comm_cmq_ring *ring)
+{
+ int size = ring->desc_num * sizeof(struct hclge_desc);
+
+ ring->desc = dma_alloc_coherent(&ring->pdev->dev,
+ size, &ring->desc_dma_addr, GFP_KERNEL);
+ if (!ring->desc)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static __le32 hclge_comm_build_api_caps(void)
+{
+ u32 api_caps = 0;
+
+ hnae3_set_bit(api_caps, HCLGE_COMM_API_CAP_FLEX_RSS_TBL_B, 1);
+
+ return cpu_to_le32(api_caps);
+}
+
+static const struct hclge_comm_caps_bit_map hclge_pf_cmd_caps[] = {
+ {HCLGE_COMM_CAP_UDP_GSO_B, HNAE3_DEV_SUPPORT_UDP_GSO_B},
+ {HCLGE_COMM_CAP_PTP_B, HNAE3_DEV_SUPPORT_PTP_B},
+ {HCLGE_COMM_CAP_INT_QL_B, HNAE3_DEV_SUPPORT_INT_QL_B},
+ {HCLGE_COMM_CAP_TQP_TXRX_INDEP_B, HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B},
+ {HCLGE_COMM_CAP_HW_TX_CSUM_B, HNAE3_DEV_SUPPORT_HW_TX_CSUM_B},
+ {HCLGE_COMM_CAP_UDP_TUNNEL_CSUM_B, HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B},
+ {HCLGE_COMM_CAP_FD_FORWARD_TC_B, HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B},
+ {HCLGE_COMM_CAP_FEC_B, HNAE3_DEV_SUPPORT_FEC_B},
+ {HCLGE_COMM_CAP_PAUSE_B, HNAE3_DEV_SUPPORT_PAUSE_B},
+ {HCLGE_COMM_CAP_PHY_IMP_B, HNAE3_DEV_SUPPORT_PHY_IMP_B},
+ {HCLGE_COMM_CAP_QB_B, HNAE3_DEV_SUPPORT_QB_B},
+ {HCLGE_COMM_CAP_TX_PUSH_B, HNAE3_DEV_SUPPORT_TX_PUSH_B},
+ {HCLGE_COMM_CAP_RAS_IMP_B, HNAE3_DEV_SUPPORT_RAS_IMP_B},
+ {HCLGE_COMM_CAP_RXD_ADV_LAYOUT_B, HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B},
+ {HCLGE_COMM_CAP_PORT_VLAN_BYPASS_B,
+ HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B},
+ {HCLGE_COMM_CAP_PORT_VLAN_BYPASS_B, HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B},
+ {HCLGE_COMM_CAP_CQ_B, HNAE3_DEV_SUPPORT_CQ_B},
+ {HCLGE_COMM_CAP_GRO_B, HNAE3_DEV_SUPPORT_GRO_B},
+ {HCLGE_COMM_CAP_FD_B, HNAE3_DEV_SUPPORT_FD_B},
+ {HCLGE_COMM_CAP_FEC_STATS_B, HNAE3_DEV_SUPPORT_FEC_STATS_B},
+ {HCLGE_COMM_CAP_LANE_NUM_B, HNAE3_DEV_SUPPORT_LANE_NUM_B},
+ {HCLGE_COMM_CAP_WOL_B, HNAE3_DEV_SUPPORT_WOL_B},
+ {HCLGE_COMM_CAP_TM_FLUSH_B, HNAE3_DEV_SUPPORT_TM_FLUSH_B},
+};
+
+static const struct hclge_comm_caps_bit_map hclge_vf_cmd_caps[] = {
+ {HCLGE_COMM_CAP_UDP_GSO_B, HNAE3_DEV_SUPPORT_UDP_GSO_B},
+ {HCLGE_COMM_CAP_INT_QL_B, HNAE3_DEV_SUPPORT_INT_QL_B},
+ {HCLGE_COMM_CAP_TQP_TXRX_INDEP_B, HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B},
+ {HCLGE_COMM_CAP_HW_TX_CSUM_B, HNAE3_DEV_SUPPORT_HW_TX_CSUM_B},
+ {HCLGE_COMM_CAP_UDP_TUNNEL_CSUM_B, HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B},
+ {HCLGE_COMM_CAP_QB_B, HNAE3_DEV_SUPPORT_QB_B},
+ {HCLGE_COMM_CAP_TX_PUSH_B, HNAE3_DEV_SUPPORT_TX_PUSH_B},
+ {HCLGE_COMM_CAP_RXD_ADV_LAYOUT_B, HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B},
+ {HCLGE_COMM_CAP_CQ_B, HNAE3_DEV_SUPPORT_CQ_B},
+ {HCLGE_COMM_CAP_GRO_B, HNAE3_DEV_SUPPORT_GRO_B},
+};
+
+static void
+hclge_comm_capability_to_bitmap(unsigned long *bitmap, __le32 *caps)
+{
+ const unsigned int words = HCLGE_COMM_QUERY_CAP_LENGTH;
+ u32 val[HCLGE_COMM_QUERY_CAP_LENGTH];
+ unsigned int i;
+
+ for (i = 0; i < words; i++)
+ val[i] = __le32_to_cpu(caps[i]);
+
+ bitmap_from_arr32(bitmap, val,
+ HCLGE_COMM_QUERY_CAP_LENGTH * BITS_PER_TYPE(u32));
+}
+
+static void
+hclge_comm_parse_capability(struct hnae3_ae_dev *ae_dev, bool is_pf,
+ struct hclge_comm_query_version_cmd *cmd)
+{
+ const struct hclge_comm_caps_bit_map *caps_map =
+ is_pf ? hclge_pf_cmd_caps : hclge_vf_cmd_caps;
+ u32 size = is_pf ? ARRAY_SIZE(hclge_pf_cmd_caps) :
+ ARRAY_SIZE(hclge_vf_cmd_caps);
+ DECLARE_BITMAP(caps, HCLGE_COMM_QUERY_CAP_LENGTH * BITS_PER_TYPE(u32));
+ u32 i;
+
+ hclge_comm_capability_to_bitmap(caps, cmd->caps);
+ for (i = 0; i < size; i++)
+ if (test_bit(caps_map[i].imp_bit, caps))
+ set_bit(caps_map[i].local_bit, ae_dev->caps);
+}
+
+int hclge_comm_alloc_cmd_queue(struct hclge_comm_hw *hw, int ring_type)
+{
+ struct hclge_comm_cmq_ring *ring =
+ (ring_type == HCLGE_COMM_TYPE_CSQ) ? &hw->cmq.csq :
+ &hw->cmq.crq;
+ int ret;
+
+ ring->ring_type = ring_type;
+
+ ret = hclge_comm_alloc_cmd_desc(ring);
+ if (ret)
+ dev_err(&ring->pdev->dev, "descriptor %s alloc error %d\n",
+ (ring_type == HCLGE_COMM_TYPE_CSQ) ? "CSQ" : "CRQ",
+ ret);
+
+ return ret;
+}
+
+int hclge_comm_cmd_query_version_and_capability(struct hnae3_ae_dev *ae_dev,
+ struct hclge_comm_hw *hw,
+ u32 *fw_version, bool is_pf)
+{
+ struct hclge_comm_query_version_cmd *resp;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FW_VER, 1);
+ resp = (struct hclge_comm_query_version_cmd *)desc.data;
+ resp->api_caps = hclge_comm_build_api_caps();
+
+ ret = hclge_comm_cmd_send(hw, &desc, 1);
+ if (ret)
+ return ret;
+
+ *fw_version = le32_to_cpu(resp->firmware);
+
+ ae_dev->dev_version = le32_to_cpu(resp->hardware) <<
+ HNAE3_PCI_REVISION_BIT_SIZE;
+ ae_dev->dev_version |= ae_dev->pdev->revision;
+
+ if (ae_dev->dev_version == HNAE3_DEVICE_VERSION_V2) {
+ hclge_comm_set_default_capability(ae_dev, is_pf);
+ return 0;
+ }
+
+ hclge_comm_parse_capability(ae_dev, is_pf, resp);
+
+ return ret;
+}
+
+static const u16 spec_opcode[] = { HCLGE_OPC_STATS_64_BIT,
+ HCLGE_OPC_STATS_32_BIT,
+ HCLGE_OPC_STATS_MAC,
+ HCLGE_OPC_STATS_MAC_ALL,
+ HCLGE_OPC_QUERY_32_BIT_REG,
+ HCLGE_OPC_QUERY_64_BIT_REG,
+ HCLGE_QUERY_CLEAR_MPF_RAS_INT,
+ HCLGE_QUERY_CLEAR_PF_RAS_INT,
+ HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT,
+ HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT,
+ HCLGE_QUERY_ALL_ERR_INFO };
+
+static bool hclge_comm_is_special_opcode(u16 opcode)
+{
+ /* these commands have several descriptors,
+ * and use the first one to save opcode and return value
+ */
+ u32 i;
+
+ for (i = 0; i < ARRAY_SIZE(spec_opcode); i++)
+ if (spec_opcode[i] == opcode)
+ return true;
+
+ return false;
+}
+
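+/* Free descriptor slots in the ring; one slot is always kept unused (the
+ * usual ring-buffer convention) so a full ring can be told apart from an
+ * empty one when next_to_use wraps around to next_to_clean.
+ */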
+static int hclge_comm_ring_space(struct hclge_comm_cmq_ring *ring)
+{
+ int ntc = ring->next_to_clean;
+ int ntu = ring->next_to_use;
+ int used = (ntu - ntc + ring->desc_num) % ring->desc_num;
+
+ return ring->desc_num - used - 1;
+}
+
+static void hclge_comm_cmd_copy_desc(struct hclge_comm_hw *hw,
+ struct hclge_desc *desc, int num)
+{
+ struct hclge_desc *desc_to_use;
+ int handle = 0;
+
+ while (handle < num) {
+ desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
+ *desc_to_use = desc[handle];
+ (hw->cmq.csq.next_to_use)++;
+ if (hw->cmq.csq.next_to_use >= hw->cmq.csq.desc_num)
+ hw->cmq.csq.next_to_use = 0;
+ handle++;
+ }
+}
+
+static int hclge_comm_is_valid_csq_clean_head(struct hclge_comm_cmq_ring *ring,
+ int head)
+{
+ int ntc = ring->next_to_clean;
+ int ntu = ring->next_to_use;
+
+ if (ntu > ntc)
+ return head >= ntc && head <= ntu;
+
+ return head >= ntc || head <= ntu;
+}
+
+static int hclge_comm_cmd_csq_clean(struct hclge_comm_hw *hw)
+{
+ struct hclge_comm_cmq_ring *csq = &hw->cmq.csq;
+ int clean;
+ u32 head;
+
+ head = hclge_comm_read_dev(hw, HCLGE_COMM_NIC_CSQ_HEAD_REG);
+ rmb(); /* Make sure head is ready before touching any data */
+
+ if (!hclge_comm_is_valid_csq_clean_head(csq, head)) {
+ dev_warn(&hw->cmq.csq.pdev->dev, "wrong cmd head (%u, %d-%d)\n",
+ head, csq->next_to_use, csq->next_to_clean);
+ dev_warn(&hw->cmq.csq.pdev->dev,
+ "Disabling any further commands to IMP firmware\n");
+ set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state);
+ dev_warn(&hw->cmq.csq.pdev->dev,
+ "IMP firmware watchdog reset soon expected!\n");
+ return -EIO;
+ }
+
+ clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num;
+ csq->next_to_clean = head;
+ return clean;
+}
+
+static int hclge_comm_cmd_csq_done(struct hclge_comm_hw *hw)
+{
+ u32 head = hclge_comm_read_dev(hw, HCLGE_COMM_NIC_CSQ_HEAD_REG);
+
+ return head == hw->cmq.csq.next_to_use;
+}
+
+static u32 hclge_get_cmdq_tx_timeout(u16 opcode, u32 tx_timeout)
+{
+ static const struct hclge_cmdq_tx_timeout_map cmdq_tx_timeout_map[] = {
+ {HCLGE_OPC_CFG_RST_TRIGGER, HCLGE_COMM_CMDQ_TX_TIMEOUT_500MS},
+ };
+ u32 i;
+
+ for (i = 0; i < ARRAY_SIZE(cmdq_tx_timeout_map); i++)
+ if (cmdq_tx_timeout_map[i].opcode == opcode)
+ return cmdq_tx_timeout_map[i].tx_timeout;
+
+ return tx_timeout;
+}
+
+static void hclge_comm_wait_for_resp(struct hclge_comm_hw *hw, u16 opcode,
+ bool *is_completed)
+{
+ u32 cmdq_tx_timeout = hclge_get_cmdq_tx_timeout(opcode,
+ hw->cmq.tx_timeout);
+ u32 timeout = 0;
+
+ do {
+ if (hclge_comm_cmd_csq_done(hw)) {
+ *is_completed = true;
+ break;
+ }
+ udelay(1);
+ timeout++;
+ } while (timeout < cmdq_tx_timeout);
+}
+
+static int hclge_comm_cmd_convert_err_code(u16 desc_ret)
+{
+ struct hclge_comm_errcode hclge_comm_cmd_errcode[] = {
+ { HCLGE_COMM_CMD_EXEC_SUCCESS, 0 },
+ { HCLGE_COMM_CMD_NO_AUTH, -EPERM },
+ { HCLGE_COMM_CMD_NOT_SUPPORTED, -EOPNOTSUPP },
+ { HCLGE_COMM_CMD_QUEUE_FULL, -EXFULL },
+ { HCLGE_COMM_CMD_NEXT_ERR, -ENOSR },
+ { HCLGE_COMM_CMD_UNEXE_ERR, -ENOTBLK },
+ { HCLGE_COMM_CMD_PARA_ERR, -EINVAL },
+ { HCLGE_COMM_CMD_RESULT_ERR, -ERANGE },
+ { HCLGE_COMM_CMD_TIMEOUT, -ETIME },
+ { HCLGE_COMM_CMD_HILINK_ERR, -ENOLINK },
+ { HCLGE_COMM_CMD_QUEUE_ILLEGAL, -ENXIO },
+ { HCLGE_COMM_CMD_INVALID, -EBADR },
+ };
+ u32 errcode_count = ARRAY_SIZE(hclge_comm_cmd_errcode);
+ u32 i;
+
+ for (i = 0; i < errcode_count; i++)
+ if (hclge_comm_cmd_errcode[i].imp_errcode == desc_ret)
+ return hclge_comm_cmd_errcode[i].common_errno;
+
+ return -EIO;
+}
+
+static int hclge_comm_cmd_check_retval(struct hclge_comm_hw *hw,
+ struct hclge_desc *desc, int num,
+ int ntc)
+{
+ u16 opcode, desc_ret;
+ int handle;
+
+ opcode = le16_to_cpu(desc[0].opcode);
+ for (handle = 0; handle < num; handle++) {
+ desc[handle] = hw->cmq.csq.desc[ntc];
+ ntc++;
+ if (ntc >= hw->cmq.csq.desc_num)
+ ntc = 0;
+ }
+ if (likely(!hclge_comm_is_special_opcode(opcode)))
+ desc_ret = le16_to_cpu(desc[num - 1].retval);
+ else
+ desc_ret = le16_to_cpu(desc[0].retval);
+
+ hw->cmq.last_status = desc_ret;
+
+ return hclge_comm_cmd_convert_err_code(desc_ret);
+}
+
+static int hclge_comm_cmd_check_result(struct hclge_comm_hw *hw,
+ struct hclge_desc *desc,
+ int num, int ntc)
+{
+ bool is_completed = false;
+ int handle, ret;
+
+ /* If the command is sync, wait for the firmware to write back;
+ * if multiple descriptors are to be sent, use the first one to check
+ */
+ if (HCLGE_COMM_SEND_SYNC(le16_to_cpu(desc->flag)))
+ hclge_comm_wait_for_resp(hw, le16_to_cpu(desc->opcode),
+ &is_completed);
+
+ if (!is_completed)
+ ret = -EBADE;
+ else
+ ret = hclge_comm_cmd_check_retval(hw, desc, num, ntc);
+
+ /* Clean the command send queue */
+ handle = hclge_comm_cmd_csq_clean(hw);
+ if (handle < 0)
+ ret = handle;
+ else if (handle != num)
+ dev_warn(&hw->cmq.csq.pdev->dev,
+ "cleaned %d, need to clean %d\n", handle, num);
+ return ret;
+}
+
+/**
+ * hclge_comm_cmd_send - send command to command queue
+ * @hw: pointer to the hw struct
+ * @desc: prefilled descriptor for describing the command
+ * @num: the number of descriptors to be sent
+ *
+ * This is the main send command for the command queue; it sends the
+ * descriptors to the queue, cleans the queue afterwards, etc.
+ **/
+int hclge_comm_cmd_send(struct hclge_comm_hw *hw, struct hclge_desc *desc,
+ int num)
+{
+ struct hclge_comm_cmq_ring *csq = &hw->cmq.csq;
+ int ret;
+ int ntc;
+
+ spin_lock_bh(&hw->cmq.csq.lock);
+
+ if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state)) {
+ spin_unlock_bh(&hw->cmq.csq.lock);
+ return -EBUSY;
+ }
+
+ if (num > hclge_comm_ring_space(&hw->cmq.csq)) {
+ /* If CMDQ ring is full, SW HEAD and HW HEAD may be different, so
+ * we need to update the SW HEAD pointer csq->next_to_clean
+ */
+ csq->next_to_clean =
+ hclge_comm_read_dev(hw, HCLGE_COMM_NIC_CSQ_HEAD_REG);
+ spin_unlock_bh(&hw->cmq.csq.lock);
+ return -EBUSY;
+ }
+
+ /* Record the location of desc in the ring for this time
+ * which will be used for hardware to write back
+ */
+ ntc = hw->cmq.csq.next_to_use;
+
+ hclge_comm_cmd_copy_desc(hw, desc, num);
+
+ /* Write to hardware */
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_TAIL_REG,
+ hw->cmq.csq.next_to_use);
+
+ ret = hclge_comm_cmd_check_result(hw, desc, num, ntc);
+
+ spin_unlock_bh(&hw->cmq.csq.lock);
+
+ return ret;
+}
+
+static void hclge_comm_cmd_uninit_regs(struct hclge_comm_hw *hw)
+{
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG, 0);
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG, 0);
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG, 0);
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_HEAD_REG, 0);
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_TAIL_REG, 0);
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG, 0);
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG, 0);
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_DEPTH_REG, 0);
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_HEAD_REG, 0);
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_TAIL_REG, 0);
+}
+
+void hclge_comm_cmd_uninit(struct hnae3_ae_dev *ae_dev,
+ struct hclge_comm_hw *hw)
+{
+ struct hclge_comm_cmq *cmdq = &hw->cmq;
+
+ hclge_comm_firmware_compat_config(ae_dev, hw, false);
+ set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state);
+
+ /* wait to ensure that the firmware completes any possible leftover
+ * commands.
+ */
+ msleep(HCLGE_COMM_CMDQ_CLEAR_WAIT_TIME);
+ spin_lock_bh(&cmdq->csq.lock);
+ spin_lock(&cmdq->crq.lock);
+ hclge_comm_cmd_uninit_regs(hw);
+ spin_unlock(&cmdq->crq.lock);
+ spin_unlock_bh(&cmdq->csq.lock);
+
+ hclge_comm_free_cmd_desc(&cmdq->csq);
+ hclge_comm_free_cmd_desc(&cmdq->crq);
+}
+
+int hclge_comm_cmd_queue_init(struct pci_dev *pdev, struct hclge_comm_hw *hw)
+{
+ struct hclge_comm_cmq *cmdq = &hw->cmq;
+ int ret;
+
+ /* Setup the lock for command queue */
+ spin_lock_init(&cmdq->csq.lock);
+ spin_lock_init(&cmdq->crq.lock);
+
+ cmdq->csq.pdev = pdev;
+ cmdq->crq.pdev = pdev;
+
+ /* Setup the queue entries for the cmd queue */
+ cmdq->csq.desc_num = HCLGE_COMM_NIC_CMQ_DESC_NUM;
+ cmdq->crq.desc_num = HCLGE_COMM_NIC_CMQ_DESC_NUM;
+
+ /* Setup Tx write back timeout */
+ cmdq->tx_timeout = HCLGE_COMM_CMDQ_TX_TIMEOUT_DEFAULT;
+
+ /* Setup queue rings */
+ ret = hclge_comm_alloc_cmd_queue(hw, HCLGE_COMM_TYPE_CSQ);
+ if (ret) {
+ dev_err(&pdev->dev, "CSQ ring setup error %d\n", ret);
+ return ret;
+ }
+
+ ret = hclge_comm_alloc_cmd_queue(hw, HCLGE_COMM_TYPE_CRQ);
+ if (ret) {
+ dev_err(&pdev->dev, "CRQ ring setup error %d\n", ret);
+ goto err_csq;
+ }
+
+ return 0;
+err_csq:
+ hclge_comm_free_cmd_desc(&hw->cmq.csq);
+ return ret;
+}
+
+int hclge_comm_cmd_init(struct hnae3_ae_dev *ae_dev, struct hclge_comm_hw *hw,
+ u32 *fw_version, bool is_pf,
+ unsigned long reset_pending)
+{
+ struct hclge_comm_cmq *cmdq = &hw->cmq;
+ int ret;
+
+ spin_lock_bh(&cmdq->csq.lock);
+ spin_lock(&cmdq->crq.lock);
+
+ cmdq->csq.next_to_clean = 0;
+ cmdq->csq.next_to_use = 0;
+ cmdq->crq.next_to_clean = 0;
+ cmdq->crq.next_to_use = 0;
+
+ hclge_comm_cmd_init_regs(hw);
+
+ spin_unlock(&cmdq->crq.lock);
+ spin_unlock_bh(&cmdq->csq.lock);
+
+ clear_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state);
+
+ /* Check if there is a new reset pending, because a higher level
+ * reset may happen while a lower level reset is being processed.
+ */
+ if (reset_pending) {
+ ret = -EBUSY;
+ goto err_cmd_init;
+ }
+
+ /* get version and device capabilities */
+ ret = hclge_comm_cmd_query_version_and_capability(ae_dev, hw,
+ fw_version, is_pf);
+ if (ret) {
+ dev_err(&ae_dev->pdev->dev,
+ "failed to query version and capabilities, ret = %d\n",
+ ret);
+ goto err_cmd_init;
+ }
+
+ dev_info(&ae_dev->pdev->dev,
+ "The firmware version is %lu.%lu.%lu.%lu\n",
+ hnae3_get_field(*fw_version, HNAE3_FW_VERSION_BYTE3_MASK,
+ HNAE3_FW_VERSION_BYTE3_SHIFT),
+ hnae3_get_field(*fw_version, HNAE3_FW_VERSION_BYTE2_MASK,
+ HNAE3_FW_VERSION_BYTE2_SHIFT),
+ hnae3_get_field(*fw_version, HNAE3_FW_VERSION_BYTE1_MASK,
+ HNAE3_FW_VERSION_BYTE1_SHIFT),
+ hnae3_get_field(*fw_version, HNAE3_FW_VERSION_BYTE0_MASK,
+ HNAE3_FW_VERSION_BYTE0_SHIFT));
+
+ if (!is_pf && ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3)
+ return 0;
+
+ /* ask the firmware to enable some features; the driver can work
+ * without them.
+ */
+ ret = hclge_comm_firmware_compat_config(ae_dev, hw, true);
+ if (ret)
+ dev_warn(&ae_dev->pdev->dev,
+ "Firmware compatible features not enabled(%d).\n",
+ ret);
+ return 0;
+
+err_cmd_init:
+ set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state);
+
+ return ret;
+}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h
new file mode 100644
index 0000000000..2b7197ce0a
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h
@@ -0,0 +1,476 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+// Copyright (c) 2021-2021 Hisilicon Limited.
+
+#ifndef __HCLGE_COMM_CMD_H
+#define __HCLGE_COMM_CMD_H
+#include <linux/types.h>
+
+#include "hnae3.h"
+
+#define HCLGE_COMM_CMD_FLAG_IN BIT(0)
+#define HCLGE_COMM_CMD_FLAG_NEXT BIT(2)
+#define HCLGE_COMM_CMD_FLAG_WR BIT(3)
+#define HCLGE_COMM_CMD_FLAG_NO_INTR BIT(4)
+
+#define HCLGE_COMM_SEND_SYNC(flag) \
+ ((flag) & HCLGE_COMM_CMD_FLAG_NO_INTR)
+
+#define HCLGE_COMM_LINK_EVENT_REPORT_EN_B 0
+#define HCLGE_COMM_NCSI_ERROR_REPORT_EN_B 1
+#define HCLGE_COMM_PHY_IMP_EN_B 2
+#define HCLGE_COMM_MAC_STATS_EXT_EN_B 3
+#define HCLGE_COMM_SYNC_RX_RING_HEAD_EN_B 4
+#define HCLGE_COMM_LLRS_FEC_EN_B 5
+
+#define hclge_comm_dev_phy_imp_supported(ae_dev) \
+ test_bit(HNAE3_DEV_SUPPORT_PHY_IMP_B, (ae_dev)->caps)
+
+#define HCLGE_COMM_TYPE_CRQ 0
+#define HCLGE_COMM_TYPE_CSQ 1
+
+#define HCLGE_COMM_CMDQ_CLEAR_WAIT_TIME 200
+
+/* bar registers for cmdq */
+#define HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG 0x27000
+#define HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG 0x27004
+#define HCLGE_COMM_NIC_CSQ_DEPTH_REG 0x27008
+#define HCLGE_COMM_NIC_CSQ_TAIL_REG 0x27010
+#define HCLGE_COMM_NIC_CSQ_HEAD_REG 0x27014
+#define HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG 0x27018
+#define HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG 0x2701C
+#define HCLGE_COMM_NIC_CRQ_DEPTH_REG 0x27020
+#define HCLGE_COMM_NIC_CRQ_TAIL_REG 0x27024
+#define HCLGE_COMM_NIC_CRQ_HEAD_REG 0x27028
+/* Vector0 interrupt CMDQ event source register(RW) */
+#define HCLGE_COMM_VECTOR0_CMDQ_SRC_REG 0x27100
+/* Vector0 interrupt CMDQ event status register(RO) */
+#define HCLGE_COMM_VECTOR0_CMDQ_STATE_REG 0x27104
+#define HCLGE_COMM_CMDQ_INTR_EN_REG 0x27108
+#define HCLGE_COMM_CMDQ_INTR_GEN_REG 0x2710C
+#define HCLGE_COMM_CMDQ_INTR_STS_REG 0x27104
+
+/* this bit indicates that the driver is ready for hardware reset */
+#define HCLGE_COMM_NIC_SW_RST_RDY_B 16
+#define HCLGE_COMM_NIC_SW_RST_RDY BIT(HCLGE_COMM_NIC_SW_RST_RDY_B)
+#define HCLGE_COMM_NIC_CMQ_DESC_NUM_S 3
+#define HCLGE_COMM_NIC_CMQ_DESC_NUM 1024
+#define HCLGE_COMM_CMDQ_TX_TIMEOUT_DEFAULT 30000
+#define HCLGE_COMM_CMDQ_TX_TIMEOUT_500MS 500000
+
+enum hclge_opcode_type {
+ /* Generic commands */
+ HCLGE_OPC_QUERY_FW_VER = 0x0001,
+ HCLGE_OPC_CFG_RST_TRIGGER = 0x0020,
+ HCLGE_OPC_GBL_RST_STATUS = 0x0021,
+ HCLGE_OPC_QUERY_FUNC_STATUS = 0x0022,
+ HCLGE_OPC_QUERY_PF_RSRC = 0x0023,
+ HCLGE_OPC_QUERY_VF_RSRC = 0x0024,
+ HCLGE_OPC_GET_CFG_PARAM = 0x0025,
+ HCLGE_OPC_PF_RST_DONE = 0x0026,
+ HCLGE_OPC_QUERY_VF_RST_RDY = 0x0027,
+
+ HCLGE_OPC_STATS_64_BIT = 0x0030,
+ HCLGE_OPC_STATS_32_BIT = 0x0031,
+ HCLGE_OPC_STATS_MAC = 0x0032,
+ HCLGE_OPC_QUERY_MAC_REG_NUM = 0x0033,
+ HCLGE_OPC_STATS_MAC_ALL = 0x0034,
+
+ HCLGE_OPC_QUERY_REG_NUM = 0x0040,
+ HCLGE_OPC_QUERY_32_BIT_REG = 0x0041,
+ HCLGE_OPC_QUERY_64_BIT_REG = 0x0042,
+ HCLGE_OPC_DFX_BD_NUM = 0x0043,
+ HCLGE_OPC_DFX_BIOS_COMMON_REG = 0x0044,
+ HCLGE_OPC_DFX_SSU_REG_0 = 0x0045,
+ HCLGE_OPC_DFX_SSU_REG_1 = 0x0046,
+ HCLGE_OPC_DFX_IGU_EGU_REG = 0x0047,
+ HCLGE_OPC_DFX_RPU_REG_0 = 0x0048,
+ HCLGE_OPC_DFX_RPU_REG_1 = 0x0049,
+ HCLGE_OPC_DFX_NCSI_REG = 0x004A,
+ HCLGE_OPC_DFX_RTC_REG = 0x004B,
+ HCLGE_OPC_DFX_PPP_REG = 0x004C,
+ HCLGE_OPC_DFX_RCB_REG = 0x004D,
+ HCLGE_OPC_DFX_TQP_REG = 0x004E,
+ HCLGE_OPC_DFX_SSU_REG_2 = 0x004F,
+
+ HCLGE_OPC_QUERY_DEV_SPECS = 0x0050,
+
+ /* MAC command */
+ HCLGE_OPC_CONFIG_MAC_MODE = 0x0301,
+ HCLGE_OPC_CONFIG_AN_MODE = 0x0304,
+ HCLGE_OPC_QUERY_LINK_STATUS = 0x0307,
+ HCLGE_OPC_CONFIG_MAX_FRM_SIZE = 0x0308,
+ HCLGE_OPC_CONFIG_SPEED_DUP = 0x0309,
+ HCLGE_OPC_QUERY_MAC_TNL_INT = 0x0310,
+ HCLGE_OPC_MAC_TNL_INT_EN = 0x0311,
+ HCLGE_OPC_CLEAR_MAC_TNL_INT = 0x0312,
+ HCLGE_OPC_COMMON_LOOPBACK = 0x0315,
+ HCLGE_OPC_QUERY_FEC_STATS = 0x0316,
+ HCLGE_OPC_CONFIG_FEC_MODE = 0x031A,
+ HCLGE_OPC_QUERY_ROH_TYPE_INFO = 0x0389,
+
+ /* PTP commands */
+ HCLGE_OPC_PTP_INT_EN = 0x0501,
+ HCLGE_OPC_PTP_MODE_CFG = 0x0507,
+
+ /* PFC/Pause commands */
+ HCLGE_OPC_CFG_MAC_PAUSE_EN = 0x0701,
+ HCLGE_OPC_CFG_PFC_PAUSE_EN = 0x0702,
+ HCLGE_OPC_CFG_MAC_PARA = 0x0703,
+ HCLGE_OPC_CFG_PFC_PARA = 0x0704,
+ HCLGE_OPC_QUERY_MAC_TX_PKT_CNT = 0x0705,
+ HCLGE_OPC_QUERY_MAC_RX_PKT_CNT = 0x0706,
+ HCLGE_OPC_QUERY_PFC_TX_PKT_CNT = 0x0707,
+ HCLGE_OPC_QUERY_PFC_RX_PKT_CNT = 0x0708,
+ HCLGE_OPC_PRI_TO_TC_MAPPING = 0x0709,
+ HCLGE_OPC_QOS_MAP = 0x070A,
+
+ /* ETS/scheduler commands */
+ HCLGE_OPC_TM_PG_TO_PRI_LINK = 0x0804,
+ HCLGE_OPC_TM_QS_TO_PRI_LINK = 0x0805,
+ HCLGE_OPC_TM_NQ_TO_QS_LINK = 0x0806,
+ HCLGE_OPC_TM_RQ_TO_QS_LINK = 0x0807,
+ HCLGE_OPC_TM_PORT_WEIGHT = 0x0808,
+ HCLGE_OPC_TM_PG_WEIGHT = 0x0809,
+ HCLGE_OPC_TM_QS_WEIGHT = 0x080A,
+ HCLGE_OPC_TM_PRI_WEIGHT = 0x080B,
+ HCLGE_OPC_TM_PRI_C_SHAPPING = 0x080C,
+ HCLGE_OPC_TM_PRI_P_SHAPPING = 0x080D,
+ HCLGE_OPC_TM_PG_C_SHAPPING = 0x080E,
+ HCLGE_OPC_TM_PG_P_SHAPPING = 0x080F,
+ HCLGE_OPC_TM_PORT_SHAPPING = 0x0810,
+ HCLGE_OPC_TM_PG_SCH_MODE_CFG = 0x0812,
+ HCLGE_OPC_TM_PRI_SCH_MODE_CFG = 0x0813,
+ HCLGE_OPC_TM_QS_SCH_MODE_CFG = 0x0814,
+ HCLGE_OPC_TM_BP_TO_QSET_MAPPING = 0x0815,
+ HCLGE_OPC_TM_NODES = 0x0816,
+ HCLGE_OPC_ETS_TC_WEIGHT = 0x0843,
+ HCLGE_OPC_QSET_DFX_STS = 0x0844,
+ HCLGE_OPC_PRI_DFX_STS = 0x0845,
+ HCLGE_OPC_PG_DFX_STS = 0x0846,
+ HCLGE_OPC_PORT_DFX_STS = 0x0847,
+ HCLGE_OPC_SCH_NQ_CNT = 0x0848,
+ HCLGE_OPC_SCH_RQ_CNT = 0x0849,
+ HCLGE_OPC_TM_INTERNAL_STS = 0x0850,
+ HCLGE_OPC_TM_INTERNAL_CNT = 0x0851,
+ HCLGE_OPC_TM_INTERNAL_STS_1 = 0x0852,
+ HCLGE_OPC_TM_FLUSH = 0x0872,
+
+ /* Packet buffer allocate commands */
+ HCLGE_OPC_TX_BUFF_ALLOC = 0x0901,
+ HCLGE_OPC_RX_PRIV_BUFF_ALLOC = 0x0902,
+ HCLGE_OPC_RX_PRIV_WL_ALLOC = 0x0903,
+ HCLGE_OPC_RX_COM_THRD_ALLOC = 0x0904,
+ HCLGE_OPC_RX_COM_WL_ALLOC = 0x0905,
+ HCLGE_OPC_RX_GBL_PKT_CNT = 0x0906,
+
+ /* TQP management command */
+ HCLGE_OPC_SET_TQP_MAP = 0x0A01,
+
+ /* TQP commands */
+ HCLGE_OPC_CFG_TX_QUEUE = 0x0B01,
+ HCLGE_OPC_QUERY_TX_POINTER = 0x0B02,
+ HCLGE_OPC_QUERY_TX_STATS = 0x0B03,
+ HCLGE_OPC_TQP_TX_QUEUE_TC = 0x0B04,
+ HCLGE_OPC_CFG_RX_QUEUE = 0x0B11,
+ HCLGE_OPC_QUERY_RX_POINTER = 0x0B12,
+ HCLGE_OPC_QUERY_RX_STATS = 0x0B13,
+ HCLGE_OPC_STASH_RX_QUEUE_LRO = 0x0B16,
+ HCLGE_OPC_CFG_RX_QUEUE_LRO = 0x0B17,
+ HCLGE_OPC_CFG_COM_TQP_QUEUE = 0x0B20,
+ HCLGE_OPC_RESET_TQP_QUEUE = 0x0B22,
+
+ /* PPU commands */
+ HCLGE_OPC_PPU_PF_OTHER_INT_DFX = 0x0B4A,
+
+ /* TSO command */
+ HCLGE_OPC_TSO_GENERIC_CONFIG = 0x0C01,
+ HCLGE_OPC_GRO_GENERIC_CONFIG = 0x0C10,
+
+ /* RSS commands */
+ HCLGE_OPC_RSS_GENERIC_CONFIG = 0x0D01,
+ HCLGE_OPC_RSS_INDIR_TABLE = 0x0D07,
+ HCLGE_OPC_RSS_TC_MODE = 0x0D08,
+ HCLGE_OPC_RSS_INPUT_TUPLE = 0x0D02,
+
+ /* Promiscuous mode command */
+ HCLGE_OPC_CFG_PROMISC_MODE = 0x0E01,
+
+ /* Vlan offload commands */
+ HCLGE_OPC_VLAN_PORT_TX_CFG = 0x0F01,
+ HCLGE_OPC_VLAN_PORT_RX_CFG = 0x0F02,
+
+ /* Interrupts commands */
+ HCLGE_OPC_ADD_RING_TO_VECTOR = 0x1503,
+ HCLGE_OPC_DEL_RING_TO_VECTOR = 0x1504,
+
+ /* MAC commands */
+ HCLGE_OPC_MAC_VLAN_ADD = 0x1000,
+ HCLGE_OPC_MAC_VLAN_REMOVE = 0x1001,
+ HCLGE_OPC_MAC_VLAN_TYPE_ID = 0x1002,
+ HCLGE_OPC_MAC_VLAN_INSERT = 0x1003,
+ HCLGE_OPC_MAC_VLAN_ALLOCATE = 0x1004,
+ HCLGE_OPC_MAC_ETHTYPE_ADD = 0x1010,
+ HCLGE_OPC_MAC_ETHTYPE_REMOVE = 0x1011,
+
+ /* MAC VLAN commands */
+ HCLGE_OPC_MAC_VLAN_SWITCH_PARAM = 0x1033,
+
+ /* VLAN commands */
+ HCLGE_OPC_VLAN_FILTER_CTRL = 0x1100,
+ HCLGE_OPC_VLAN_FILTER_PF_CFG = 0x1101,
+ HCLGE_OPC_VLAN_FILTER_VF_CFG = 0x1102,
+ HCLGE_OPC_PORT_VLAN_BYPASS = 0x1103,
+
+ /* Flow Director commands */
+ HCLGE_OPC_FD_MODE_CTRL = 0x1200,
+ HCLGE_OPC_FD_GET_ALLOCATION = 0x1201,
+ HCLGE_OPC_FD_KEY_CONFIG = 0x1202,
+ HCLGE_OPC_FD_TCAM_OP = 0x1203,
+ HCLGE_OPC_FD_AD_OP = 0x1204,
+ HCLGE_OPC_FD_CNT_OP = 0x1205,
+ HCLGE_OPC_FD_USER_DEF_OP = 0x1207,
+ HCLGE_OPC_FD_QB_CTRL = 0x1210,
+ HCLGE_OPC_FD_QB_AD_OP = 0x1211,
+
+ /* MDIO command */
+ HCLGE_OPC_MDIO_CONFIG = 0x1900,
+
+ /* QCN commands */
+ HCLGE_OPC_QCN_MOD_CFG = 0x1A01,
+ HCLGE_OPC_QCN_GRP_TMPLT_CFG = 0x1A02,
+ HCLGE_OPC_QCN_SHAPPING_CFG = 0x1A03,
+ HCLGE_OPC_QCN_SHAPPING_BS_CFG = 0x1A04,
+ HCLGE_OPC_QCN_QSET_LINK_CFG = 0x1A05,
+ HCLGE_OPC_QCN_RP_STATUS_GET = 0x1A06,
+ HCLGE_OPC_QCN_AJUST_INIT = 0x1A07,
+ HCLGE_OPC_QCN_DFX_CNT_STATUS = 0x1A08,
+
+ /* Mailbox command */
+ HCLGEVF_OPC_MBX_PF_TO_VF = 0x2000,
+ HCLGEVF_OPC_MBX_VF_TO_PF = 0x2001,
+
+ /* Led command */
+ HCLGE_OPC_LED_STATUS_CFG = 0xB000,
+
+ /* clear hardware resource command */
+ HCLGE_OPC_CLEAR_HW_RESOURCE = 0x700B,
+
+ /* NCL config command */
+ HCLGE_OPC_QUERY_NCL_CONFIG = 0x7011,
+
+ /* IMP stats command */
+ HCLGE_OPC_IMP_STATS_BD = 0x7012,
+ HCLGE_OPC_IMP_STATS_INFO = 0x7013,
+ HCLGE_OPC_IMP_COMPAT_CFG = 0x701A,
+
+ /* SFP command */
+ HCLGE_OPC_GET_SFP_EEPROM = 0x7100,
+ HCLGE_OPC_GET_SFP_EXIST = 0x7101,
+ HCLGE_OPC_GET_SFP_INFO = 0x7104,
+
+ /* Error INT commands */
+ HCLGE_MAC_COMMON_INT_EN = 0x030E,
+ HCLGE_TM_SCH_ECC_INT_EN = 0x0829,
+ HCLGE_SSU_ECC_INT_CMD = 0x0989,
+ HCLGE_SSU_COMMON_INT_CMD = 0x098C,
+ HCLGE_PPU_MPF_ECC_INT_CMD = 0x0B40,
+ HCLGE_PPU_MPF_OTHER_INT_CMD = 0x0B41,
+ HCLGE_PPU_PF_OTHER_INT_CMD = 0x0B42,
+ HCLGE_COMMON_ECC_INT_CFG = 0x1505,
+ HCLGE_QUERY_RAS_INT_STS_BD_NUM = 0x1510,
+ HCLGE_QUERY_CLEAR_MPF_RAS_INT = 0x1511,
+ HCLGE_QUERY_CLEAR_PF_RAS_INT = 0x1512,
+ HCLGE_QUERY_MSIX_INT_STS_BD_NUM = 0x1513,
+ HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT = 0x1514,
+ HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT = 0x1515,
+ HCLGE_QUERY_ALL_ERR_BD_NUM = 0x1516,
+ HCLGE_QUERY_ALL_ERR_INFO = 0x1517,
+ HCLGE_CONFIG_ROCEE_RAS_INT_EN = 0x1580,
+ HCLGE_QUERY_CLEAR_ROCEE_RAS_INT = 0x1581,
+ HCLGE_ROCEE_PF_RAS_INT_CMD = 0x1584,
+ HCLGE_QUERY_ROCEE_ECC_RAS_INFO_CMD = 0x1585,
+ HCLGE_QUERY_ROCEE_AXI_RAS_INFO_CMD = 0x1586,
+ HCLGE_IGU_EGU_TNL_INT_EN = 0x1803,
+ HCLGE_IGU_COMMON_INT_EN = 0x1806,
+ HCLGE_TM_QCN_MEM_INT_CFG = 0x1A14,
+ HCLGE_PPP_CMD0_INT_CMD = 0x2100,
+ HCLGE_PPP_CMD1_INT_CMD = 0x2101,
+ HCLGE_MAC_ETHERTYPE_IDX_RD = 0x2105,
+ HCLGE_OPC_WOL_GET_SUPPORTED_MODE = 0x2201,
+ HCLGE_OPC_WOL_CFG = 0x2202,
+ HCLGE_NCSI_INT_EN = 0x2401,
+
+ /* ROH MAC commands */
+ HCLGE_OPC_MAC_ADDR_CHECK = 0x9004,
+
+ /* PHY command */
+ HCLGE_OPC_PHY_LINK_KSETTING = 0x7025,
+ HCLGE_OPC_PHY_REG = 0x7026,
+
+ /* Query link diagnosis info command */
+ HCLGE_OPC_QUERY_LINK_DIAGNOSIS = 0x702A,
+};
+
+enum hclge_comm_cmd_return_status {
+ HCLGE_COMM_CMD_EXEC_SUCCESS = 0,
+ HCLGE_COMM_CMD_NO_AUTH = 1,
+ HCLGE_COMM_CMD_NOT_SUPPORTED = 2,
+ HCLGE_COMM_CMD_QUEUE_FULL = 3,
+ HCLGE_COMM_CMD_NEXT_ERR = 4,
+ HCLGE_COMM_CMD_UNEXE_ERR = 5,
+ HCLGE_COMM_CMD_PARA_ERR = 6,
+ HCLGE_COMM_CMD_RESULT_ERR = 7,
+ HCLGE_COMM_CMD_TIMEOUT = 8,
+ HCLGE_COMM_CMD_HILINK_ERR = 9,
+ HCLGE_COMM_CMD_QUEUE_ILLEGAL = 10,
+ HCLGE_COMM_CMD_INVALID = 11,
+};
+
+enum HCLGE_COMM_CAP_BITS {
+ HCLGE_COMM_CAP_UDP_GSO_B,
+ HCLGE_COMM_CAP_QB_B,
+ HCLGE_COMM_CAP_FD_FORWARD_TC_B,
+ HCLGE_COMM_CAP_PTP_B,
+ HCLGE_COMM_CAP_INT_QL_B,
+ HCLGE_COMM_CAP_HW_TX_CSUM_B,
+ HCLGE_COMM_CAP_TX_PUSH_B,
+ HCLGE_COMM_CAP_PHY_IMP_B,
+ HCLGE_COMM_CAP_TQP_TXRX_INDEP_B,
+ HCLGE_COMM_CAP_HW_PAD_B,
+ HCLGE_COMM_CAP_STASH_B,
+ HCLGE_COMM_CAP_UDP_TUNNEL_CSUM_B,
+ HCLGE_COMM_CAP_RAS_IMP_B = 12,
+ HCLGE_COMM_CAP_FEC_B = 13,
+ HCLGE_COMM_CAP_PAUSE_B = 14,
+ HCLGE_COMM_CAP_RXD_ADV_LAYOUT_B = 15,
+ HCLGE_COMM_CAP_PORT_VLAN_BYPASS_B = 17,
+ HCLGE_COMM_CAP_CQ_B = 18,
+ HCLGE_COMM_CAP_GRO_B = 20,
+ HCLGE_COMM_CAP_FD_B = 21,
+ HCLGE_COMM_CAP_FEC_STATS_B = 25,
+ HCLGE_COMM_CAP_LANE_NUM_B = 27,
+ HCLGE_COMM_CAP_WOL_B = 28,
+ HCLGE_COMM_CAP_TM_FLUSH_B = 31,
+};
+
+enum HCLGE_COMM_API_CAP_BITS {
+ HCLGE_COMM_API_CAP_FLEX_RSS_TBL_B,
+};
+
+/* capabilities bits map between imp firmware and local driver */
+struct hclge_comm_caps_bit_map {
+ u16 imp_bit;
+ u16 local_bit;
+};
+
+struct hclge_cmdq_tx_timeout_map {
+ u32 opcode;
+ u32 tx_timeout;
+};
+
+struct hclge_comm_firmware_compat_cmd {
+ __le32 compat;
+ u8 rsv[20];
+};
+
+enum hclge_comm_cmd_state {
+ HCLGE_COMM_STATE_CMD_DISABLE,
+};
+
+struct hclge_comm_errcode {
+ u32 imp_errcode;
+ int common_errno;
+};
+
+#define HCLGE_COMM_QUERY_CAP_LENGTH 3
+struct hclge_comm_query_version_cmd {
+ __le32 firmware;
+ __le32 hardware;
+ __le32 api_caps;
+ __le32 caps[HCLGE_COMM_QUERY_CAP_LENGTH]; /* capabilities of device */
+};
+
+#define HCLGE_DESC_DATA_LEN 6
+struct hclge_desc {
+ __le16 opcode;
+ __le16 flag;
+ __le16 retval;
+ __le16 rsv;
+ __le32 data[HCLGE_DESC_DATA_LEN];
+};
+
+struct hclge_comm_cmq_ring {
+ dma_addr_t desc_dma_addr;
+ struct hclge_desc *desc;
+ struct pci_dev *pdev;
+ u32 head;
+ u32 tail;
+
+ u16 buf_size;
+ u16 desc_num;
+ int next_to_use;
+ int next_to_clean;
+ u8 ring_type; /* cmq ring type */
+ spinlock_t lock; /* Command queue lock */
+};
+
+enum hclge_comm_cmd_status {
+ HCLGE_COMM_STATUS_SUCCESS = 0,
+ HCLGE_COMM_ERR_CSQ_FULL = -1,
+ HCLGE_COMM_ERR_CSQ_TIMEOUT = -2,
+ HCLGE_COMM_ERR_CSQ_ERROR = -3,
+};
+
+struct hclge_comm_cmq {
+ struct hclge_comm_cmq_ring csq;
+ struct hclge_comm_cmq_ring crq;
+ u16 tx_timeout;
+ enum hclge_comm_cmd_status last_status;
+};
+
+struct hclge_comm_hw {
+ void __iomem *io_base;
+ void __iomem *mem_base;
+ struct hclge_comm_cmq cmq;
+ unsigned long comm_state;
+};
+
+static inline void hclge_comm_write_reg(void __iomem *base, u32 reg, u32 value)
+{
+ writel(value, base + reg);
+}
+
+static inline u32 hclge_comm_read_reg(u8 __iomem *base, u32 reg)
+{
+ u8 __iomem *reg_addr = READ_ONCE(base);
+
+ return readl(reg_addr + reg);
+}
+
+#define hclge_comm_write_dev(a, reg, value) \
+ hclge_comm_write_reg((a)->io_base, reg, value)
+#define hclge_comm_read_dev(a, reg) \
+ hclge_comm_read_reg((a)->io_base, reg)
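+/* Usage sketch (illustrative): register accesses go through hw->io_base,
+ * e.g. hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_TAIL_REG, 0) zeroes the
+ * CSQ tail register, as done when the command queue registers are set up.
+ */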
+
+void hclge_comm_cmd_init_regs(struct hclge_comm_hw *hw);
+int hclge_comm_cmd_query_version_and_capability(struct hnae3_ae_dev *ae_dev,
+ struct hclge_comm_hw *hw,
+ u32 *fw_version, bool is_pf);
+int hclge_comm_alloc_cmd_queue(struct hclge_comm_hw *hw, int ring_type);
+int hclge_comm_cmd_send(struct hclge_comm_hw *hw, struct hclge_desc *desc,
+ int num);
+void hclge_comm_cmd_reuse_desc(struct hclge_desc *desc, bool is_read);
+int hclge_comm_firmware_compat_config(struct hnae3_ae_dev *ae_dev,
+ struct hclge_comm_hw *hw, bool en);
+void hclge_comm_free_cmd_desc(struct hclge_comm_cmq_ring *ring);
+void hclge_comm_cmd_setup_basic_desc(struct hclge_desc *desc,
+ enum hclge_opcode_type opcode,
+ bool is_read);
+void hclge_comm_cmd_uninit(struct hnae3_ae_dev *ae_dev,
+ struct hclge_comm_hw *hw);
+int hclge_comm_cmd_queue_init(struct pci_dev *pdev, struct hclge_comm_hw *hw);
+int hclge_comm_cmd_init(struct hnae3_ae_dev *ae_dev, struct hclge_comm_hw *hw,
+ u32 *fw_version, bool is_pf,
+ unsigned long reset_pending);
+
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c
new file mode 100644
index 0000000000..b4ae2160af
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c
@@ -0,0 +1,504 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2021-2021 Hisilicon Limited.
+#include <linux/skbuff.h>
+
+#include "hnae3.h"
+#include "hclge_comm_cmd.h"
+#include "hclge_comm_rss.h"
+
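+/* Default 40-byte RSS hash key, the commonly used default Toeplitz key;
+ * it is used until the user configures a key of their own.
+ */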
+static const u8 hclge_comm_hash_key[] = {
+ 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
+ 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
+ 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
+ 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
+ 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
+};
+
+static void
+hclge_comm_init_rss_tuple(struct hnae3_ae_dev *ae_dev,
+ struct hclge_comm_rss_tuple_cfg *rss_tuple_cfg)
+{
+ rss_tuple_cfg->ipv4_tcp_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER;
+ rss_tuple_cfg->ipv4_udp_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER;
+ rss_tuple_cfg->ipv4_sctp_en = HCLGE_COMM_RSS_INPUT_TUPLE_SCTP;
+ rss_tuple_cfg->ipv4_fragment_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER;
+ rss_tuple_cfg->ipv6_tcp_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER;
+ rss_tuple_cfg->ipv6_udp_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER;
+ rss_tuple_cfg->ipv6_sctp_en =
+ ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
+ HCLGE_COMM_RSS_INPUT_TUPLE_SCTP_NO_PORT :
+ HCLGE_COMM_RSS_INPUT_TUPLE_SCTP;
+ rss_tuple_cfg->ipv6_fragment_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER;
+}
+
+int hclge_comm_rss_init_cfg(struct hnae3_handle *nic,
+ struct hnae3_ae_dev *ae_dev,
+ struct hclge_comm_rss_cfg *rss_cfg)
+{
+ u16 rss_ind_tbl_size = ae_dev->dev_specs.rss_ind_tbl_size;
+ int rss_algo = HCLGE_COMM_RSS_HASH_ALGO_TOEPLITZ;
+ u16 *rss_ind_tbl;
+
+ if (nic->flags & HNAE3_SUPPORT_VF)
+ rss_cfg->rss_size = nic->kinfo.rss_size;
+
+ if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
+ rss_algo = HCLGE_COMM_RSS_HASH_ALGO_SIMPLE;
+
+ hclge_comm_init_rss_tuple(ae_dev, &rss_cfg->rss_tuple_sets);
+
+ rss_cfg->rss_algo = rss_algo;
+
+ rss_ind_tbl = devm_kcalloc(&ae_dev->pdev->dev, rss_ind_tbl_size,
+ sizeof(*rss_ind_tbl), GFP_KERNEL);
+ if (!rss_ind_tbl)
+ return -ENOMEM;
+
+ rss_cfg->rss_indirection_tbl = rss_ind_tbl;
+ memcpy(rss_cfg->rss_hash_key, hclge_comm_hash_key,
+ HCLGE_COMM_RSS_KEY_SIZE);
+
+ hclge_comm_rss_indir_init_cfg(ae_dev, rss_cfg);
+
+ return 0;
+}
+
+void hclge_comm_get_rss_tc_info(u16 rss_size, u8 hw_tc_map, u16 *tc_offset,
+ u16 *tc_valid, u16 *tc_size)
+{
+ u16 roundup_size;
+ u32 i;
+
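+	/* tc_size is programmed as log2 of the per-TC RSS size, so round
+	 * rss_size up to the next power of two before taking the logarithm.
+	 */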
+ roundup_size = roundup_pow_of_two(rss_size);
+ roundup_size = ilog2(roundup_size);
+
+ for (i = 0; i < HCLGE_COMM_MAX_TC_NUM; i++) {
+ tc_valid[i] = 1;
+ tc_size[i] = roundup_size;
+ tc_offset[i] = (hw_tc_map & BIT(i)) ? rss_size * i : 0;
+ }
+}
+
+int hclge_comm_set_rss_tc_mode(struct hclge_comm_hw *hw, u16 *tc_offset,
+ u16 *tc_valid, u16 *tc_size)
+{
+ struct hclge_comm_rss_tc_mode_cmd *req;
+ struct hclge_desc desc;
+ unsigned int i;
+ int ret;
+
+ req = (struct hclge_comm_rss_tc_mode_cmd *)desc.data;
+
+ hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
+ for (i = 0; i < HCLGE_COMM_MAX_TC_NUM; i++) {
+ u16 mode = 0;
+
+ hnae3_set_bit(mode, HCLGE_COMM_RSS_TC_VALID_B,
+ (tc_valid[i] & 0x1));
+ hnae3_set_field(mode, HCLGE_COMM_RSS_TC_SIZE_M,
+ HCLGE_COMM_RSS_TC_SIZE_S, tc_size[i]);
+ hnae3_set_bit(mode, HCLGE_COMM_RSS_TC_SIZE_MSB_B,
+ tc_size[i] >> HCLGE_COMM_RSS_TC_SIZE_MSB_OFFSET &
+ 0x1);
+ hnae3_set_field(mode, HCLGE_COMM_RSS_TC_OFFSET_M,
+ HCLGE_COMM_RSS_TC_OFFSET_S, tc_offset[i]);
+
+ req->rss_tc_mode[i] = cpu_to_le16(mode);
+ }
+
+ ret = hclge_comm_cmd_send(hw, &desc, 1);
+ if (ret)
+ dev_err(&hw->cmq.csq.pdev->dev,
+ "failed to set rss tc mode, ret = %d.\n", ret);
+
+ return ret;
+}
+
+int hclge_comm_set_rss_hash_key(struct hclge_comm_rss_cfg *rss_cfg,
+ struct hclge_comm_hw *hw, const u8 *key,
+ const u8 hfunc)
+{
+ u8 hash_algo;
+ int ret;
+
+ ret = hclge_comm_parse_rss_hfunc(rss_cfg, hfunc, &hash_algo);
+ if (ret)
+ return ret;
+
+	/* Set the RSS Hash Key if specified by the user */
+ if (key) {
+ ret = hclge_comm_set_rss_algo_key(hw, hash_algo, key);
+ if (ret)
+ return ret;
+
+		/* Update the shadow RSS key with the user specified key */
+ memcpy(rss_cfg->rss_hash_key, key, HCLGE_COMM_RSS_KEY_SIZE);
+ } else {
+ ret = hclge_comm_set_rss_algo_key(hw, hash_algo,
+ rss_cfg->rss_hash_key);
+ if (ret)
+ return ret;
+ }
+ rss_cfg->rss_algo = hash_algo;
+
+ return 0;
+}
+
+int hclge_comm_set_rss_tuple(struct hnae3_ae_dev *ae_dev,
+ struct hclge_comm_hw *hw,
+ struct hclge_comm_rss_cfg *rss_cfg,
+ struct ethtool_rxnfc *nfc)
+{
+ struct hclge_comm_rss_input_tuple_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ if (nfc->data &
+ ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3))
+ return -EINVAL;
+
+ req = (struct hclge_comm_rss_input_tuple_cmd *)desc.data;
+ hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE,
+ false);
+
+ ret = hclge_comm_init_rss_tuple_cmd(rss_cfg, nfc, ae_dev, req);
+ if (ret) {
+ dev_err(&hw->cmq.csq.pdev->dev,
+ "failed to init rss tuple cmd, ret = %d.\n", ret);
+ return ret;
+ }
+
+ ret = hclge_comm_cmd_send(hw, &desc, 1);
+ if (ret) {
+ dev_err(&hw->cmq.csq.pdev->dev,
+ "failed to set rss tuple, ret = %d.\n", ret);
+ return ret;
+ }
+
+ rss_cfg->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
+ rss_cfg->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
+ rss_cfg->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
+ rss_cfg->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
+ rss_cfg->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
+ rss_cfg->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
+ rss_cfg->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
+ rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
+ return 0;
+}
+
+u32 hclge_comm_get_rss_key_size(struct hnae3_handle *handle)
+{
+ return HCLGE_COMM_RSS_KEY_SIZE;
+}
+
+int hclge_comm_parse_rss_hfunc(struct hclge_comm_rss_cfg *rss_cfg,
+ const u8 hfunc, u8 *hash_algo)
+{
+ switch (hfunc) {
+ case ETH_RSS_HASH_TOP:
+ *hash_algo = HCLGE_COMM_RSS_HASH_ALGO_TOEPLITZ;
+ return 0;
+ case ETH_RSS_HASH_XOR:
+ *hash_algo = HCLGE_COMM_RSS_HASH_ALGO_SIMPLE;
+ return 0;
+ case ETH_RSS_HASH_NO_CHANGE:
+ *hash_algo = rss_cfg->rss_algo;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+void hclge_comm_rss_indir_init_cfg(struct hnae3_ae_dev *ae_dev,
+ struct hclge_comm_rss_cfg *rss_cfg)
+{
+ u16 i;
+ /* Initialize RSS indirect table */
+ for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
+ rss_cfg->rss_indirection_tbl[i] = i % rss_cfg->rss_size;
+}
+
+int hclge_comm_get_rss_tuple(struct hclge_comm_rss_cfg *rss_cfg, int flow_type,
+ u8 *tuple_sets)
+{
+ switch (flow_type) {
+ case TCP_V4_FLOW:
+ *tuple_sets = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
+ break;
+ case UDP_V4_FLOW:
+ *tuple_sets = rss_cfg->rss_tuple_sets.ipv4_udp_en;
+ break;
+ case TCP_V6_FLOW:
+ *tuple_sets = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
+ break;
+ case UDP_V6_FLOW:
+ *tuple_sets = rss_cfg->rss_tuple_sets.ipv6_udp_en;
+ break;
+ case SCTP_V4_FLOW:
+ *tuple_sets = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
+ break;
+ case SCTP_V6_FLOW:
+ *tuple_sets = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
+ break;
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ *tuple_sets = HCLGE_COMM_S_IP_BIT | HCLGE_COMM_D_IP_BIT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void
+hclge_comm_append_rss_msb_info(struct hclge_comm_rss_ind_tbl_cmd *req,
+ u16 qid, u32 j)
+{
+ u8 rss_msb_oft;
+ u8 rss_msb_val;
+
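+	/* The caller stores the low 8 bits of each queue id in rss_qid_l;
+	 * the bits above bit 7 are packed into the rss_qid_h byte array,
+	 * with HCLGE_COMM_RSS_CFG_TBL_BW_H bits reserved per table entry.
+	 */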
+ rss_msb_oft =
+ j * HCLGE_COMM_RSS_CFG_TBL_BW_H / BITS_PER_BYTE;
+ rss_msb_val = (qid >> HCLGE_COMM_RSS_CFG_TBL_BW_L & 0x1) <<
+ (j * HCLGE_COMM_RSS_CFG_TBL_BW_H % BITS_PER_BYTE);
+ req->rss_qid_h[rss_msb_oft] |= rss_msb_val;
+}
+
+int hclge_comm_set_rss_indir_table(struct hnae3_ae_dev *ae_dev,
+ struct hclge_comm_hw *hw, const u16 *indir)
+{
+ struct hclge_comm_rss_ind_tbl_cmd *req;
+ struct hclge_desc desc;
+ u16 rss_cfg_tbl_num;
+ int ret;
+ u16 qid;
+ u16 i;
+ u32 j;
+
+ req = (struct hclge_comm_rss_ind_tbl_cmd *)desc.data;
+ rss_cfg_tbl_num = ae_dev->dev_specs.rss_ind_tbl_size /
+ HCLGE_COMM_RSS_CFG_TBL_SIZE;
+
+ for (i = 0; i < rss_cfg_tbl_num; i++) {
+ hclge_comm_cmd_setup_basic_desc(&desc,
+ HCLGE_OPC_RSS_INDIR_TABLE,
+ false);
+
+ req->start_table_index =
+ cpu_to_le16(i * HCLGE_COMM_RSS_CFG_TBL_SIZE);
+ req->rss_set_bitmap =
+ cpu_to_le16(HCLGE_COMM_RSS_SET_BITMAP_MSK);
+ for (j = 0; j < HCLGE_COMM_RSS_CFG_TBL_SIZE; j++) {
+ qid = indir[i * HCLGE_COMM_RSS_CFG_TBL_SIZE + j];
+ req->rss_qid_l[j] = qid & 0xff;
+ hclge_comm_append_rss_msb_info(req, qid, j);
+ }
+ ret = hclge_comm_cmd_send(hw, &desc, 1);
+ if (ret) {
+ dev_err(&hw->cmq.csq.pdev->dev,
+ "failed to configure rss table, ret = %d.\n",
+ ret);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+int hclge_comm_set_rss_input_tuple(struct hclge_comm_hw *hw,
+ struct hclge_comm_rss_cfg *rss_cfg)
+{
+ struct hclge_comm_rss_input_tuple_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE,
+ false);
+
+ req = (struct hclge_comm_rss_input_tuple_cmd *)desc.data;
+
+ req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
+ req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
+ req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
+ req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
+ req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
+ req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
+ req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
+ req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;
+
+ ret = hclge_comm_cmd_send(hw, &desc, 1);
+ if (ret)
+ dev_err(&hw->cmq.csq.pdev->dev,
+ "failed to configure rss input, ret = %d.\n", ret);
+ return ret;
+}
+
+void hclge_comm_get_rss_hash_info(struct hclge_comm_rss_cfg *rss_cfg, u8 *key,
+ u8 *hfunc)
+{
+ /* Get hash algorithm */
+ if (hfunc) {
+ switch (rss_cfg->rss_algo) {
+ case HCLGE_COMM_RSS_HASH_ALGO_TOEPLITZ:
+ *hfunc = ETH_RSS_HASH_TOP;
+ break;
+ case HCLGE_COMM_RSS_HASH_ALGO_SIMPLE:
+ *hfunc = ETH_RSS_HASH_XOR;
+ break;
+ default:
+ *hfunc = ETH_RSS_HASH_UNKNOWN;
+ break;
+ }
+ }
+
+	/* Get the RSS key requested by the user */
+ if (key)
+ memcpy(key, rss_cfg->rss_hash_key, HCLGE_COMM_RSS_KEY_SIZE);
+}
+
+void hclge_comm_get_rss_indir_tbl(struct hclge_comm_rss_cfg *rss_cfg,
+ u32 *indir, u16 rss_ind_tbl_size)
+{
+ u16 i;
+
+ if (!indir)
+ return;
+
+ for (i = 0; i < rss_ind_tbl_size; i++)
+ indir[i] = rss_cfg->rss_indirection_tbl[i];
+}
+
+int hclge_comm_set_rss_algo_key(struct hclge_comm_hw *hw, const u8 hfunc,
+ const u8 *key)
+{
+ struct hclge_comm_rss_config_cmd *req;
+ unsigned int key_offset = 0;
+ struct hclge_desc desc;
+ int key_counts;
+ int key_size;
+ int ret;
+
+ key_counts = HCLGE_COMM_RSS_KEY_SIZE;
+ req = (struct hclge_comm_rss_config_cmd *)desc.data;
+
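+	/* The 40-byte key does not fit in a single descriptor, so it is
+	 * written in HCLGE_COMM_RSS_HASH_KEY_NUM-byte chunks, one command
+	 * per chunk, with key_offset selecting the chunk in hardware.
+	 */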
+ while (key_counts) {
+ hclge_comm_cmd_setup_basic_desc(&desc,
+ HCLGE_OPC_RSS_GENERIC_CONFIG,
+ false);
+
+ req->hash_config |= (hfunc & HCLGE_COMM_RSS_HASH_ALGO_MASK);
+ req->hash_config |=
+ (key_offset << HCLGE_COMM_RSS_HASH_KEY_OFFSET_B);
+
+ key_size = min(HCLGE_COMM_RSS_HASH_KEY_NUM, key_counts);
+ memcpy(req->hash_key,
+ key + key_offset * HCLGE_COMM_RSS_HASH_KEY_NUM,
+ key_size);
+
+ key_counts -= key_size;
+ key_offset++;
+ ret = hclge_comm_cmd_send(hw, &desc, 1);
+ if (ret) {
+ dev_err(&hw->cmq.csq.pdev->dev,
+ "failed to configure RSS key, ret = %d.\n",
+ ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static u8 hclge_comm_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
+{
+ u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_COMM_S_PORT_BIT : 0;
+
+ if (nfc->data & RXH_L4_B_2_3)
+ hash_sets |= HCLGE_COMM_D_PORT_BIT;
+ else
+ hash_sets &= ~HCLGE_COMM_D_PORT_BIT;
+
+ if (nfc->data & RXH_IP_SRC)
+ hash_sets |= HCLGE_COMM_S_IP_BIT;
+ else
+ hash_sets &= ~HCLGE_COMM_S_IP_BIT;
+
+ if (nfc->data & RXH_IP_DST)
+ hash_sets |= HCLGE_COMM_D_IP_BIT;
+ else
+ hash_sets &= ~HCLGE_COMM_D_IP_BIT;
+
+ if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
+ hash_sets |= HCLGE_COMM_V_TAG_BIT;
+
+ return hash_sets;
+}
+
+int hclge_comm_init_rss_tuple_cmd(struct hclge_comm_rss_cfg *rss_cfg,
+ struct ethtool_rxnfc *nfc,
+ struct hnae3_ae_dev *ae_dev,
+ struct hclge_comm_rss_input_tuple_cmd *req)
+{
+ u8 tuple_sets;
+
+ req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
+ req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
+ req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
+ req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
+ req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
+ req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
+ req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
+ req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;
+
+ tuple_sets = hclge_comm_get_rss_hash_bits(nfc);
+ switch (nfc->flow_type) {
+ case TCP_V4_FLOW:
+ req->ipv4_tcp_en = tuple_sets;
+ break;
+ case TCP_V6_FLOW:
+ req->ipv6_tcp_en = tuple_sets;
+ break;
+ case UDP_V4_FLOW:
+ req->ipv4_udp_en = tuple_sets;
+ break;
+ case UDP_V6_FLOW:
+ req->ipv6_udp_en = tuple_sets;
+ break;
+ case SCTP_V4_FLOW:
+ req->ipv4_sctp_en = tuple_sets;
+ break;
+ case SCTP_V6_FLOW:
+ if (ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
+ (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
+ return -EINVAL;
+
+ req->ipv6_sctp_en = tuple_sets;
+ break;
+ case IPV4_FLOW:
+ req->ipv4_fragment_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER;
+ break;
+ case IPV6_FLOW:
+ req->ipv6_fragment_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+u64 hclge_comm_convert_rss_tuple(u8 tuple_sets)
+{
+ u64 tuple_data = 0;
+
+ if (tuple_sets & HCLGE_COMM_D_PORT_BIT)
+ tuple_data |= RXH_L4_B_2_3;
+ if (tuple_sets & HCLGE_COMM_S_PORT_BIT)
+ tuple_data |= RXH_L4_B_0_1;
+ if (tuple_sets & HCLGE_COMM_D_IP_BIT)
+ tuple_data |= RXH_IP_DST;
+ if (tuple_sets & HCLGE_COMM_S_IP_BIT)
+ tuple_data |= RXH_IP_SRC;
+
+ return tuple_data;
+}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h
new file mode 100644
index 0000000000..cdafa63fe3
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h
@@ -0,0 +1,133 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+// Copyright (c) 2021-2021 Hisilicon Limited.
+
+#ifndef __HCLGE_COMM_RSS_H
+#define __HCLGE_COMM_RSS_H
+#include <linux/types.h>
+
+#include "hnae3.h"
+#include "hclge_comm_cmd.h"
+
+#define HCLGE_COMM_RSS_HASH_ALGO_TOEPLITZ 0
+#define HCLGE_COMM_RSS_HASH_ALGO_SIMPLE 1
+#define HCLGE_COMM_RSS_HASH_ALGO_SYMMETRIC 2
+
+#define HCLGE_COMM_RSS_INPUT_TUPLE_OTHER GENMASK(3, 0)
+#define HCLGE_COMM_RSS_INPUT_TUPLE_SCTP GENMASK(4, 0)
+
+#define HCLGE_COMM_D_PORT_BIT BIT(0)
+#define HCLGE_COMM_S_PORT_BIT BIT(1)
+#define HCLGE_COMM_D_IP_BIT BIT(2)
+#define HCLGE_COMM_S_IP_BIT BIT(3)
+#define HCLGE_COMM_V_TAG_BIT BIT(4)
+#define HCLGE_COMM_RSS_INPUT_TUPLE_SCTP_NO_PORT \
+ (HCLGE_COMM_D_IP_BIT | HCLGE_COMM_S_IP_BIT | HCLGE_COMM_V_TAG_BIT)
+#define HCLGE_COMM_MAX_TC_NUM 8
+
+#define HCLGE_COMM_RSS_TC_OFFSET_S 0
+#define HCLGE_COMM_RSS_TC_OFFSET_M GENMASK(10, 0)
+#define HCLGE_COMM_RSS_TC_SIZE_MSB_B 11
+#define HCLGE_COMM_RSS_TC_SIZE_S 12
+#define HCLGE_COMM_RSS_TC_SIZE_M GENMASK(14, 12)
+#define HCLGE_COMM_RSS_TC_VALID_B 15
+#define HCLGE_COMM_RSS_TC_SIZE_MSB_OFFSET 3
+
+struct hclge_comm_rss_tuple_cfg {
+ u8 ipv4_tcp_en;
+ u8 ipv4_udp_en;
+ u8 ipv4_sctp_en;
+ u8 ipv4_fragment_en;
+ u8 ipv6_tcp_en;
+ u8 ipv6_udp_en;
+ u8 ipv6_sctp_en;
+ u8 ipv6_fragment_en;
+};
+
+#define HCLGE_COMM_RSS_KEY_SIZE 40
+#define HCLGE_COMM_RSS_CFG_TBL_SIZE 16
+#define HCLGE_COMM_RSS_CFG_TBL_BW_H 2U
+#define HCLGE_COMM_RSS_CFG_TBL_BW_L 8U
+#define HCLGE_COMM_RSS_CFG_TBL_SIZE_H 4
+#define HCLGE_COMM_RSS_SET_BITMAP_MSK GENMASK(15, 0)
+#define HCLGE_COMM_RSS_HASH_ALGO_MASK GENMASK(3, 0)
+#define HCLGE_COMM_RSS_HASH_KEY_OFFSET_B 4
+
+#define HCLGE_COMM_RSS_HASH_KEY_NUM 16
+struct hclge_comm_rss_config_cmd {
+ u8 hash_config;
+ u8 rsv[7];
+ u8 hash_key[HCLGE_COMM_RSS_HASH_KEY_NUM];
+};
+
+struct hclge_comm_rss_cfg {
+ u8 rss_hash_key[HCLGE_COMM_RSS_KEY_SIZE]; /* user configured hash keys */
+
+ /* shadow table */
+ u16 *rss_indirection_tbl;
+ u32 rss_algo;
+
+ struct hclge_comm_rss_tuple_cfg rss_tuple_sets;
+ u32 rss_size;
+};
+
+struct hclge_comm_rss_input_tuple_cmd {
+ u8 ipv4_tcp_en;
+ u8 ipv4_udp_en;
+ u8 ipv4_sctp_en;
+ u8 ipv4_fragment_en;
+ u8 ipv6_tcp_en;
+ u8 ipv6_udp_en;
+ u8 ipv6_sctp_en;
+ u8 ipv6_fragment_en;
+ u8 rsv[16];
+};
+
+struct hclge_comm_rss_ind_tbl_cmd {
+ __le16 start_table_index;
+ __le16 rss_set_bitmap;
+ u8 rss_qid_h[HCLGE_COMM_RSS_CFG_TBL_SIZE_H];
+ u8 rss_qid_l[HCLGE_COMM_RSS_CFG_TBL_SIZE];
+};
+
+struct hclge_comm_rss_tc_mode_cmd {
+ __le16 rss_tc_mode[HCLGE_COMM_MAX_TC_NUM];
+ u8 rsv[8];
+};
+
+u32 hclge_comm_get_rss_key_size(struct hnae3_handle *handle);
+void hclge_comm_rss_indir_init_cfg(struct hnae3_ae_dev *ae_dev,
+ struct hclge_comm_rss_cfg *rss_cfg);
+int hclge_comm_get_rss_tuple(struct hclge_comm_rss_cfg *rss_cfg, int flow_type,
+ u8 *tuple_sets);
+int hclge_comm_parse_rss_hfunc(struct hclge_comm_rss_cfg *rss_cfg,
+ const u8 hfunc, u8 *hash_algo);
+void hclge_comm_get_rss_hash_info(struct hclge_comm_rss_cfg *rss_cfg, u8 *key,
+ u8 *hfunc);
+void hclge_comm_get_rss_indir_tbl(struct hclge_comm_rss_cfg *rss_cfg,
+ u32 *indir, u16 rss_ind_tbl_size);
+int hclge_comm_set_rss_algo_key(struct hclge_comm_hw *hw, const u8 hfunc,
+ const u8 *key);
+int hclge_comm_init_rss_tuple_cmd(struct hclge_comm_rss_cfg *rss_cfg,
+ struct ethtool_rxnfc *nfc,
+ struct hnae3_ae_dev *ae_dev,
+ struct hclge_comm_rss_input_tuple_cmd *req);
+u64 hclge_comm_convert_rss_tuple(u8 tuple_sets);
+int hclge_comm_set_rss_input_tuple(struct hclge_comm_hw *hw,
+ struct hclge_comm_rss_cfg *rss_cfg);
+int hclge_comm_set_rss_indir_table(struct hnae3_ae_dev *ae_dev,
+ struct hclge_comm_hw *hw, const u16 *indir);
+int hclge_comm_rss_init_cfg(struct hnae3_handle *nic,
+ struct hnae3_ae_dev *ae_dev,
+ struct hclge_comm_rss_cfg *rss_cfg);
+void hclge_comm_get_rss_tc_info(u16 rss_size, u8 hw_tc_map, u16 *tc_offset,
+ u16 *tc_valid, u16 *tc_size);
+int hclge_comm_set_rss_tc_mode(struct hclge_comm_hw *hw, u16 *tc_offset,
+ u16 *tc_valid, u16 *tc_size);
+int hclge_comm_set_rss_hash_key(struct hclge_comm_rss_cfg *rss_cfg,
+ struct hclge_comm_hw *hw, const u8 *key,
+ const u8 hfunc);
+int hclge_comm_set_rss_tuple(struct hnae3_ae_dev *ae_dev,
+ struct hclge_comm_hw *hw,
+ struct hclge_comm_rss_cfg *rss_cfg,
+ struct ethtool_rxnfc *nfc);
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c
new file mode 100644
index 0000000000..f3c9395d83
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2021-2021 Hisilicon Limited.
+
+#include <linux/err.h>
+
+#include "hnae3.h"
+#include "hclge_comm_cmd.h"
+#include "hclge_comm_tqp_stats.h"
+
+u64 *hclge_comm_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
+{
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ struct hclge_comm_tqp *tqp;
+ u64 *buff = data;
+ u16 i;
+
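+	/* TX packet counters for all TQPs are reported first, followed by
+	 * the RX counters, matching the order used in
+	 * hclge_comm_tqps_get_strings().
+	 */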
+ for (i = 0; i < kinfo->num_tqps; i++) {
+ tqp = container_of(kinfo->tqp[i], struct hclge_comm_tqp, q);
+ *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
+ }
+
+ for (i = 0; i < kinfo->num_tqps; i++) {
+ tqp = container_of(kinfo->tqp[i], struct hclge_comm_tqp, q);
+ *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
+ }
+
+ return buff;
+}
+
+int hclge_comm_tqps_get_sset_count(struct hnae3_handle *handle)
+{
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+
+ return kinfo->num_tqps * HCLGE_COMM_QUEUE_PAIR_SIZE;
+}
+
+u8 *hclge_comm_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
+{
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ u8 *buff = data;
+ u16 i;
+
+ for (i = 0; i < kinfo->num_tqps; i++) {
+ struct hclge_comm_tqp *tqp =
+ container_of(kinfo->tqp[i], struct hclge_comm_tqp, q);
+ snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd", tqp->index);
+ buff += ETH_GSTRING_LEN;
+ }
+
+ for (i = 0; i < kinfo->num_tqps; i++) {
+ struct hclge_comm_tqp *tqp =
+ container_of(kinfo->tqp[i], struct hclge_comm_tqp, q);
+ snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd", tqp->index);
+ buff += ETH_GSTRING_LEN;
+ }
+
+ return buff;
+}
+
+int hclge_comm_tqps_update_stats(struct hnae3_handle *handle,
+ struct hclge_comm_hw *hw)
+{
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ struct hclge_comm_tqp *tqp;
+ struct hclge_desc desc;
+ int ret;
+ u16 i;
+
+ for (i = 0; i < kinfo->num_tqps; i++) {
+ tqp = container_of(kinfo->tqp[i], struct hclge_comm_tqp, q);
+ hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_RX_STATS,
+ true);
+
+ desc.data[0] = cpu_to_le32(tqp->index);
+ ret = hclge_comm_cmd_send(hw, &desc, 1);
+ if (ret) {
+ dev_err(&hw->cmq.csq.pdev->dev,
+ "failed to get tqp stat, ret = %d, rx = %u.\n",
+ ret, i);
+ return ret;
+ }
+ tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
+ le32_to_cpu(desc.data[1]);
+
+ hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_TX_STATS,
+ true);
+
+ desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
+ ret = hclge_comm_cmd_send(hw, &desc, 1);
+ if (ret) {
+ dev_err(&hw->cmq.csq.pdev->dev,
+ "failed to get tqp stat, ret = %d, tx = %u.\n",
+ ret, i);
+ return ret;
+ }
+ tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
+ le32_to_cpu(desc.data[1]);
+ }
+
+ return 0;
+}
+
+void hclge_comm_reset_tqp_stats(struct hnae3_handle *handle)
+{
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ struct hclge_comm_tqp *tqp;
+ struct hnae3_queue *queue;
+ u16 i;
+
+ for (i = 0; i < kinfo->num_tqps; i++) {
+ queue = kinfo->tqp[i];
+ tqp = container_of(queue, struct hclge_comm_tqp, q);
+ memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
+ }
+}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.h b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.h
new file mode 100644
index 0000000000..a46350162e
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+// Copyright (c) 2021-2021 Hisilicon Limited.
+
+#ifndef __HCLGE_COMM_TQP_STATS_H
+#define __HCLGE_COMM_TQP_STATS_H
+#include <linux/types.h>
+#include <linux/etherdevice.h>
+#include "hnae3.h"
+
+/* each tqp has one TX queue and one RX queue */
+#define HCLGE_COMM_QUEUE_PAIR_SIZE 2
+
+/* TQP stats */
+struct hclge_comm_tqp_stats {
+	/* query_tqp_tx_queue_statistics, opcode id: 0x0B03 */
+ u64 rcb_tx_ring_pktnum_rcd; /* 32bit */
+	/* query_tqp_rx_queue_statistics, opcode id: 0x0B13 */
+ u64 rcb_rx_ring_pktnum_rcd; /* 32bit */
+};
+
+struct hclge_comm_tqp {
+ /* copy of device pointer from pci_dev,
+	 * used when performing DMA mapping
+ */
+ struct device *dev;
+ struct hnae3_queue q;
+ struct hclge_comm_tqp_stats tqp_stats;
+ u16 index; /* Global index in a NIC controller */
+
+ bool alloced;
+};
+
+u64 *hclge_comm_tqps_get_stats(struct hnae3_handle *handle, u64 *data);
+int hclge_comm_tqps_get_sset_count(struct hnae3_handle *handle);
+u8 *hclge_comm_tqps_get_strings(struct hnae3_handle *handle, u8 *data);
+void hclge_comm_reset_tqp_stats(struct hnae3_handle *handle);
+int hclge_comm_tqps_update_stats(struct hnae3_handle *handle,
+ struct hclge_comm_hw *hw);
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
new file mode 100644
index 0000000000..3b6dbf158b
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
+
+#include "hnae3.h"
+#include "hns3_enet.h"
+
+static int hns3_dcbnl_ieee_getets(struct net_device *ndev, struct ieee_ets *ets)
+{
+ struct hnae3_handle *h = hns3_get_handle(ndev);
+
+ if (hns3_nic_resetting(ndev))
+ return -EBUSY;
+
+ if (h->kinfo.dcb_ops->ieee_getets)
+ return h->kinfo.dcb_ops->ieee_getets(h, ets);
+
+ return -EOPNOTSUPP;
+}
+
+static int hns3_dcbnl_ieee_setets(struct net_device *ndev, struct ieee_ets *ets)
+{
+ struct hnae3_handle *h = hns3_get_handle(ndev);
+
+ if (hns3_nic_resetting(ndev))
+ return -EBUSY;
+
+ if (h->kinfo.dcb_ops->ieee_setets)
+ return h->kinfo.dcb_ops->ieee_setets(h, ets);
+
+ return -EOPNOTSUPP;
+}
+
+static int hns3_dcbnl_ieee_getpfc(struct net_device *ndev, struct ieee_pfc *pfc)
+{
+ struct hnae3_handle *h = hns3_get_handle(ndev);
+
+ if (hns3_nic_resetting(ndev))
+ return -EBUSY;
+
+ if (h->kinfo.dcb_ops->ieee_getpfc)
+ return h->kinfo.dcb_ops->ieee_getpfc(h, pfc);
+
+ return -EOPNOTSUPP;
+}
+
+static int hns3_dcbnl_ieee_setpfc(struct net_device *ndev, struct ieee_pfc *pfc)
+{
+ struct hnae3_handle *h = hns3_get_handle(ndev);
+
+ if (hns3_nic_resetting(ndev))
+ return -EBUSY;
+
+ if (h->kinfo.dcb_ops->ieee_setpfc)
+ return h->kinfo.dcb_ops->ieee_setpfc(h, pfc);
+
+ return -EOPNOTSUPP;
+}
+
+static int hns3_dcbnl_ieee_setapp(struct net_device *ndev, struct dcb_app *app)
+{
+ struct hnae3_handle *h = hns3_get_handle(ndev);
+
+ if (hns3_nic_resetting(ndev))
+ return -EBUSY;
+
+ if (h->kinfo.dcb_ops->ieee_setapp)
+ return h->kinfo.dcb_ops->ieee_setapp(h, app);
+
+ return -EOPNOTSUPP;
+}
+
+static int hns3_dcbnl_ieee_delapp(struct net_device *ndev, struct dcb_app *app)
+{
+ struct hnae3_handle *h = hns3_get_handle(ndev);
+
+ if (hns3_nic_resetting(ndev))
+ return -EBUSY;
+
+	if (h->kinfo.dcb_ops->ieee_delapp)
+ return h->kinfo.dcb_ops->ieee_delapp(h, app);
+
+ return -EOPNOTSUPP;
+}
+
+/* DCBX configuration */
+static u8 hns3_dcbnl_getdcbx(struct net_device *ndev)
+{
+ struct hnae3_handle *h = hns3_get_handle(ndev);
+
+ if (h->kinfo.dcb_ops->getdcbx)
+ return h->kinfo.dcb_ops->getdcbx(h);
+
+ return 0;
+}
+
+/* return 0 if successful, otherwise fail */
+static u8 hns3_dcbnl_setdcbx(struct net_device *ndev, u8 mode)
+{
+ struct hnae3_handle *h = hns3_get_handle(ndev);
+
+ if (h->kinfo.dcb_ops->setdcbx)
+ return h->kinfo.dcb_ops->setdcbx(h, mode);
+
+ return 1;
+}
+
+static const struct dcbnl_rtnl_ops hns3_dcbnl_ops = {
+ .ieee_getets = hns3_dcbnl_ieee_getets,
+ .ieee_setets = hns3_dcbnl_ieee_setets,
+ .ieee_getpfc = hns3_dcbnl_ieee_getpfc,
+ .ieee_setpfc = hns3_dcbnl_ieee_setpfc,
+ .ieee_setapp = hns3_dcbnl_ieee_setapp,
+ .ieee_delapp = hns3_dcbnl_ieee_delapp,
+ .getdcbx = hns3_dcbnl_getdcbx,
+ .setdcbx = hns3_dcbnl_setdcbx,
+};
+
+/* hns3_dcbnl_setup - DCBNL setup
+ * @handle: the corresponding vport handle
+ * Set up DCBNL
+ */
+void hns3_dcbnl_setup(struct hnae3_handle *handle)
+{
+ struct net_device *dev = handle->kinfo.netdev;
+
+ if ((!handle->kinfo.dcb_ops) || (handle->flags & HNAE3_SUPPORT_VF))
+ return;
+
+ dev->dcbnl_ops = &hns3_dcbnl_ops;
+}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
new file mode 100644
index 0000000000..4f385a18d2
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -0,0 +1,1452 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) 2018-2019 Hisilicon Limited. */
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+
+#include "hnae3.h"
+#include "hns3_debugfs.h"
+#include "hns3_enet.h"
+
+static struct dentry *hns3_dbgfs_root;
+
+static struct hns3_dbg_dentry_info hns3_dbg_dentry[] = {
+ {
+ .name = "tm"
+ },
+ {
+ .name = "tx_bd_info"
+ },
+ {
+ .name = "rx_bd_info"
+ },
+ {
+ .name = "mac_list"
+ },
+ {
+ .name = "reg"
+ },
+ {
+ .name = "queue"
+ },
+ {
+ .name = "fd"
+ },
+ /* keep common at the bottom and add new directory above */
+ {
+ .name = "common"
+ },
+};
+
+static int hns3_dbg_bd_file_init(struct hnae3_handle *handle, u32 cmd);
+static int hns3_dbg_common_file_init(struct hnae3_handle *handle, u32 cmd);
+
+static struct hns3_dbg_cmd_info hns3_dbg_cmd[] = {
+ {
+ .name = "tm_nodes",
+ .cmd = HNAE3_DBG_CMD_TM_NODES,
+ .dentry = HNS3_DBG_DENTRY_TM,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "tm_priority",
+ .cmd = HNAE3_DBG_CMD_TM_PRI,
+ .dentry = HNS3_DBG_DENTRY_TM,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "tm_qset",
+ .cmd = HNAE3_DBG_CMD_TM_QSET,
+ .dentry = HNS3_DBG_DENTRY_TM,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "tm_map",
+ .cmd = HNAE3_DBG_CMD_TM_MAP,
+ .dentry = HNS3_DBG_DENTRY_TM,
+ .buf_len = HNS3_DBG_READ_LEN_1MB,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "tm_pg",
+ .cmd = HNAE3_DBG_CMD_TM_PG,
+ .dentry = HNS3_DBG_DENTRY_TM,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "tm_port",
+ .cmd = HNAE3_DBG_CMD_TM_PORT,
+ .dentry = HNS3_DBG_DENTRY_TM,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "tc_sch_info",
+ .cmd = HNAE3_DBG_CMD_TC_SCH_INFO,
+ .dentry = HNS3_DBG_DENTRY_TM,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "qos_pause_cfg",
+ .cmd = HNAE3_DBG_CMD_QOS_PAUSE_CFG,
+ .dentry = HNS3_DBG_DENTRY_TM,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "qos_pri_map",
+ .cmd = HNAE3_DBG_CMD_QOS_PRI_MAP,
+ .dentry = HNS3_DBG_DENTRY_TM,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "qos_dscp_map",
+ .cmd = HNAE3_DBG_CMD_QOS_DSCP_MAP,
+ .dentry = HNS3_DBG_DENTRY_TM,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "qos_buf_cfg",
+ .cmd = HNAE3_DBG_CMD_QOS_BUF_CFG,
+ .dentry = HNS3_DBG_DENTRY_TM,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "dev_info",
+ .cmd = HNAE3_DBG_CMD_DEV_INFO,
+ .dentry = HNS3_DBG_DENTRY_COMMON,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "tx_bd_queue",
+ .cmd = HNAE3_DBG_CMD_TX_BD,
+ .dentry = HNS3_DBG_DENTRY_TX_BD,
+ .buf_len = HNS3_DBG_READ_LEN_5MB,
+ .init = hns3_dbg_bd_file_init,
+ },
+ {
+ .name = "rx_bd_queue",
+ .cmd = HNAE3_DBG_CMD_RX_BD,
+ .dentry = HNS3_DBG_DENTRY_RX_BD,
+ .buf_len = HNS3_DBG_READ_LEN_4MB,
+ .init = hns3_dbg_bd_file_init,
+ },
+ {
+ .name = "uc",
+ .cmd = HNAE3_DBG_CMD_MAC_UC,
+ .dentry = HNS3_DBG_DENTRY_MAC,
+ .buf_len = HNS3_DBG_READ_LEN_128KB,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "mc",
+ .cmd = HNAE3_DBG_CMD_MAC_MC,
+ .dentry = HNS3_DBG_DENTRY_MAC,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "mng_tbl",
+ .cmd = HNAE3_DBG_CMD_MNG_TBL,
+ .dentry = HNS3_DBG_DENTRY_COMMON,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "loopback",
+ .cmd = HNAE3_DBG_CMD_LOOPBACK,
+ .dentry = HNS3_DBG_DENTRY_COMMON,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "interrupt_info",
+ .cmd = HNAE3_DBG_CMD_INTERRUPT_INFO,
+ .dentry = HNS3_DBG_DENTRY_COMMON,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "reset_info",
+ .cmd = HNAE3_DBG_CMD_RESET_INFO,
+ .dentry = HNS3_DBG_DENTRY_COMMON,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "imp_info",
+ .cmd = HNAE3_DBG_CMD_IMP_INFO,
+ .dentry = HNS3_DBG_DENTRY_COMMON,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "ncl_config",
+ .cmd = HNAE3_DBG_CMD_NCL_CONFIG,
+ .dentry = HNS3_DBG_DENTRY_COMMON,
+ .buf_len = HNS3_DBG_READ_LEN_128KB,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "mac_tnl_status",
+ .cmd = HNAE3_DBG_CMD_MAC_TNL_STATUS,
+ .dentry = HNS3_DBG_DENTRY_COMMON,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "bios_common",
+ .cmd = HNAE3_DBG_CMD_REG_BIOS_COMMON,
+ .dentry = HNS3_DBG_DENTRY_REG,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "ssu",
+ .cmd = HNAE3_DBG_CMD_REG_SSU,
+ .dentry = HNS3_DBG_DENTRY_REG,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "igu_egu",
+ .cmd = HNAE3_DBG_CMD_REG_IGU_EGU,
+ .dentry = HNS3_DBG_DENTRY_REG,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "rpu",
+ .cmd = HNAE3_DBG_CMD_REG_RPU,
+ .dentry = HNS3_DBG_DENTRY_REG,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "ncsi",
+ .cmd = HNAE3_DBG_CMD_REG_NCSI,
+ .dentry = HNS3_DBG_DENTRY_REG,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "rtc",
+ .cmd = HNAE3_DBG_CMD_REG_RTC,
+ .dentry = HNS3_DBG_DENTRY_REG,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "ppp",
+ .cmd = HNAE3_DBG_CMD_REG_PPP,
+ .dentry = HNS3_DBG_DENTRY_REG,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "rcb",
+ .cmd = HNAE3_DBG_CMD_REG_RCB,
+ .dentry = HNS3_DBG_DENTRY_REG,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "tqp",
+ .cmd = HNAE3_DBG_CMD_REG_TQP,
+ .dentry = HNS3_DBG_DENTRY_REG,
+ .buf_len = HNS3_DBG_READ_LEN_128KB,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "mac",
+ .cmd = HNAE3_DBG_CMD_REG_MAC,
+ .dentry = HNS3_DBG_DENTRY_REG,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "dcb",
+ .cmd = HNAE3_DBG_CMD_REG_DCB,
+ .dentry = HNS3_DBG_DENTRY_REG,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "queue_map",
+ .cmd = HNAE3_DBG_CMD_QUEUE_MAP,
+ .dentry = HNS3_DBG_DENTRY_QUEUE,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "rx_queue_info",
+ .cmd = HNAE3_DBG_CMD_RX_QUEUE_INFO,
+ .dentry = HNS3_DBG_DENTRY_QUEUE,
+ .buf_len = HNS3_DBG_READ_LEN_1MB,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "tx_queue_info",
+ .cmd = HNAE3_DBG_CMD_TX_QUEUE_INFO,
+ .dentry = HNS3_DBG_DENTRY_QUEUE,
+ .buf_len = HNS3_DBG_READ_LEN_1MB,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "fd_tcam",
+ .cmd = HNAE3_DBG_CMD_FD_TCAM,
+ .dentry = HNS3_DBG_DENTRY_FD,
+ .buf_len = HNS3_DBG_READ_LEN_1MB,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "service_task_info",
+ .cmd = HNAE3_DBG_CMD_SERV_INFO,
+ .dentry = HNS3_DBG_DENTRY_COMMON,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "vlan_config",
+ .cmd = HNAE3_DBG_CMD_VLAN_CONFIG,
+ .dentry = HNS3_DBG_DENTRY_COMMON,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "ptp_info",
+ .cmd = HNAE3_DBG_CMD_PTP_INFO,
+ .dentry = HNS3_DBG_DENTRY_COMMON,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "fd_counter",
+ .cmd = HNAE3_DBG_CMD_FD_COUNTER,
+ .dentry = HNS3_DBG_DENTRY_FD,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "umv_info",
+ .cmd = HNAE3_DBG_CMD_UMV_INFO,
+ .dentry = HNS3_DBG_DENTRY_COMMON,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "page_pool_info",
+ .cmd = HNAE3_DBG_CMD_PAGE_POOL_INFO,
+ .dentry = HNS3_DBG_DENTRY_COMMON,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "coalesce_info",
+ .cmd = HNAE3_DBG_CMD_COAL_INFO,
+ .dentry = HNS3_DBG_DENTRY_COMMON,
+ .buf_len = HNS3_DBG_READ_LEN_1MB,
+ .init = hns3_dbg_common_file_init,
+ },
+};
+
+static struct hns3_dbg_cap_info hns3_dbg_cap[] = {
+ {
+ .name = "support FD",
+ .cap_bit = HNAE3_DEV_SUPPORT_FD_B,
+ }, {
+ .name = "support GRO",
+ .cap_bit = HNAE3_DEV_SUPPORT_GRO_B,
+ }, {
+ .name = "support FEC",
+ .cap_bit = HNAE3_DEV_SUPPORT_FEC_B,
+ }, {
+ .name = "support UDP GSO",
+ .cap_bit = HNAE3_DEV_SUPPORT_UDP_GSO_B,
+ }, {
+ .name = "support PTP",
+ .cap_bit = HNAE3_DEV_SUPPORT_PTP_B,
+ }, {
+ .name = "support INT QL",
+ .cap_bit = HNAE3_DEV_SUPPORT_INT_QL_B,
+ }, {
+ .name = "support HW TX csum",
+ .cap_bit = HNAE3_DEV_SUPPORT_HW_TX_CSUM_B,
+ }, {
+ .name = "support UDP tunnel csum",
+ .cap_bit = HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B,
+ }, {
+ .name = "support TX push",
+ .cap_bit = HNAE3_DEV_SUPPORT_TX_PUSH_B,
+ }, {
+ .name = "support imp-controlled PHY",
+ .cap_bit = HNAE3_DEV_SUPPORT_PHY_IMP_B,
+ }, {
+ .name = "support imp-controlled RAS",
+ .cap_bit = HNAE3_DEV_SUPPORT_RAS_IMP_B,
+ }, {
+ .name = "support rxd advanced layout",
+ .cap_bit = HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B,
+ }, {
+ .name = "support port vlan bypass",
+ .cap_bit = HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B,
+ }, {
+ .name = "support modify vlan filter state",
+ .cap_bit = HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B,
+ }, {
+ .name = "support FEC statistics",
+ .cap_bit = HNAE3_DEV_SUPPORT_FEC_STATS_B,
+ }, {
+ .name = "support lane num",
+ .cap_bit = HNAE3_DEV_SUPPORT_LANE_NUM_B,
+ }, {
+ .name = "support wake on lan",
+ .cap_bit = HNAE3_DEV_SUPPORT_WOL_B,
+ }, {
+ .name = "support tm flush",
+ .cap_bit = HNAE3_DEV_SUPPORT_TM_FLUSH_B,
+ }
+};
+
+static const struct hns3_dbg_item coal_info_items[] = {
+ { "VEC_ID", 2 },
+ { "ALGO_STATE", 2 },
+ { "PROFILE_ID", 2 },
+ { "CQE_MODE", 2 },
+ { "TUNE_STATE", 2 },
+ { "STEPS_LEFT", 2 },
+ { "STEPS_RIGHT", 2 },
+ { "TIRED", 2 },
+ { "SW_GL", 2 },
+ { "SW_QL", 2 },
+ { "HW_GL", 2 },
+ { "HW_QL", 2 },
+};
+
+static const char * const dim_cqe_mode_str[] = { "EQE", "CQE" };
+static const char * const dim_state_str[] = { "START", "IN_PROG", "APPLY" };
+static const char * const
+dim_tune_stat_str[] = { "ON_TOP", "TIRED", "RIGHT", "LEFT" };
+
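+/* Build one line of fixed-width columns into @content: when @result is NULL
+ * the column names from @items are printed as a header, otherwise each result
+ * string is placed in its column. The line is '\n' and NUL terminated.
+ */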
+static void hns3_dbg_fill_content(char *content, u16 len,
+ const struct hns3_dbg_item *items,
+ const char **result, u16 size)
+{
+#define HNS3_DBG_LINE_END_LEN 2
+ char *pos = content;
+ u16 item_len;
+ u16 i;
+
+ if (!len) {
+ return;
+ } else if (len <= HNS3_DBG_LINE_END_LEN) {
+ *pos++ = '\0';
+ return;
+ }
+
+ memset(content, ' ', len);
+ len -= HNS3_DBG_LINE_END_LEN;
+
+ for (i = 0; i < size; i++) {
+ item_len = strlen(items[i].name) + items[i].interval;
+ if (len < item_len)
+ break;
+
+ if (result) {
+ if (item_len < strlen(result[i]))
+ break;
+ memcpy(pos, result[i], strlen(result[i]));
+ } else {
+ memcpy(pos, items[i].name, strlen(items[i].name));
+ }
+ pos += item_len;
+ len -= item_len;
+ }
+ *pos++ = '\n';
+ *pos++ = '\0';
+}
+
+static void hns3_get_coal_info(struct hns3_enet_tqp_vector *tqp_vector,
+ char **result, int i, bool is_tx)
+{
+ unsigned int gl_offset, ql_offset;
+ struct hns3_enet_coalesce *coal;
+ unsigned int reg_val;
+ unsigned int j = 0;
+ struct dim *dim;
+ bool ql_enable;
+
+ if (is_tx) {
+ coal = &tqp_vector->tx_group.coal;
+ dim = &tqp_vector->tx_group.dim;
+ gl_offset = HNS3_VECTOR_GL1_OFFSET;
+ ql_offset = HNS3_VECTOR_TX_QL_OFFSET;
+ ql_enable = tqp_vector->tx_group.coal.ql_enable;
+ } else {
+ coal = &tqp_vector->rx_group.coal;
+ dim = &tqp_vector->rx_group.dim;
+ gl_offset = HNS3_VECTOR_GL0_OFFSET;
+ ql_offset = HNS3_VECTOR_RX_QL_OFFSET;
+ ql_enable = tqp_vector->rx_group.coal.ql_enable;
+ }
+
+ sprintf(result[j++], "%d", i);
+ sprintf(result[j++], "%s", dim->state < ARRAY_SIZE(dim_state_str) ?
+ dim_state_str[dim->state] : "unknown");
+ sprintf(result[j++], "%u", dim->profile_ix);
+ sprintf(result[j++], "%s", dim->mode < ARRAY_SIZE(dim_cqe_mode_str) ?
+ dim_cqe_mode_str[dim->mode] : "unknown");
+ sprintf(result[j++], "%s",
+ dim->tune_state < ARRAY_SIZE(dim_tune_stat_str) ?
+ dim_tune_stat_str[dim->tune_state] : "unknown");
+ sprintf(result[j++], "%u", dim->steps_left);
+ sprintf(result[j++], "%u", dim->steps_right);
+ sprintf(result[j++], "%u", dim->tired);
+ sprintf(result[j++], "%u", coal->int_gl);
+ sprintf(result[j++], "%u", coal->int_ql);
+ reg_val = readl(tqp_vector->mask_addr + gl_offset) &
+ HNS3_VECTOR_GL_MASK;
+ sprintf(result[j++], "%u", reg_val);
+ if (ql_enable) {
+ reg_val = readl(tqp_vector->mask_addr + ql_offset) &
+ HNS3_VECTOR_QL_MASK;
+ sprintf(result[j++], "%u", reg_val);
+ } else {
+ sprintf(result[j++], "NA");
+ }
+}
+
+static void hns3_dump_coal_info(struct hnae3_handle *h, char *buf, int len,
+ int *pos, bool is_tx)
+{
+ char data_str[ARRAY_SIZE(coal_info_items)][HNS3_DBG_DATA_STR_LEN];
+ char *result[ARRAY_SIZE(coal_info_items)];
+ struct hns3_enet_tqp_vector *tqp_vector;
+ struct hns3_nic_priv *priv = h->priv;
+ char content[HNS3_DBG_INFO_LEN];
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(coal_info_items); i++)
+ result[i] = &data_str[i][0];
+
+ *pos += scnprintf(buf + *pos, len - *pos,
+ "%s interrupt coalesce info:\n",
+ is_tx ? "tx" : "rx");
+ hns3_dbg_fill_content(content, sizeof(content), coal_info_items,
+ NULL, ARRAY_SIZE(coal_info_items));
+ *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
+
+ for (i = 0; i < priv->vector_num; i++) {
+ tqp_vector = &priv->tqp_vector[i];
+ hns3_get_coal_info(tqp_vector, result, i, is_tx);
+ hns3_dbg_fill_content(content, sizeof(content), coal_info_items,
+ (const char **)result,
+ ARRAY_SIZE(coal_info_items));
+ *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
+ }
+}
+
+static int hns3_dbg_coal_info(struct hnae3_handle *h, char *buf, int len)
+{
+ int pos = 0;
+
+ hns3_dump_coal_info(h, buf, len, &pos, true);
+ pos += scnprintf(buf + pos, len - pos, "\n");
+ hns3_dump_coal_info(h, buf, len, &pos, false);
+
+ return 0;
+}
+
+static const struct hns3_dbg_item tx_spare_info_items[] = {
+ { "QUEUE_ID", 2 },
+ { "COPYBREAK", 2 },
+ { "LEN", 7 },
+ { "NTU", 4 },
+ { "NTC", 4 },
+ { "LTC", 4 },
+ { "DMA", 17 },
+};
+
+static void hns3_dbg_tx_spare_info(struct hns3_enet_ring *ring, char *buf,
+ int len, u32 ring_num, int *pos)
+{
+ char data_str[ARRAY_SIZE(tx_spare_info_items)][HNS3_DBG_DATA_STR_LEN];
+ struct hns3_tx_spare *tx_spare = ring->tx_spare;
+ char *result[ARRAY_SIZE(tx_spare_info_items)];
+ char content[HNS3_DBG_INFO_LEN];
+ u32 i, j;
+
+ if (!tx_spare) {
+ *pos += scnprintf(buf + *pos, len - *pos,
+ "tx spare buffer is not enabled\n");
+ return;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(tx_spare_info_items); i++)
+ result[i] = &data_str[i][0];
+
+ *pos += scnprintf(buf + *pos, len - *pos, "tx spare buffer info\n");
+ hns3_dbg_fill_content(content, sizeof(content), tx_spare_info_items,
+ NULL, ARRAY_SIZE(tx_spare_info_items));
+ *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
+
+ for (i = 0; i < ring_num; i++) {
+ j = 0;
+ sprintf(result[j++], "%u", i);
+ sprintf(result[j++], "%u", ring->tx_copybreak);
+ sprintf(result[j++], "%u", tx_spare->len);
+ sprintf(result[j++], "%u", tx_spare->next_to_use);
+ sprintf(result[j++], "%u", tx_spare->next_to_clean);
+ sprintf(result[j++], "%u", tx_spare->last_to_clean);
+ sprintf(result[j++], "%pad", &tx_spare->dma);
+ hns3_dbg_fill_content(content, sizeof(content),
+ tx_spare_info_items,
+ (const char **)result,
+ ARRAY_SIZE(tx_spare_info_items));
+ *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
+ }
+}
+
+static const struct hns3_dbg_item rx_queue_info_items[] = {
+ { "QUEUE_ID", 2 },
+ { "BD_NUM", 2 },
+ { "BD_LEN", 2 },
+ { "TAIL", 2 },
+ { "HEAD", 2 },
+ { "FBDNUM", 2 },
+ { "PKTNUM", 5 },
+ { "COPYBREAK", 2 },
+ { "RING_EN", 2 },
+ { "RX_RING_EN", 2 },
+ { "BASE_ADDR", 10 },
+};
+
+static void hns3_dump_rx_queue_info(struct hns3_enet_ring *ring,
+ struct hnae3_ae_dev *ae_dev, char **result,
+ u32 index)
+{
+ u32 base_add_l, base_add_h;
+ u32 j = 0;
+
+ sprintf(result[j++], "%u", index);
+
+ sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_RX_RING_BD_NUM_REG));
+
+ sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_RX_RING_BD_LEN_REG));
+
+ sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_RX_RING_TAIL_REG));
+
+ sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_RX_RING_HEAD_REG));
+
+ sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_RX_RING_FBDNUM_REG));
+
+ sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_RX_RING_PKTNUM_RECORD_REG));
+ sprintf(result[j++], "%u", ring->rx_copybreak);
+
+ sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_EN_REG) ? "on" : "off");
+
+ if (hnae3_ae_dev_tqp_txrx_indep_supported(ae_dev))
+ sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_RX_EN_REG) ? "on" : "off");
+ else
+ sprintf(result[j++], "%s", "NA");
+
+ base_add_h = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_RX_RING_BASEADDR_H_REG);
+ base_add_l = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_RX_RING_BASEADDR_L_REG);
+ sprintf(result[j++], "0x%08x%08x", base_add_h, base_add_l);
+}
+
+static int hns3_dbg_rx_queue_info(struct hnae3_handle *h,
+ char *buf, int len)
+{
+ char data_str[ARRAY_SIZE(rx_queue_info_items)][HNS3_DBG_DATA_STR_LEN];
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+ char *result[ARRAY_SIZE(rx_queue_info_items)];
+ struct hns3_nic_priv *priv = h->priv;
+ char content[HNS3_DBG_INFO_LEN];
+ struct hns3_enet_ring *ring;
+ int pos = 0;
+ u32 i;
+
+ if (!priv->ring) {
+ dev_err(&h->pdev->dev, "priv->ring is NULL\n");
+ return -EFAULT;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(rx_queue_info_items); i++)
+ result[i] = &data_str[i][0];
+
+ hns3_dbg_fill_content(content, sizeof(content), rx_queue_info_items,
+ NULL, ARRAY_SIZE(rx_queue_info_items));
+ pos += scnprintf(buf + pos, len - pos, "%s", content);
+ for (i = 0; i < h->kinfo.num_tqps; i++) {
+		/* Each iteration must check whether the instance is being
+		 * reset, to prevent references to invalid memory, and must
+		 * ensure that the following code executes within 100ms.
+ */
+ if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) ||
+ test_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
+ return -EPERM;
+
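+		/* RX rings are stored after the num_tqps TX rings in priv->ring */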
+ ring = &priv->ring[(u32)(i + h->kinfo.num_tqps)];
+ hns3_dump_rx_queue_info(ring, ae_dev, result, i);
+ hns3_dbg_fill_content(content, sizeof(content),
+ rx_queue_info_items,
+ (const char **)result,
+ ARRAY_SIZE(rx_queue_info_items));
+ pos += scnprintf(buf + pos, len - pos, "%s", content);
+ }
+
+ return 0;
+}
+
+static const struct hns3_dbg_item tx_queue_info_items[] = {
+ { "QUEUE_ID", 2 },
+ { "BD_NUM", 2 },
+ { "TC", 2 },
+ { "TAIL", 2 },
+ { "HEAD", 2 },
+ { "FBDNUM", 2 },
+ { "OFFSET", 2 },
+ { "PKTNUM", 5 },
+ { "RING_EN", 2 },
+ { "TX_RING_EN", 2 },
+ { "BASE_ADDR", 10 },
+};
+
+static void hns3_dump_tx_queue_info(struct hns3_enet_ring *ring,
+ struct hnae3_ae_dev *ae_dev, char **result,
+ u32 index)
+{
+ u32 base_add_l, base_add_h;
+ u32 j = 0;
+
+ sprintf(result[j++], "%u", index);
+ sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_TX_RING_BD_NUM_REG));
+
+ sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_TX_RING_TC_REG));
+
+ sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_TX_RING_TAIL_REG));
+
+ sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_TX_RING_HEAD_REG));
+
+ sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_TX_RING_FBDNUM_REG));
+
+ sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_TX_RING_OFFSET_REG));
+
+ sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_TX_RING_PKTNUM_RECORD_REG));
+
+ sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_EN_REG) ? "on" : "off");
+
+ if (hnae3_ae_dev_tqp_txrx_indep_supported(ae_dev))
+ sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_TX_EN_REG) ? "on" : "off");
+ else
+ sprintf(result[j++], "%s", "NA");
+
+ base_add_h = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_TX_RING_BASEADDR_H_REG);
+ base_add_l = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_TX_RING_BASEADDR_L_REG);
+ sprintf(result[j++], "0x%08x%08x", base_add_h, base_add_l);
+}
+
+static int hns3_dbg_tx_queue_info(struct hnae3_handle *h,
+ char *buf, int len)
+{
+ char data_str[ARRAY_SIZE(tx_queue_info_items)][HNS3_DBG_DATA_STR_LEN];
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+ char *result[ARRAY_SIZE(tx_queue_info_items)];
+ struct hns3_nic_priv *priv = h->priv;
+ char content[HNS3_DBG_INFO_LEN];
+ struct hns3_enet_ring *ring;
+ int pos = 0;
+ u32 i;
+
+ if (!priv->ring) {
+ dev_err(&h->pdev->dev, "priv->ring is NULL\n");
+ return -EFAULT;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(tx_queue_info_items); i++)
+ result[i] = &data_str[i][0];
+
+ hns3_dbg_fill_content(content, sizeof(content), tx_queue_info_items,
+ NULL, ARRAY_SIZE(tx_queue_info_items));
+ pos += scnprintf(buf + pos, len - pos, "%s", content);
+
+ for (i = 0; i < h->kinfo.num_tqps; i++) {
+		/* Each iteration must check whether the instance is being
+		 * reset, to prevent references to invalid memory, and must
+		 * ensure that the following code executes within 100ms.
+ */
+ if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) ||
+ test_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
+ return -EPERM;
+
+ ring = &priv->ring[i];
+ hns3_dump_tx_queue_info(ring, ae_dev, result, i);
+ hns3_dbg_fill_content(content, sizeof(content),
+ tx_queue_info_items,
+ (const char **)result,
+ ARRAY_SIZE(tx_queue_info_items));
+ pos += scnprintf(buf + pos, len - pos, "%s", content);
+ }
+
+ hns3_dbg_tx_spare_info(ring, buf, len, h->kinfo.num_tqps, &pos);
+
+ return 0;
+}
+
+static const struct hns3_dbg_item queue_map_items[] = {
+ { "local_queue_id", 2 },
+ { "global_queue_id", 2 },
+ { "vector_id", 2 },
+};
+
+static int hns3_dbg_queue_map(struct hnae3_handle *h, char *buf, int len)
+{
+ char data_str[ARRAY_SIZE(queue_map_items)][HNS3_DBG_DATA_STR_LEN];
+ char *result[ARRAY_SIZE(queue_map_items)];
+ struct hns3_nic_priv *priv = h->priv;
+ char content[HNS3_DBG_INFO_LEN];
+ int pos = 0;
+ int j;
+ u32 i;
+
+ if (!h->ae_algo->ops->get_global_queue_id)
+ return -EOPNOTSUPP;
+
+ for (i = 0; i < ARRAY_SIZE(queue_map_items); i++)
+ result[i] = &data_str[i][0];
+
+ hns3_dbg_fill_content(content, sizeof(content), queue_map_items,
+ NULL, ARRAY_SIZE(queue_map_items));
+ pos += scnprintf(buf + pos, len - pos, "%s", content);
+ for (i = 0; i < h->kinfo.num_tqps; i++) {
+ if (!priv->ring || !priv->ring[i].tqp_vector)
+ continue;
+ j = 0;
+ sprintf(result[j++], "%u", i);
+ sprintf(result[j++], "%u",
+ h->ae_algo->ops->get_global_queue_id(h, i));
+ sprintf(result[j++], "%d",
+ priv->ring[i].tqp_vector->vector_irq);
+ hns3_dbg_fill_content(content, sizeof(content), queue_map_items,
+ (const char **)result,
+ ARRAY_SIZE(queue_map_items));
+ pos += scnprintf(buf + pos, len - pos, "%s", content);
+ }
+
+ return 0;
+}
+
+static const struct hns3_dbg_item rx_bd_info_items[] = {
+ { "BD_IDX", 3 },
+ { "L234_INFO", 2 },
+ { "PKT_LEN", 3 },
+ { "SIZE", 4 },
+ { "RSS_HASH", 4 },
+ { "FD_ID", 2 },
+ { "VLAN_TAG", 2 },
+ { "O_DM_VLAN_ID_FB", 2 },
+ { "OT_VLAN_TAG", 2 },
+ { "BD_BASE_INFO", 2 },
+ { "PTYPE", 2 },
+ { "HW_CSUM", 2 },
+};
+
+static void hns3_dump_rx_bd_info(struct hns3_nic_priv *priv,
+ struct hns3_desc *desc, char **result, int idx)
+{
+ unsigned int j = 0;
+
+ sprintf(result[j++], "%d", idx);
+ sprintf(result[j++], "%#x", le32_to_cpu(desc->rx.l234_info));
+ sprintf(result[j++], "%u", le16_to_cpu(desc->rx.pkt_len));
+ sprintf(result[j++], "%u", le16_to_cpu(desc->rx.size));
+ sprintf(result[j++], "%#x", le32_to_cpu(desc->rx.rss_hash));
+ sprintf(result[j++], "%u", le16_to_cpu(desc->rx.fd_id));
+ sprintf(result[j++], "%u", le16_to_cpu(desc->rx.vlan_tag));
+ sprintf(result[j++], "%u", le16_to_cpu(desc->rx.o_dm_vlan_id_fb));
+ sprintf(result[j++], "%u", le16_to_cpu(desc->rx.ot_vlan_tag));
+ sprintf(result[j++], "%#x", le32_to_cpu(desc->rx.bd_base_info));
+ if (test_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state)) {
+ u32 ol_info = le32_to_cpu(desc->rx.ol_info);
+
+ sprintf(result[j++], "%5lu", hnae3_get_field(ol_info,
+ HNS3_RXD_PTYPE_M,
+ HNS3_RXD_PTYPE_S));
+ sprintf(result[j++], "%7u", le16_to_cpu(desc->csum));
+ } else {
+ sprintf(result[j++], "NA");
+ sprintf(result[j++], "NA");
+ }
+}
+
+static int hns3_dbg_rx_bd_info(struct hns3_dbg_data *d, char *buf, int len)
+{
+ char data_str[ARRAY_SIZE(rx_bd_info_items)][HNS3_DBG_DATA_STR_LEN];
+ struct hns3_nic_priv *priv = d->handle->priv;
+ char *result[ARRAY_SIZE(rx_bd_info_items)];
+ char content[HNS3_DBG_INFO_LEN];
+ struct hns3_enet_ring *ring;
+ struct hns3_desc *desc;
+ unsigned int i;
+ int pos = 0;
+
+ if (d->qid >= d->handle->kinfo.num_tqps) {
+ dev_err(&d->handle->pdev->dev,
+ "queue%u is not in use\n", d->qid);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(rx_bd_info_items); i++)
+ result[i] = &data_str[i][0];
+
+ pos += scnprintf(buf + pos, len - pos,
+ "Queue %u rx bd info:\n", d->qid);
+ hns3_dbg_fill_content(content, sizeof(content), rx_bd_info_items,
+ NULL, ARRAY_SIZE(rx_bd_info_items));
+ pos += scnprintf(buf + pos, len - pos, "%s", content);
+
+ ring = &priv->ring[d->qid + d->handle->kinfo.num_tqps];
+ for (i = 0; i < ring->desc_num; i++) {
+ desc = &ring->desc[i];
+
+ hns3_dump_rx_bd_info(priv, desc, result, i);
+ hns3_dbg_fill_content(content, sizeof(content),
+ rx_bd_info_items, (const char **)result,
+ ARRAY_SIZE(rx_bd_info_items));
+ pos += scnprintf(buf + pos, len - pos, "%s", content);
+ }
+
+ return 0;
+}
+
+static const struct hns3_dbg_item tx_bd_info_items[] = {
+ { "BD_IDX", 2 },
+ { "ADDRESS", 13 },
+ { "VLAN_TAG", 2 },
+ { "SIZE", 2 },
+ { "T_CS_VLAN_TSO", 2 },
+ { "OT_VLAN_TAG", 3 },
+ { "TV", 5 },
+ { "OLT_VLAN_LEN", 2 },
+ { "PAYLEN_OL4CS", 2 },
+ { "BD_FE_SC_VLD", 2 },
+ { "MSS_HW_CSUM", 0 },
+};
+
+static void hns3_dump_tx_bd_info(struct hns3_desc *desc, char **result, int idx)
+{
+ unsigned int j = 0;
+
+ sprintf(result[j++], "%d", idx);
+ sprintf(result[j++], "%#llx", le64_to_cpu(desc->addr));
+ sprintf(result[j++], "%u", le16_to_cpu(desc->tx.vlan_tag));
+ sprintf(result[j++], "%u", le16_to_cpu(desc->tx.send_size));
+ sprintf(result[j++], "%#x",
+ le32_to_cpu(desc->tx.type_cs_vlan_tso_len));
+ sprintf(result[j++], "%u", le16_to_cpu(desc->tx.outer_vlan_tag));
+ sprintf(result[j++], "%u", le16_to_cpu(desc->tx.tv));
+ sprintf(result[j++], "%u",
+ le32_to_cpu(desc->tx.ol_type_vlan_len_msec));
+ sprintf(result[j++], "%#x", le32_to_cpu(desc->tx.paylen_ol4cs));
+ sprintf(result[j++], "%#x", le16_to_cpu(desc->tx.bdtp_fe_sc_vld_ra_ri));
+ sprintf(result[j++], "%u", le16_to_cpu(desc->tx.mss_hw_csum));
+}
+
+static int hns3_dbg_tx_bd_info(struct hns3_dbg_data *d, char *buf, int len)
+{
+ char data_str[ARRAY_SIZE(tx_bd_info_items)][HNS3_DBG_DATA_STR_LEN];
+ struct hns3_nic_priv *priv = d->handle->priv;
+ char *result[ARRAY_SIZE(tx_bd_info_items)];
+ char content[HNS3_DBG_INFO_LEN];
+ struct hns3_enet_ring *ring;
+ struct hns3_desc *desc;
+ unsigned int i;
+ int pos = 0;
+
+ if (d->qid >= d->handle->kinfo.num_tqps) {
+ dev_err(&d->handle->pdev->dev,
+ "queue%u is not in use\n", d->qid);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(tx_bd_info_items); i++)
+ result[i] = &data_str[i][0];
+
+ pos += scnprintf(buf + pos, len - pos,
+ "Queue %u tx bd info:\n", d->qid);
+ hns3_dbg_fill_content(content, sizeof(content), tx_bd_info_items,
+ NULL, ARRAY_SIZE(tx_bd_info_items));
+ pos += scnprintf(buf + pos, len - pos, "%s", content);
+
+ ring = &priv->ring[d->qid];
+ for (i = 0; i < ring->desc_num; i++) {
+ desc = &ring->desc[i];
+
+ hns3_dump_tx_bd_info(desc, result, i);
+ hns3_dbg_fill_content(content, sizeof(content),
+ tx_bd_info_items, (const char **)result,
+ ARRAY_SIZE(tx_bd_info_items));
+ pos += scnprintf(buf + pos, len - pos, "%s", content);
+ }
+
+ return 0;
+}
+
+static void
+hns3_dbg_dev_caps(struct hnae3_handle *h, char *buf, int len, int *pos)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+ const char * const str[] = {"no", "yes"};
+ unsigned long *caps = ae_dev->caps;
+ u32 i, state;
+
+ *pos += scnprintf(buf + *pos, len - *pos, "dev capability:\n");
+
+ for (i = 0; i < ARRAY_SIZE(hns3_dbg_cap); i++) {
+ state = test_bit(hns3_dbg_cap[i].cap_bit, caps);
+ *pos += scnprintf(buf + *pos, len - *pos, "%s: %s\n",
+ hns3_dbg_cap[i].name, str[state]);
+ }
+
+ *pos += scnprintf(buf + *pos, len - *pos, "\n");
+}
+
+static void
+hns3_dbg_dev_specs(struct hnae3_handle *h, char *buf, int len, int *pos)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+ struct hnae3_dev_specs *dev_specs = &ae_dev->dev_specs;
+ struct hnae3_knic_private_info *kinfo = &h->kinfo;
+ struct net_device *dev = kinfo->netdev;
+
+ *pos += scnprintf(buf + *pos, len - *pos, "dev_spec:\n");
+ *pos += scnprintf(buf + *pos, len - *pos, "MAC entry num: %u\n",
+ dev_specs->mac_entry_num);
+ *pos += scnprintf(buf + *pos, len - *pos, "MNG entry num: %u\n",
+ dev_specs->mng_entry_num);
+ *pos += scnprintf(buf + *pos, len - *pos, "MAX non tso bd num: %u\n",
+ dev_specs->max_non_tso_bd_num);
+ *pos += scnprintf(buf + *pos, len - *pos, "RSS ind tbl size: %u\n",
+ dev_specs->rss_ind_tbl_size);
+ *pos += scnprintf(buf + *pos, len - *pos, "RSS key size: %u\n",
+ dev_specs->rss_key_size);
+ *pos += scnprintf(buf + *pos, len - *pos, "RSS size: %u\n",
+ kinfo->rss_size);
+ *pos += scnprintf(buf + *pos, len - *pos, "Allocated RSS size: %u\n",
+ kinfo->req_rss_size);
+ *pos += scnprintf(buf + *pos, len - *pos,
+ "Task queue pairs numbers: %u\n",
+ kinfo->num_tqps);
+ *pos += scnprintf(buf + *pos, len - *pos, "RX buffer length: %u\n",
+ kinfo->rx_buf_len);
+ *pos += scnprintf(buf + *pos, len - *pos, "Desc num per TX queue: %u\n",
+ kinfo->num_tx_desc);
+ *pos += scnprintf(buf + *pos, len - *pos, "Desc num per RX queue: %u\n",
+ kinfo->num_rx_desc);
+ *pos += scnprintf(buf + *pos, len - *pos,
+ "Total number of enabled TCs: %u\n",
+ kinfo->tc_info.num_tc);
+ *pos += scnprintf(buf + *pos, len - *pos, "MAX INT QL: %u\n",
+ dev_specs->int_ql_max);
+ *pos += scnprintf(buf + *pos, len - *pos, "MAX INT GL: %u\n",
+ dev_specs->max_int_gl);
+ *pos += scnprintf(buf + *pos, len - *pos, "MAX TM RATE: %u\n",
+ dev_specs->max_tm_rate);
+ *pos += scnprintf(buf + *pos, len - *pos, "MAX QSET number: %u\n",
+ dev_specs->max_qset_num);
+ *pos += scnprintf(buf + *pos, len - *pos, "umv size: %u\n",
+ dev_specs->umv_size);
+ *pos += scnprintf(buf + *pos, len - *pos, "mc mac size: %u\n",
+ dev_specs->mc_mac_size);
+ *pos += scnprintf(buf + *pos, len - *pos, "MAC statistics number: %u\n",
+ dev_specs->mac_stats_num);
+ *pos += scnprintf(buf + *pos, len - *pos,
+ "TX timeout threshold: %d seconds\n",
+ dev->watchdog_timeo / HZ);
+}
+
+static int hns3_dbg_dev_info(struct hnae3_handle *h, char *buf, int len)
+{
+ int pos = 0;
+
+ hns3_dbg_dev_caps(h, buf, len, &pos);
+
+ hns3_dbg_dev_specs(h, buf, len, &pos);
+
+ return 0;
+}
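
The dev_info dump above is assembled with the accumulator pattern used throughout this file: each helper receives the shared buffer plus an int *pos and appends via scnprintf(), which never writes past len - *pos. A stripped-down sketch of that pattern (the helper name is hypothetical):

    static void demo_append_u32(char *buf, int len, int *pos,
                                const char *tag, u32 val)
    {
            /* scnprintf() returns the number of characters actually stored,
             * so *pos stays valid for the next append.
             */
            *pos += scnprintf(buf + *pos, len - *pos, "%s: %u\n", tag, val);
    }
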
+
+static const struct hns3_dbg_item page_pool_info_items[] = {
+ { "QUEUE_ID", 2 },
+ { "ALLOCATE_CNT", 2 },
+ { "FREE_CNT", 6 },
+ { "POOL_SIZE(PAGE_NUM)", 2 },
+ { "ORDER", 2 },
+ { "NUMA_ID", 2 },
+ { "MAX_LEN", 2 },
+};
+
+static void hns3_dump_page_pool_info(struct hns3_enet_ring *ring,
+ char **result, u32 index)
+{
+ u32 j = 0;
+
+ sprintf(result[j++], "%u", index);
+ sprintf(result[j++], "%u",
+ READ_ONCE(ring->page_pool->pages_state_hold_cnt));
+ sprintf(result[j++], "%d",
+ atomic_read(&ring->page_pool->pages_state_release_cnt));
+ sprintf(result[j++], "%u", ring->page_pool->p.pool_size);
+ sprintf(result[j++], "%u", ring->page_pool->p.order);
+ sprintf(result[j++], "%d", ring->page_pool->p.nid);
+ sprintf(result[j++], "%uK", ring->page_pool->p.max_len / 1024);
+}
+
+static int
+hns3_dbg_page_pool_info(struct hnae3_handle *h, char *buf, int len)
+{
+ char data_str[ARRAY_SIZE(page_pool_info_items)][HNS3_DBG_DATA_STR_LEN];
+ char *result[ARRAY_SIZE(page_pool_info_items)];
+ struct hns3_nic_priv *priv = h->priv;
+ char content[HNS3_DBG_INFO_LEN];
+ struct hns3_enet_ring *ring;
+ int pos = 0;
+ u32 i;
+
+ if (!priv->ring) {
+ dev_err(&h->pdev->dev, "priv->ring is NULL\n");
+ return -EFAULT;
+ }
+
+ if (!priv->ring[h->kinfo.num_tqps].page_pool) {
+ dev_err(&h->pdev->dev, "page pool is not initialized\n");
+ return -EFAULT;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(page_pool_info_items); i++)
+ result[i] = &data_str[i][0];
+
+ hns3_dbg_fill_content(content, sizeof(content), page_pool_info_items,
+ NULL, ARRAY_SIZE(page_pool_info_items));
+ pos += scnprintf(buf + pos, len - pos, "%s", content);
+ for (i = 0; i < h->kinfo.num_tqps; i++) {
+ if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) ||
+ test_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
+ return -EPERM;
+ ring = &priv->ring[(u32)(i + h->kinfo.num_tqps)];
+ hns3_dump_page_pool_info(ring, result, i);
+ hns3_dbg_fill_content(content, sizeof(content),
+ page_pool_info_items,
+ (const char **)result,
+ ARRAY_SIZE(page_pool_info_items));
+ pos += scnprintf(buf + pos, len - pos, "%s", content);
+ }
+
+ return 0;
+}
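
As in the rx BD dump, the page pool dump indexes priv->ring[i + kinfo.num_tqps]: the first num_tqps entries of priv->ring are the tx rings and the following num_tqps entries are their rx counterparts. A small helper spelling out that convention (hypothetical, for illustration only):

    static inline struct hns3_enet_ring *
    demo_rx_ring(struct hns3_nic_priv *priv, struct hnae3_handle *h, u32 qid)
    {
            /* the rx ring of queue qid sits after all tx rings */
            return &priv->ring[qid + h->kinfo.num_tqps];
    }
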
+
+static int hns3_dbg_get_cmd_index(struct hns3_dbg_data *dbg_data, u32 *index)
+{
+ u32 i;
+
+ for (i = 0; i < ARRAY_SIZE(hns3_dbg_cmd); i++) {
+ if (hns3_dbg_cmd[i].cmd == dbg_data->cmd) {
+ *index = i;
+ return 0;
+ }
+ }
+
+ dev_err(&dbg_data->handle->pdev->dev, "unknown command(%d)\n",
+ dbg_data->cmd);
+ return -EINVAL;
+}
+
+static const struct hns3_dbg_func hns3_dbg_cmd_func[] = {
+ {
+ .cmd = HNAE3_DBG_CMD_QUEUE_MAP,
+ .dbg_dump = hns3_dbg_queue_map,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_DEV_INFO,
+ .dbg_dump = hns3_dbg_dev_info,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_TX_BD,
+ .dbg_dump_bd = hns3_dbg_tx_bd_info,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_RX_BD,
+ .dbg_dump_bd = hns3_dbg_rx_bd_info,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_RX_QUEUE_INFO,
+ .dbg_dump = hns3_dbg_rx_queue_info,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_TX_QUEUE_INFO,
+ .dbg_dump = hns3_dbg_tx_queue_info,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_PAGE_POOL_INFO,
+ .dbg_dump = hns3_dbg_page_pool_info,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_COAL_INFO,
+ .dbg_dump = hns3_dbg_coal_info,
+ },
+};
+
+static int hns3_dbg_read_cmd(struct hns3_dbg_data *dbg_data,
+ enum hnae3_dbg_cmd cmd, char *buf, int len)
+{
+ const struct hnae3_ae_ops *ops = dbg_data->handle->ae_algo->ops;
+ const struct hns3_dbg_func *cmd_func;
+ u32 i;
+
+ for (i = 0; i < ARRAY_SIZE(hns3_dbg_cmd_func); i++) {
+ if (cmd == hns3_dbg_cmd_func[i].cmd) {
+ cmd_func = &hns3_dbg_cmd_func[i];
+ if (cmd_func->dbg_dump)
+ return cmd_func->dbg_dump(dbg_data->handle, buf,
+ len);
+ else
+ return cmd_func->dbg_dump_bd(dbg_data, buf,
+ len);
+ }
+ }
+
+ if (!ops->dbg_read_cmd)
+ return -EOPNOTSUPP;
+
+ return ops->dbg_read_cmd(dbg_data->handle, cmd, buf, len);
+}
+
+static ssize_t hns3_dbg_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct hns3_dbg_data *dbg_data = filp->private_data;
+ struct hnae3_handle *handle = dbg_data->handle;
+ struct hns3_nic_priv *priv = handle->priv;
+ ssize_t size = 0;
+ char **save_buf;
+ char *read_buf;
+ u32 index;
+ int ret;
+
+ ret = hns3_dbg_get_cmd_index(dbg_data, &index);
+ if (ret)
+ return ret;
+
+ mutex_lock(&handle->dbgfs_lock);
+ save_buf = &handle->dbgfs_buf[index];
+
+ if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) ||
+ test_bit(HNS3_NIC_STATE_RESETTING, &priv->state)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ if (*save_buf) {
+ read_buf = *save_buf;
+ } else {
+ read_buf = kvzalloc(hns3_dbg_cmd[index].buf_len, GFP_KERNEL);
+ if (!read_buf) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* save the buffer addr until the last read operation */
+ *save_buf = read_buf;
+
+ /* get data ready for the first time to read */
+ ret = hns3_dbg_read_cmd(dbg_data, hns3_dbg_cmd[index].cmd,
+ read_buf, hns3_dbg_cmd[index].buf_len);
+ if (ret)
+ goto out;
+ }
+
+ size = simple_read_from_buffer(buffer, count, ppos, read_buf,
+ strlen(read_buf));
+ if (size > 0) {
+ mutex_unlock(&handle->dbgfs_lock);
+ return size;
+ }
+
+out:
+ /* free the buffer for the last read operation */
+ if (*save_buf) {
+ kvfree(*save_buf);
+ *save_buf = NULL;
+ }
+
+ mutex_unlock(&handle->dbgfs_lock);
+ return ret;
+}
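
hns3_dbg_read() renders a dump once, caches it in handle->dbgfs_buf[index], serves partial reads from that cache, and frees it as soon as simple_read_from_buffer() reports EOF (0) or an error occurs. A condensed model of that lifecycle (names are hypothetical):

    static ssize_t demo_cached_read(struct file *filp, char __user *to,
                                    size_t count, loff_t *ppos, char **cached)
    {
            ssize_t n = simple_read_from_buffer(to, count, ppos, *cached,
                                                strlen(*cached));

            if (n <= 0) {           /* EOF or error: drop the cached text */
                    kvfree(*cached);
                    *cached = NULL;
            }

            return n;
    }
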
+
+static const struct file_operations hns3_dbg_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = hns3_dbg_read,
+};
+
+static int hns3_dbg_bd_file_init(struct hnae3_handle *handle, u32 cmd)
+{
+ struct dentry *entry_dir;
+ struct hns3_dbg_data *data;
+ u16 max_queue_num;
+ unsigned int i;
+
+ entry_dir = hns3_dbg_dentry[hns3_dbg_cmd[cmd].dentry].dentry;
+ max_queue_num = hns3_get_max_available_channels(handle);
+ data = devm_kzalloc(&handle->pdev->dev, max_queue_num * sizeof(*data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ for (i = 0; i < max_queue_num; i++) {
+ char name[HNS3_DBG_FILE_NAME_LEN];
+
+ data[i].handle = handle;
+ data[i].cmd = hns3_dbg_cmd[cmd].cmd;
+ data[i].qid = i;
+ sprintf(name, "%s%u", hns3_dbg_cmd[cmd].name, i);
+ debugfs_create_file(name, 0400, entry_dir, &data[i],
+ &hns3_dbg_fops);
+ }
+
+ return 0;
+}
+
+static int
+hns3_dbg_common_file_init(struct hnae3_handle *handle, u32 cmd)
+{
+ struct hns3_dbg_data *data;
+ struct dentry *entry_dir;
+
+ data = devm_kzalloc(&handle->pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->handle = handle;
+ data->cmd = hns3_dbg_cmd[cmd].cmd;
+ entry_dir = hns3_dbg_dentry[hns3_dbg_cmd[cmd].dentry].dentry;
+ debugfs_create_file(hns3_dbg_cmd[cmd].name, 0400, entry_dir,
+ data, &hns3_dbg_fops);
+
+ return 0;
+}
+
+int hns3_dbg_init(struct hnae3_handle *handle)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
+ const char *name = pci_name(handle->pdev);
+ int ret;
+ u32 i;
+
+ handle->dbgfs_buf = devm_kcalloc(&handle->pdev->dev,
+ ARRAY_SIZE(hns3_dbg_cmd),
+ sizeof(*handle->dbgfs_buf),
+ GFP_KERNEL);
+ if (!handle->dbgfs_buf)
+ return -ENOMEM;
+
+ hns3_dbg_dentry[HNS3_DBG_DENTRY_COMMON].dentry =
+ debugfs_create_dir(name, hns3_dbgfs_root);
+ handle->hnae3_dbgfs = hns3_dbg_dentry[HNS3_DBG_DENTRY_COMMON].dentry;
+
+ for (i = 0; i < HNS3_DBG_DENTRY_COMMON; i++)
+ hns3_dbg_dentry[i].dentry =
+ debugfs_create_dir(hns3_dbg_dentry[i].name,
+ handle->hnae3_dbgfs);
+
+ mutex_init(&handle->dbgfs_lock);
+
+ for (i = 0; i < ARRAY_SIZE(hns3_dbg_cmd); i++) {
+ if ((hns3_dbg_cmd[i].cmd == HNAE3_DBG_CMD_TM_NODES &&
+ ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) ||
+ (hns3_dbg_cmd[i].cmd == HNAE3_DBG_CMD_PTP_INFO &&
+ !test_bit(HNAE3_DEV_SUPPORT_PTP_B, ae_dev->caps)))
+ continue;
+
+ if (!hns3_dbg_cmd[i].init) {
+ dev_err(&handle->pdev->dev,
+ "cmd %s lack of init func\n",
+ hns3_dbg_cmd[i].name);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = hns3_dbg_cmd[i].init(handle, i);
+ if (ret) {
+ dev_err(&handle->pdev->dev, "failed to init cmd %s\n",
+ hns3_dbg_cmd[i].name);
+ goto out;
+ }
+ }
+
+ return 0;
+
+out:
+ debugfs_remove_recursive(handle->hnae3_dbgfs);
+ handle->hnae3_dbgfs = NULL;
+ mutex_destroy(&handle->dbgfs_lock);
+ return ret;
+}
+
+void hns3_dbg_uninit(struct hnae3_handle *handle)
+{
+ u32 i;
+
+ debugfs_remove_recursive(handle->hnae3_dbgfs);
+ handle->hnae3_dbgfs = NULL;
+
+ for (i = 0; i < ARRAY_SIZE(hns3_dbg_cmd); i++)
+ if (handle->dbgfs_buf[i]) {
+ kvfree(handle->dbgfs_buf[i]);
+ handle->dbgfs_buf[i] = NULL;
+ }
+
+ mutex_destroy(&handle->dbgfs_lock);
+}
+
+void hns3_dbg_register_debugfs(const char *debugfs_dir_name)
+{
+ hns3_dbgfs_root = debugfs_create_dir(debugfs_dir_name, NULL);
+}
+
+void hns3_dbg_unregister_debugfs(void)
+{
+ debugfs_remove_recursive(hns3_dbgfs_root);
+ hns3_dbgfs_root = NULL;
+}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h
new file mode 100644
index 0000000000..4a5ef8a90a
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2021 Hisilicon Limited. */
+
+#ifndef __HNS3_DEBUGFS_H
+#define __HNS3_DEBUGFS_H
+
+#include "hnae3.h"
+
+#define HNS3_DBG_READ_LEN 65536
+#define HNS3_DBG_READ_LEN_128KB 0x20000
+#define HNS3_DBG_READ_LEN_1MB 0x100000
+#define HNS3_DBG_READ_LEN_4MB 0x400000
+#define HNS3_DBG_READ_LEN_5MB 0x500000
+#define HNS3_DBG_WRITE_LEN 1024
+
+#define HNS3_DBG_DATA_STR_LEN 32
+#define HNS3_DBG_INFO_LEN 256
+#define HNS3_DBG_ITEM_NAME_LEN 32
+#define HNS3_DBG_FILE_NAME_LEN 16
+
+struct hns3_dbg_item {
+ char name[HNS3_DBG_ITEM_NAME_LEN];
+	u16 interval; /* number of blank spaces printed after the item */
+};
+
+struct hns3_dbg_data {
+ struct hnae3_handle *handle;
+ enum hnae3_dbg_cmd cmd;
+ u16 qid;
+};
+
+enum hns3_dbg_dentry_type {
+ HNS3_DBG_DENTRY_TM,
+ HNS3_DBG_DENTRY_TX_BD,
+ HNS3_DBG_DENTRY_RX_BD,
+ HNS3_DBG_DENTRY_MAC,
+ HNS3_DBG_DENTRY_REG,
+ HNS3_DBG_DENTRY_QUEUE,
+ HNS3_DBG_DENTRY_FD,
+ HNS3_DBG_DENTRY_COMMON,
+};
+
+struct hns3_dbg_dentry_info {
+ const char *name;
+ struct dentry *dentry;
+};
+
+struct hns3_dbg_cmd_info {
+ const char *name;
+ enum hnae3_dbg_cmd cmd;
+ enum hns3_dbg_dentry_type dentry;
+ u32 buf_len;
+ int (*init)(struct hnae3_handle *handle, unsigned int cmd);
+};
+
+struct hns3_dbg_func {
+ enum hnae3_dbg_cmd cmd;
+ int (*dbg_dump)(struct hnae3_handle *handle, char *buf, int len);
+ int (*dbg_dump_bd)(struct hns3_dbg_data *data, char *buf, int len);
+};
+
+struct hns3_dbg_cap_info {
+ const char *name;
+ enum HNAE3_DEV_CAP_BITS cap_bit;
+};
+
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
new file mode 100644
index 0000000000..677cfaa5fe
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -0,0 +1,6013 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
+
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
+#ifdef CONFIG_RFS_ACCEL
+#include <linux/cpu_rmap.h>
+#endif
+#include <linux/if_vlan.h>
+#include <linux/irq.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/skbuff.h>
+#include <linux/sctp.h>
+#include <net/gre.h>
+#include <net/gro.h>
+#include <net/ip6_checksum.h>
+#include <net/page_pool/helpers.h>
+#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
+#include <net/tcp.h>
+#include <net/vxlan.h>
+#include <net/geneve.h>
+
+#include "hnae3.h"
+#include "hns3_enet.h"
+/* All hns3 tracepoints are defined by the include below, which
+ * must be included exactly once across the whole kernel with
+ * CREATE_TRACE_POINTS defined
+ */
+#define CREATE_TRACE_POINTS
+#include "hns3_trace.h"
+
+#define hns3_set_field(origin, shift, val) ((origin) |= (val) << (shift))
+#define hns3_tx_bd_count(S) DIV_ROUND_UP(S, HNS3_MAX_BD_SIZE)
+
+#define hns3_rl_err(fmt, ...) \
+ do { \
+ if (net_ratelimit()) \
+ netdev_err(fmt, ##__VA_ARGS__); \
+ } while (0)
+
+static void hns3_clear_all_ring(struct hnae3_handle *h, bool force);
+
+static const char hns3_driver_name[] = "hns3";
+static const char hns3_driver_string[] =
+ "Hisilicon Ethernet Network Driver for Hip08 Family";
+static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation.";
+static struct hnae3_client client;
+
+static int debug = -1;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, " Network interface message level setting");
+
+static unsigned int tx_sgl = 1;
+module_param(tx_sgl, uint, 0600);
+MODULE_PARM_DESC(tx_sgl, "Minimum number of frags when using dma_map_sg() to optimize the IOMMU mapping");
+
+static bool page_pool_enabled = true;
+module_param(page_pool_enabled, bool, 0400);
+
+#define HNS3_SGL_SIZE(nfrag) (sizeof(struct scatterlist) * (nfrag) + \
+ sizeof(struct sg_table))
+#define HNS3_MAX_SGL_SIZE ALIGN(HNS3_SGL_SIZE(HNS3_MAX_TSO_BD_NUM), \
+ dma_get_cache_alignment())
+
+#define DEFAULT_MSG_LEVEL (NETIF_MSG_PROBE | NETIF_MSG_LINK | \
+ NETIF_MSG_IFDOWN | NETIF_MSG_IFUP)
+
+#define HNS3_INNER_VLAN_TAG 1
+#define HNS3_OUTER_VLAN_TAG 2
+
+#define HNS3_MIN_TX_LEN 33U
+#define HNS3_MIN_TUN_PKT_LEN 65U
+
+/* hns3_pci_tbl - PCI Device ID Table
+ *
+ * Last entry must be all 0s
+ *
+ * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
+ * Class, Class Mask, private data (not used) }
+ */
+static const struct pci_device_id hns3_pci_tbl[] = {
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA),
+ HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC),
+ HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA),
+ HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC),
+ HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC),
+ HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA),
+ HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_VF), 0},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_RDMA_DCB_PFC_VF),
+ HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
+ /* required last entry */
+ {0,}
+};
+MODULE_DEVICE_TABLE(pci, hns3_pci_tbl);
+
+#define HNS3_RX_PTYPE_ENTRY(ptype, l, s, t, h) \
+ { ptype, \
+ l, \
+ CHECKSUM_##s, \
+ HNS3_L3_TYPE_##t, \
+ 1, \
+ h}
+
+#define HNS3_RX_PTYPE_UNUSED_ENTRY(ptype) \
+ { ptype, 0, CHECKSUM_NONE, HNS3_L3_TYPE_PARSE_FAIL, 0, \
+ PKT_HASH_TYPE_NONE }
+
+static const struct hns3_rx_ptype hns3_rx_ptype_tbl[] = {
+ HNS3_RX_PTYPE_UNUSED_ENTRY(0),
+ HNS3_RX_PTYPE_ENTRY(1, 0, COMPLETE, ARP, PKT_HASH_TYPE_NONE),
+ HNS3_RX_PTYPE_ENTRY(2, 0, COMPLETE, RARP, PKT_HASH_TYPE_NONE),
+ HNS3_RX_PTYPE_ENTRY(3, 0, COMPLETE, LLDP, PKT_HASH_TYPE_NONE),
+ HNS3_RX_PTYPE_ENTRY(4, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE),
+ HNS3_RX_PTYPE_ENTRY(5, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE),
+ HNS3_RX_PTYPE_ENTRY(6, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE),
+ HNS3_RX_PTYPE_ENTRY(7, 0, COMPLETE, CNM, PKT_HASH_TYPE_NONE),
+ HNS3_RX_PTYPE_ENTRY(8, 0, NONE, PARSE_FAIL, PKT_HASH_TYPE_NONE),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(9),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(10),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(11),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(12),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(13),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(14),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(15),
+ HNS3_RX_PTYPE_ENTRY(16, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE),
+ HNS3_RX_PTYPE_ENTRY(17, 0, COMPLETE, IPV4, PKT_HASH_TYPE_NONE),
+ HNS3_RX_PTYPE_ENTRY(18, 0, COMPLETE, IPV4, PKT_HASH_TYPE_NONE),
+ HNS3_RX_PTYPE_ENTRY(19, 0, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4),
+ HNS3_RX_PTYPE_ENTRY(20, 0, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4),
+ HNS3_RX_PTYPE_ENTRY(21, 0, NONE, IPV4, PKT_HASH_TYPE_NONE),
+ HNS3_RX_PTYPE_ENTRY(22, 0, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4),
+ HNS3_RX_PTYPE_ENTRY(23, 0, NONE, IPV4, PKT_HASH_TYPE_L3),
+ HNS3_RX_PTYPE_ENTRY(24, 0, NONE, IPV4, PKT_HASH_TYPE_L3),
+ HNS3_RX_PTYPE_ENTRY(25, 0, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(26),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(27),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(28),
+ HNS3_RX_PTYPE_ENTRY(29, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE),
+ HNS3_RX_PTYPE_ENTRY(30, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE),
+ HNS3_RX_PTYPE_ENTRY(31, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3),
+ HNS3_RX_PTYPE_ENTRY(32, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3),
+ HNS3_RX_PTYPE_ENTRY(33, 1, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4),
+ HNS3_RX_PTYPE_ENTRY(34, 1, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4),
+ HNS3_RX_PTYPE_ENTRY(35, 1, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4),
+ HNS3_RX_PTYPE_ENTRY(36, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3),
+ HNS3_RX_PTYPE_ENTRY(37, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(38),
+ HNS3_RX_PTYPE_ENTRY(39, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3),
+ HNS3_RX_PTYPE_ENTRY(40, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3),
+ HNS3_RX_PTYPE_ENTRY(41, 1, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4),
+ HNS3_RX_PTYPE_ENTRY(42, 1, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4),
+ HNS3_RX_PTYPE_ENTRY(43, 1, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4),
+ HNS3_RX_PTYPE_ENTRY(44, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3),
+ HNS3_RX_PTYPE_ENTRY(45, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(46),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(47),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(48),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(49),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(50),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(51),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(52),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(53),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(54),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(55),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(56),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(57),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(58),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(59),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(60),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(61),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(62),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(63),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(64),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(65),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(66),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(67),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(68),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(69),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(70),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(71),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(72),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(73),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(74),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(75),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(76),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(77),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(78),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(79),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(80),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(81),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(82),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(83),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(84),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(85),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(86),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(87),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(88),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(89),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(90),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(91),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(92),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(93),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(94),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(95),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(96),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(97),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(98),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(99),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(100),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(101),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(102),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(103),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(104),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(105),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(106),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(107),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(108),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(109),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(110),
+ HNS3_RX_PTYPE_ENTRY(111, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3),
+ HNS3_RX_PTYPE_ENTRY(112, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3),
+ HNS3_RX_PTYPE_ENTRY(113, 0, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4),
+ HNS3_RX_PTYPE_ENTRY(114, 0, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4),
+ HNS3_RX_PTYPE_ENTRY(115, 0, NONE, IPV6, PKT_HASH_TYPE_L3),
+ HNS3_RX_PTYPE_ENTRY(116, 0, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4),
+ HNS3_RX_PTYPE_ENTRY(117, 0, NONE, IPV6, PKT_HASH_TYPE_L3),
+ HNS3_RX_PTYPE_ENTRY(118, 0, NONE, IPV6, PKT_HASH_TYPE_L3),
+ HNS3_RX_PTYPE_ENTRY(119, 0, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(120),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(121),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(122),
+ HNS3_RX_PTYPE_ENTRY(123, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE),
+ HNS3_RX_PTYPE_ENTRY(124, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE),
+ HNS3_RX_PTYPE_ENTRY(125, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3),
+ HNS3_RX_PTYPE_ENTRY(126, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3),
+ HNS3_RX_PTYPE_ENTRY(127, 1, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4),
+ HNS3_RX_PTYPE_ENTRY(128, 1, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4),
+ HNS3_RX_PTYPE_ENTRY(129, 1, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4),
+ HNS3_RX_PTYPE_ENTRY(130, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3),
+ HNS3_RX_PTYPE_ENTRY(131, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(132),
+ HNS3_RX_PTYPE_ENTRY(133, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3),
+ HNS3_RX_PTYPE_ENTRY(134, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3),
+ HNS3_RX_PTYPE_ENTRY(135, 1, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4),
+ HNS3_RX_PTYPE_ENTRY(136, 1, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4),
+ HNS3_RX_PTYPE_ENTRY(137, 1, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4),
+ HNS3_RX_PTYPE_ENTRY(138, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3),
+ HNS3_RX_PTYPE_ENTRY(139, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(140),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(141),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(142),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(143),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(144),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(145),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(146),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(147),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(148),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(149),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(150),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(151),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(152),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(153),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(154),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(155),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(156),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(157),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(158),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(159),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(160),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(161),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(162),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(163),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(164),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(165),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(166),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(167),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(168),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(169),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(170),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(171),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(172),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(173),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(174),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(175),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(176),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(177),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(178),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(179),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(180),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(181),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(182),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(183),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(184),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(185),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(186),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(187),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(188),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(189),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(190),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(191),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(192),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(193),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(194),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(195),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(196),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(197),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(198),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(199),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(200),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(201),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(202),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(203),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(204),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(205),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(206),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(207),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(208),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(209),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(210),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(211),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(212),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(213),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(214),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(215),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(216),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(217),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(218),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(219),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(220),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(221),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(222),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(223),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(224),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(225),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(226),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(227),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(228),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(229),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(230),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(231),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(232),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(233),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(234),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(235),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(236),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(237),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(238),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(239),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(240),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(241),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(242),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(243),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(244),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(245),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(246),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(247),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(248),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(249),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(250),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(251),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(252),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(253),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(254),
+ HNS3_RX_PTYPE_UNUSED_ENTRY(255),
+};
+
+#define HNS3_INVALID_PTYPE \
+ ARRAY_SIZE(hns3_rx_ptype_tbl)
+
+static irqreturn_t hns3_irq_handle(int irq, void *vector)
+{
+ struct hns3_enet_tqp_vector *tqp_vector = vector;
+
+ napi_schedule_irqoff(&tqp_vector->napi);
+ tqp_vector->event_cnt++;
+
+ return IRQ_HANDLED;
+}
+
+static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv)
+{
+ struct hns3_enet_tqp_vector *tqp_vectors;
+ unsigned int i;
+
+ for (i = 0; i < priv->vector_num; i++) {
+ tqp_vectors = &priv->tqp_vector[i];
+
+ if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED)
+ continue;
+
+ /* clear the affinity mask */
+ irq_set_affinity_hint(tqp_vectors->vector_irq, NULL);
+
+ /* release the irq resource */
+ free_irq(tqp_vectors->vector_irq, tqp_vectors);
+ tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED;
+ }
+}
+
+static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
+{
+ struct hns3_enet_tqp_vector *tqp_vectors;
+ int txrx_int_idx = 0;
+ int rx_int_idx = 0;
+ int tx_int_idx = 0;
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < priv->vector_num; i++) {
+ tqp_vectors = &priv->tqp_vector[i];
+
+ if (tqp_vectors->irq_init_flag == HNS3_VECTOR_INITED)
+ continue;
+
+ if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) {
+ snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN,
+ "%s-%s-%s-%d", hns3_driver_name,
+ pci_name(priv->ae_handle->pdev),
+ "TxRx", txrx_int_idx++);
+ txrx_int_idx++;
+ } else if (tqp_vectors->rx_group.ring) {
+ snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN,
+ "%s-%s-%s-%d", hns3_driver_name,
+ pci_name(priv->ae_handle->pdev),
+ "Rx", rx_int_idx++);
+ } else if (tqp_vectors->tx_group.ring) {
+ snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN,
+ "%s-%s-%s-%d", hns3_driver_name,
+ pci_name(priv->ae_handle->pdev),
+ "Tx", tx_int_idx++);
+ } else {
+ /* Skip this unused q_vector */
+ continue;
+ }
+
+ tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0';
+
+ irq_set_status_flags(tqp_vectors->vector_irq, IRQ_NOAUTOEN);
+ ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0,
+ tqp_vectors->name, tqp_vectors);
+ if (ret) {
+ netdev_err(priv->netdev, "request irq(%d) fail\n",
+ tqp_vectors->vector_irq);
+ hns3_nic_uninit_irq(priv);
+ return ret;
+ }
+
+ irq_set_affinity_hint(tqp_vectors->vector_irq,
+ &tqp_vectors->affinity_mask);
+
+ tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED;
+ }
+
+ return 0;
+}
+
+static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector,
+ u32 mask_en)
+{
+ writel(mask_en, tqp_vector->mask_addr);
+}
+
+static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector)
+{
+ napi_enable(&tqp_vector->napi);
+ enable_irq(tqp_vector->vector_irq);
+
+ /* enable vector */
+ hns3_mask_vector_irq(tqp_vector, 1);
+}
+
+static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
+{
+ /* disable vector */
+ hns3_mask_vector_irq(tqp_vector, 0);
+
+ disable_irq(tqp_vector->vector_irq);
+ napi_disable(&tqp_vector->napi);
+ cancel_work_sync(&tqp_vector->rx_group.dim.work);
+ cancel_work_sync(&tqp_vector->tx_group.dim.work);
+}
+
+void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
+ u32 rl_value)
+{
+ u32 rl_reg = hns3_rl_usec_to_reg(rl_value);
+
+	/* This defines the configuration for RL (Interrupt Rate Limiter).
+	 * RL sets the rate of interrupts, i.e. the number of interrupts per
+	 * second. GL and RL are the two ways to achieve interrupt coalescing.
+	 */
+ if (rl_reg > 0 && !tqp_vector->tx_group.coal.adapt_enable &&
+ !tqp_vector->rx_group.coal.adapt_enable)
+ /* According to the hardware, the range of rl_reg is
+ * 0-59 and the unit is 4.
+ */
+ rl_reg |= HNS3_INT_RL_ENABLE_MASK;
+
+ writel(rl_reg, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET);
+}
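
The comment above fixes the RL register semantics: values 0-59, a unit of 4, with the enable bit OR-ed in when adaptive coalescing is off. The real conversion is hns3_rl_usec_to_reg() in hns3_enet.h; under the constraints stated in that comment it amounts to roughly the following (hedged sketch only, not the driver's helper):

    static inline u32 demo_rl_usec_to_reg(u32 rl_usec)
    {
            /* one register step per 4 usecs, clamped to the 0-59 range */
            return min_t(u32, rl_usec / 4, 59);
    }
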
+
+void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector,
+ u32 gl_value)
+{
+ u32 new_val;
+
+ if (tqp_vector->rx_group.coal.unit_1us)
+ new_val = gl_value | HNS3_INT_GL_1US;
+ else
+ new_val = hns3_gl_usec_to_reg(gl_value);
+
+ writel(new_val, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
+}
+
+void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
+ u32 gl_value)
+{
+ u32 new_val;
+
+ if (tqp_vector->tx_group.coal.unit_1us)
+ new_val = gl_value | HNS3_INT_GL_1US;
+ else
+ new_val = hns3_gl_usec_to_reg(gl_value);
+
+ writel(new_val, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
+}
+
+void hns3_set_vector_coalesce_tx_ql(struct hns3_enet_tqp_vector *tqp_vector,
+ u32 ql_value)
+{
+ writel(ql_value, tqp_vector->mask_addr + HNS3_VECTOR_TX_QL_OFFSET);
+}
+
+void hns3_set_vector_coalesce_rx_ql(struct hns3_enet_tqp_vector *tqp_vector,
+ u32 ql_value)
+{
+ writel(ql_value, tqp_vector->mask_addr + HNS3_VECTOR_RX_QL_OFFSET);
+}
+
+static void hns3_vector_coalesce_init(struct hns3_enet_tqp_vector *tqp_vector,
+ struct hns3_nic_priv *priv)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
+ struct hns3_enet_coalesce *tx_coal = &tqp_vector->tx_group.coal;
+ struct hns3_enet_coalesce *rx_coal = &tqp_vector->rx_group.coal;
+ struct hns3_enet_coalesce *ptx_coal = &priv->tx_coal;
+ struct hns3_enet_coalesce *prx_coal = &priv->rx_coal;
+
+ tx_coal->adapt_enable = ptx_coal->adapt_enable;
+ rx_coal->adapt_enable = prx_coal->adapt_enable;
+
+ tx_coal->int_gl = ptx_coal->int_gl;
+ rx_coal->int_gl = prx_coal->int_gl;
+
+ rx_coal->flow_level = prx_coal->flow_level;
+ tx_coal->flow_level = ptx_coal->flow_level;
+
+	/* For device version V3 and above, GL can be configured in 1us
+	 * units, so use the 1us unit.
+	 */
+ if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) {
+ tx_coal->unit_1us = 1;
+ rx_coal->unit_1us = 1;
+ }
+
+ if (ae_dev->dev_specs.int_ql_max) {
+ tx_coal->ql_enable = 1;
+ rx_coal->ql_enable = 1;
+ tx_coal->int_ql_max = ae_dev->dev_specs.int_ql_max;
+ rx_coal->int_ql_max = ae_dev->dev_specs.int_ql_max;
+ tx_coal->int_ql = ptx_coal->int_ql;
+ rx_coal->int_ql = prx_coal->int_ql;
+ }
+}
+
+static void
+hns3_vector_coalesce_init_hw(struct hns3_enet_tqp_vector *tqp_vector,
+ struct hns3_nic_priv *priv)
+{
+ struct hns3_enet_coalesce *tx_coal = &tqp_vector->tx_group.coal;
+ struct hns3_enet_coalesce *rx_coal = &tqp_vector->rx_group.coal;
+ struct hnae3_handle *h = priv->ae_handle;
+
+ hns3_set_vector_coalesce_tx_gl(tqp_vector, tx_coal->int_gl);
+ hns3_set_vector_coalesce_rx_gl(tqp_vector, rx_coal->int_gl);
+ hns3_set_vector_coalesce_rl(tqp_vector, h->kinfo.int_rl_setting);
+
+ if (tx_coal->ql_enable)
+ hns3_set_vector_coalesce_tx_ql(tqp_vector, tx_coal->int_ql);
+
+ if (rx_coal->ql_enable)
+ hns3_set_vector_coalesce_rx_ql(tqp_vector, rx_coal->int_ql);
+}
+
+static int hns3_nic_set_real_num_queue(struct net_device *netdev)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ struct hnae3_knic_private_info *kinfo = &h->kinfo;
+ struct hnae3_tc_info *tc_info = &kinfo->tc_info;
+ unsigned int queue_size = kinfo->num_tqps;
+ int i, ret;
+
+ if (tc_info->num_tc <= 1 && !tc_info->mqprio_active) {
+ netdev_reset_tc(netdev);
+ } else {
+ ret = netdev_set_num_tc(netdev, tc_info->num_tc);
+ if (ret) {
+ netdev_err(netdev,
+ "netdev_set_num_tc fail, ret=%d!\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < tc_info->num_tc; i++)
+ netdev_set_tc_queue(netdev, i, tc_info->tqp_count[i],
+ tc_info->tqp_offset[i]);
+ }
+
+ ret = netif_set_real_num_tx_queues(netdev, queue_size);
+ if (ret) {
+ netdev_err(netdev,
+ "netif_set_real_num_tx_queues fail, ret=%d!\n", ret);
+ return ret;
+ }
+
+ ret = netif_set_real_num_rx_queues(netdev, queue_size);
+ if (ret) {
+ netdev_err(netdev,
+ "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+u16 hns3_get_max_available_channels(struct hnae3_handle *h)
+{
+ u16 alloc_tqps, max_rss_size, rss_size;
+
+ h->ae_algo->ops->get_tqps_and_rss_info(h, &alloc_tqps, &max_rss_size);
+ rss_size = alloc_tqps / h->kinfo.tc_info.num_tc;
+
+ return min_t(u16, rss_size, max_rss_size);
+}
+
+static void hns3_tqp_enable(struct hnae3_queue *tqp)
+{
+ u32 rcb_reg;
+
+ rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG);
+ rcb_reg |= BIT(HNS3_RING_EN_B);
+ hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg);
+}
+
+static void hns3_tqp_disable(struct hnae3_queue *tqp)
+{
+ u32 rcb_reg;
+
+ rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG);
+ rcb_reg &= ~BIT(HNS3_RING_EN_B);
+ hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg);
+}
+
+static void hns3_free_rx_cpu_rmap(struct net_device *netdev)
+{
+#ifdef CONFIG_RFS_ACCEL
+ free_irq_cpu_rmap(netdev->rx_cpu_rmap);
+ netdev->rx_cpu_rmap = NULL;
+#endif
+}
+
+static int hns3_set_rx_cpu_rmap(struct net_device *netdev)
+{
+#ifdef CONFIG_RFS_ACCEL
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hns3_enet_tqp_vector *tqp_vector;
+ int i, ret;
+
+ if (!netdev->rx_cpu_rmap) {
+ netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->vector_num);
+ if (!netdev->rx_cpu_rmap)
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < priv->vector_num; i++) {
+ tqp_vector = &priv->tqp_vector[i];
+ ret = irq_cpu_rmap_add(netdev->rx_cpu_rmap,
+ tqp_vector->vector_irq);
+ if (ret) {
+ hns3_free_rx_cpu_rmap(netdev);
+ return ret;
+ }
+ }
+#endif
+ return 0;
+}
+
+static int hns3_nic_net_up(struct net_device *netdev)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = priv->ae_handle;
+ int i, j;
+ int ret;
+
+ ret = hns3_nic_reset_all_ring(h);
+ if (ret)
+ return ret;
+
+ clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);
+
+ /* enable the vectors */
+ for (i = 0; i < priv->vector_num; i++)
+ hns3_vector_enable(&priv->tqp_vector[i]);
+
+ /* enable rcb */
+ for (j = 0; j < h->kinfo.num_tqps; j++)
+ hns3_tqp_enable(h->kinfo.tqp[j]);
+
+ /* start the ae_dev */
+ ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0;
+ if (ret) {
+ set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
+ while (j--)
+ hns3_tqp_disable(h->kinfo.tqp[j]);
+
+ for (j = i - 1; j >= 0; j--)
+ hns3_vector_disable(&priv->tqp_vector[j]);
+ }
+
+ return ret;
+}
+
+static void hns3_config_xps(struct hns3_nic_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->vector_num; i++) {
+ struct hns3_enet_tqp_vector *tqp_vector = &priv->tqp_vector[i];
+ struct hns3_enet_ring *ring = tqp_vector->tx_group.ring;
+
+ while (ring) {
+ int ret;
+
+ ret = netif_set_xps_queue(priv->netdev,
+ &tqp_vector->affinity_mask,
+ ring->tqp->tqp_index);
+ if (ret)
+ netdev_warn(priv->netdev,
+ "set xps queue failed: %d", ret);
+
+ ring = ring->next;
+ }
+ }
+}
+
+static int hns3_nic_net_open(struct net_device *netdev)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ struct hnae3_knic_private_info *kinfo;
+ int i, ret;
+
+ if (hns3_nic_resetting(netdev))
+ return -EBUSY;
+
+ if (!test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) {
+ netdev_warn(netdev, "net open repeatedly!\n");
+ return 0;
+ }
+
+ netif_carrier_off(netdev);
+
+ ret = hns3_nic_set_real_num_queue(netdev);
+ if (ret)
+ return ret;
+
+ ret = hns3_nic_net_up(netdev);
+ if (ret) {
+ netdev_err(netdev, "net up fail, ret=%d!\n", ret);
+ return ret;
+ }
+
+ kinfo = &h->kinfo;
+ for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
+ netdev_set_prio_tc_map(netdev, i, kinfo->tc_info.prio_tc[i]);
+
+ if (h->ae_algo->ops->set_timer_task)
+ h->ae_algo->ops->set_timer_task(priv->ae_handle, true);
+
+ hns3_config_xps(priv);
+
+ netif_dbg(h, drv, netdev, "net open\n");
+
+ return 0;
+}
+
+static void hns3_reset_tx_queue(struct hnae3_handle *h)
+{
+ struct net_device *ndev = h->kinfo.netdev;
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ struct netdev_queue *dev_queue;
+ u32 i;
+
+ for (i = 0; i < h->kinfo.num_tqps; i++) {
+ dev_queue = netdev_get_tx_queue(ndev,
+ priv->ring[i].queue_index);
+ netdev_tx_reset_queue(dev_queue);
+ }
+}
+
+static void hns3_nic_net_down(struct net_device *netdev)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ const struct hnae3_ae_ops *ops;
+ int i;
+
+ /* disable vectors */
+ for (i = 0; i < priv->vector_num; i++)
+ hns3_vector_disable(&priv->tqp_vector[i]);
+
+ /* disable rcb */
+ for (i = 0; i < h->kinfo.num_tqps; i++)
+ hns3_tqp_disable(h->kinfo.tqp[i]);
+
+ /* stop ae_dev */
+ ops = priv->ae_handle->ae_algo->ops;
+ if (ops->stop)
+ ops->stop(priv->ae_handle);
+
+	/* Delay ring buffer clearing to hns3_reset_notify_uninit_enet
+	 * during the reset process, because the driver may not be able
+	 * to disable the ring through firmware when bringing the netdev down.
+	 */
+ if (!hns3_nic_resetting(netdev))
+ hns3_clear_all_ring(priv->ae_handle, false);
+
+ hns3_reset_tx_queue(priv->ae_handle);
+}
+
+static int hns3_nic_net_stop(struct net_device *netdev)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
+ return 0;
+
+ netif_dbg(h, drv, netdev, "net stop\n");
+
+ if (h->ae_algo->ops->set_timer_task)
+ h->ae_algo->ops->set_timer_task(priv->ae_handle, false);
+
+ netif_carrier_off(netdev);
+ netif_tx_disable(netdev);
+
+ hns3_nic_net_down(netdev);
+
+ return 0;
+}
+
+static int hns3_nic_uc_sync(struct net_device *netdev,
+ const unsigned char *addr)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (h->ae_algo->ops->add_uc_addr)
+ return h->ae_algo->ops->add_uc_addr(h, addr);
+
+ return 0;
+}
+
+static int hns3_nic_uc_unsync(struct net_device *netdev,
+ const unsigned char *addr)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+	/* Ignore the request to remove the device address, because we store
+	 * the device address together with the other addresses of the uc list
+	 * in the function's mac filter list.
+	 */
+ if (ether_addr_equal(addr, netdev->dev_addr))
+ return 0;
+
+ if (h->ae_algo->ops->rm_uc_addr)
+ return h->ae_algo->ops->rm_uc_addr(h, addr);
+
+ return 0;
+}
+
+static int hns3_nic_mc_sync(struct net_device *netdev,
+ const unsigned char *addr)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (h->ae_algo->ops->add_mc_addr)
+ return h->ae_algo->ops->add_mc_addr(h, addr);
+
+ return 0;
+}
+
+static int hns3_nic_mc_unsync(struct net_device *netdev,
+ const unsigned char *addr)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (h->ae_algo->ops->rm_mc_addr)
+ return h->ae_algo->ops->rm_mc_addr(h, addr);
+
+ return 0;
+}
+
+static u8 hns3_get_netdev_flags(struct net_device *netdev)
+{
+ u8 flags = 0;
+
+ if (netdev->flags & IFF_PROMISC)
+ flags = HNAE3_USER_UPE | HNAE3_USER_MPE | HNAE3_BPE;
+ else if (netdev->flags & IFF_ALLMULTI)
+ flags = HNAE3_USER_MPE;
+
+ return flags;
+}
+
+static void hns3_nic_set_rx_mode(struct net_device *netdev)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ u8 new_flags;
+
+ new_flags = hns3_get_netdev_flags(netdev);
+
+ __dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync);
+ __dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync);
+
+	/* When user-mode promiscuous mode is enabled, vlan filtering is
+	 * disabled to let all packets in.
+	 */
+ h->netdev_flags = new_flags;
+ hns3_request_update_promisc_mode(h);
+}
+
+void hns3_request_update_promisc_mode(struct hnae3_handle *handle)
+{
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+
+ if (ops->request_update_promisc_mode)
+ ops->request_update_promisc_mode(handle);
+}
+
+static u32 hns3_tx_spare_space(struct hns3_enet_ring *ring)
+{
+ struct hns3_tx_spare *tx_spare = ring->tx_spare;
+ u32 ntc, ntu;
+
+	/* This smp_load_acquire() pairs with smp_store_release() in
+	 * hns3_tx_spare_update() called in the tx desc cleaning process.
+	 */
+ ntc = smp_load_acquire(&tx_spare->last_to_clean);
+ ntu = tx_spare->next_to_use;
+
+ if (ntc > ntu)
+ return ntc - ntu - 1;
+
+	/* The free tx buffer is divided into two parts, so pick the
+	 * larger one.
+	 */
+ return max(ntc, tx_spare->len - ntu) - 1;
+}
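
hns3_tx_spare_space() treats the spare buffer as a ring and always holds one byte back so next_to_use can never catch up with last_to_clean. The same rule, written out over plain offsets:

    /* ntc == last_to_clean, ntu == next_to_use, len == tx_spare->len */
    static u32 demo_spare_space(u32 ntc, u32 ntu, u32 len)
    {
            if (ntc > ntu)
                    return ntc - ntu - 1;

            /* free space is split in two pieces; report the larger one */
            return max(ntc, len - ntu) - 1;
    }
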
+
+static void hns3_tx_spare_update(struct hns3_enet_ring *ring)
+{
+ struct hns3_tx_spare *tx_spare = ring->tx_spare;
+
+ if (!tx_spare ||
+ tx_spare->last_to_clean == tx_spare->next_to_clean)
+ return;
+
+	/* This smp_store_release() pairs with smp_load_acquire() in
+	 * hns3_tx_spare_space() called in the xmit process.
+	 */
+ smp_store_release(&tx_spare->last_to_clean,
+ tx_spare->next_to_clean);
+}
+
+static bool hns3_can_use_tx_bounce(struct hns3_enet_ring *ring,
+ struct sk_buff *skb,
+ u32 space)
+{
+ u32 len = skb->len <= ring->tx_copybreak ? skb->len :
+ skb_headlen(skb);
+
+ if (len > ring->tx_copybreak)
+ return false;
+
+ if (ALIGN(len, dma_get_cache_alignment()) > space) {
+ hns3_ring_stats_update(ring, tx_spare_full);
+ return false;
+ }
+
+ return true;
+}
+
+static bool hns3_can_use_tx_sgl(struct hns3_enet_ring *ring,
+ struct sk_buff *skb,
+ u32 space)
+{
+ if (skb->len <= ring->tx_copybreak || !tx_sgl ||
+ (!skb_has_frag_list(skb) &&
+ skb_shinfo(skb)->nr_frags < tx_sgl))
+ return false;
+
+ if (space < HNS3_MAX_SGL_SIZE) {
+ hns3_ring_stats_update(ring, tx_spare_full);
+ return false;
+ }
+
+ return true;
+}
+
+static void hns3_init_tx_spare_buffer(struct hns3_enet_ring *ring)
+{
+ u32 alloc_size = ring->tqp->handle->kinfo.tx_spare_buf_size;
+ struct hns3_tx_spare *tx_spare;
+ struct page *page;
+ dma_addr_t dma;
+ int order;
+
+ if (!alloc_size)
+ return;
+
+ order = get_order(alloc_size);
+ if (order > MAX_ORDER) {
+ if (net_ratelimit())
+ dev_warn(ring_to_dev(ring), "failed to allocate tx spare buffer, exceed to max order\n");
+ return;
+ }
+
+ tx_spare = devm_kzalloc(ring_to_dev(ring), sizeof(*tx_spare),
+ GFP_KERNEL);
+ if (!tx_spare) {
+		/* The driver still works without the tx spare buffer */
+ dev_warn(ring_to_dev(ring), "failed to allocate hns3_tx_spare\n");
+ goto devm_kzalloc_error;
+ }
+
+ page = alloc_pages_node(dev_to_node(ring_to_dev(ring)),
+ GFP_KERNEL, order);
+ if (!page) {
+ dev_warn(ring_to_dev(ring), "failed to allocate tx spare pages\n");
+ goto alloc_pages_error;
+ }
+
+ dma = dma_map_page(ring_to_dev(ring), page, 0,
+ PAGE_SIZE << order, DMA_TO_DEVICE);
+ if (dma_mapping_error(ring_to_dev(ring), dma)) {
+ dev_warn(ring_to_dev(ring), "failed to map pages for tx spare\n");
+ goto dma_mapping_error;
+ }
+
+ tx_spare->dma = dma;
+ tx_spare->buf = page_address(page);
+ tx_spare->len = PAGE_SIZE << order;
+ ring->tx_spare = tx_spare;
+ return;
+
+dma_mapping_error:
+ put_page(page);
+alloc_pages_error:
+ devm_kfree(ring_to_dev(ring), tx_spare);
+devm_kzalloc_error:
+ ring->tqp->handle->kinfo.tx_spare_buf_size = 0;
+}
+
+/* Use hns3_tx_spare_space() to make sure there is enough buffer space
+ * before calling the function below to allocate a tx buffer.
+ */
+static void *hns3_tx_spare_alloc(struct hns3_enet_ring *ring,
+ unsigned int size, dma_addr_t *dma,
+ u32 *cb_len)
+{
+ struct hns3_tx_spare *tx_spare = ring->tx_spare;
+ u32 ntu = tx_spare->next_to_use;
+
+ size = ALIGN(size, dma_get_cache_alignment());
+ *cb_len = size;
+
+	/* The tx spare buffer wraps back here because the space left at
+	 * the end of the freed tx buffer is not enough.
+	 */
+ if (ntu + size > tx_spare->len) {
+ *cb_len += (tx_spare->len - ntu);
+ ntu = 0;
+ }
+
+ tx_spare->next_to_use = ntu + size;
+ if (tx_spare->next_to_use == tx_spare->len)
+ tx_spare->next_to_use = 0;
+
+ *dma = tx_spare->dma + ntu;
+
+ return tx_spare->buf + ntu;
+}
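
A concrete run of the wrap handling above, with illustrative numbers only:

    /* tx_spare->len = 4096, next_to_use = 4000, cache-aligned size = 256:
     * the 96-byte tail cannot hold the request, so the caller is handed
     * offset 0, *cb_len = 256 + 96 = 352 (the skipped tail is charged to
     * this buffer so reclaim stays in step) and next_to_use becomes 256.
     */
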
+
+static void hns3_tx_spare_rollback(struct hns3_enet_ring *ring, u32 len)
+{
+ struct hns3_tx_spare *tx_spare = ring->tx_spare;
+
+ if (len > tx_spare->next_to_use) {
+ len -= tx_spare->next_to_use;
+ tx_spare->next_to_use = tx_spare->len - len;
+ } else {
+ tx_spare->next_to_use -= len;
+ }
+}
+
+static void hns3_tx_spare_reclaim_cb(struct hns3_enet_ring *ring,
+ struct hns3_desc_cb *cb)
+{
+ struct hns3_tx_spare *tx_spare = ring->tx_spare;
+ u32 ntc = tx_spare->next_to_clean;
+ u32 len = cb->length;
+
+ tx_spare->next_to_clean += len;
+
+ if (tx_spare->next_to_clean >= tx_spare->len) {
+ tx_spare->next_to_clean -= tx_spare->len;
+
+ if (tx_spare->next_to_clean) {
+ ntc = 0;
+ len = tx_spare->next_to_clean;
+ }
+ }
+
+	/* This tx spare buffer is only really reclaimed after calling
+	 * hns3_tx_spare_update(), so it is still safe to use the info in
+	 * the tx buffer to do the dma sync or sg unmapping after
+	 * tx_spare->next_to_clean is moved forward.
+	 */
+ if (cb->type & (DESC_TYPE_BOUNCE_HEAD | DESC_TYPE_BOUNCE_ALL)) {
+ dma_addr_t dma = tx_spare->dma + ntc;
+
+ dma_sync_single_for_cpu(ring_to_dev(ring), dma, len,
+ DMA_TO_DEVICE);
+ } else {
+ struct sg_table *sgt = tx_spare->buf + ntc;
+
+ dma_unmap_sg(ring_to_dev(ring), sgt->sgl, sgt->orig_nents,
+ DMA_TO_DEVICE);
+ }
+}
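
Continuing the same illustrative numbers on the reclaim side: cleaning that 352-byte cb with next_to_clean = 4000 wraps past the end of the buffer, and the sync then covers exactly the bytes that were really written at the start:

    /* next_to_clean: 4000 + 352 = 4352 -> wraps to 256
     * wrapped value != 0         -> ntc = 0, len = 256
     * dma_sync_single_for_cpu(dev, tx_spare->dma + 0, 256, DMA_TO_DEVICE)
     * (the unused 96-byte tail at 4000..4095 is skipped)
     */
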
+
+static int hns3_set_tso(struct sk_buff *skb, u32 *paylen_fdop_ol4cs,
+ u16 *mss, u32 *type_cs_vlan_tso, u32 *send_bytes)
+{
+ u32 l4_offset, hdr_len;
+ union l3_hdr_info l3;
+ union l4_hdr_info l4;
+ u32 l4_paylen;
+ int ret;
+
+ if (!skb_is_gso(skb))
+ return 0;
+
+ ret = skb_cow_head(skb, 0);
+ if (unlikely(ret < 0))
+ return ret;
+
+ l3.hdr = skb_network_header(skb);
+ l4.hdr = skb_transport_header(skb);
+
+	/* Software should clear the IPv4 header's checksum field when TSO
+	 * is needed.
+	 */
+ if (l3.v4->version == 4)
+ l3.v4->check = 0;
+
+ /* tunnel packet */
+ if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
+ SKB_GSO_GRE_CSUM |
+ SKB_GSO_UDP_TUNNEL |
+ SKB_GSO_UDP_TUNNEL_CSUM)) {
+ /* reset l3&l4 pointers from outer to inner headers */
+ l3.hdr = skb_inner_network_header(skb);
+ l4.hdr = skb_inner_transport_header(skb);
+
+		/* Software should clear the IPv4 header's checksum field
+		 * when TSO is needed.
+		 */
+ if (l3.v4->version == 4)
+ l3.v4->check = 0;
+ }
+
+ /* normal or tunnel packet */
+ l4_offset = l4.hdr - skb->data;
+
+ /* remove payload length from inner pseudo checksum when tso */
+ l4_paylen = skb->len - l4_offset;
+
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
+ hdr_len = sizeof(*l4.udp) + l4_offset;
+ csum_replace_by_diff(&l4.udp->check,
+ (__force __wsum)htonl(l4_paylen));
+ } else {
+ hdr_len = (l4.tcp->doff << 2) + l4_offset;
+ csum_replace_by_diff(&l4.tcp->check,
+ (__force __wsum)htonl(l4_paylen));
+ }
+
+ *send_bytes = (skb_shinfo(skb)->gso_segs - 1) * hdr_len + skb->len;
+
+ /* find the txbd field values */
+ *paylen_fdop_ol4cs = skb->len - hdr_len;
+ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_TSO_B, 1);
+
+ /* offload outer UDP header checksum */
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)
+ hns3_set_field(*paylen_fdop_ol4cs, HNS3_TXD_OL4CS_B, 1);
+
+ /* get MSS for TSO */
+ *mss = skb_shinfo(skb)->gso_size;
+
+ trace_hns3_tso(skb);
+
+ return 0;
+}
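
A quick check of the bookkeeping above with example numbers: for a 9000-byte GSO skb carrying 54 bytes of headers (14 Ethernet + 20 IPv4 + 20 TCP, so hdr_len = 54) and gso_size 1448, the stack reports gso_segs = 7, hence:

    /* send_bytes        = (7 - 1) * 54 + 9000 = 9324 (headers repeat per segment)
     * paylen_fdop_ol4cs = 9000 - 54           = 8946
     * mss               = 1448
     */
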
+
+static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
+ u8 *il4_proto)
+{
+ union l3_hdr_info l3;
+ unsigned char *l4_hdr;
+ unsigned char *exthdr;
+ u8 l4_proto_tmp;
+ __be16 frag_off;
+
+ /* find outer header point */
+ l3.hdr = skb_network_header(skb);
+ l4_hdr = skb_transport_header(skb);
+
+ if (skb->protocol == htons(ETH_P_IPV6)) {
+ exthdr = l3.hdr + sizeof(*l3.v6);
+ l4_proto_tmp = l3.v6->nexthdr;
+ if (l4_hdr != exthdr)
+ ipv6_skip_exthdr(skb, exthdr - skb->data,
+ &l4_proto_tmp, &frag_off);
+ } else if (skb->protocol == htons(ETH_P_IP)) {
+ l4_proto_tmp = l3.v4->protocol;
+ } else {
+ return -EINVAL;
+ }
+
+ *ol4_proto = l4_proto_tmp;
+
+ /* tunnel packet */
+ if (!skb->encapsulation) {
+ *il4_proto = 0;
+ return 0;
+ }
+
+ /* find inner header point */
+ l3.hdr = skb_inner_network_header(skb);
+ l4_hdr = skb_inner_transport_header(skb);
+
+ if (l3.v6->version == 6) {
+ exthdr = l3.hdr + sizeof(*l3.v6);
+ l4_proto_tmp = l3.v6->nexthdr;
+ if (l4_hdr != exthdr)
+ ipv6_skip_exthdr(skb, exthdr - skb->data,
+ &l4_proto_tmp, &frag_off);
+ } else if (l3.v4->version == 4) {
+ l4_proto_tmp = l3.v4->protocol;
+ }
+
+ *il4_proto = l4_proto_tmp;
+
+ return 0;
+}
+
+/* When skb->encapsulation is 0, skb->ip_summed is CHECKSUM_PARTIAL and the
+ * packet is a udp packet with one of the IANA-assigned tunnel ports as its
+ * dest port, the hardware is expected to do the checksum offload, but it
+ * will not do so when the udp dest port is 4789, 4790 or 6081.
+ */
+static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
+{
+ struct hns3_nic_priv *priv = netdev_priv(skb->dev);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
+ union l4_hdr_info l4;
+
+	/* For device version V3 and above, the hardware can do this
+	 * checksum offload.
+	 */
+ if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
+ return false;
+
+ l4.hdr = skb_transport_header(skb);
+
+ if (!(!skb->encapsulation &&
+ (l4.udp->dest == htons(IANA_VXLAN_UDP_PORT) ||
+ l4.udp->dest == htons(GENEVE_UDP_PORT) ||
+ l4.udp->dest == htons(IANA_VXLAN_GPE_UDP_PORT))))
+ return false;
+
+ return true;
+}
+
+static void hns3_set_outer_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
+ u32 *ol_type_vlan_len_msec)
+{
+ u32 l2_len, l3_len, l4_len;
+ unsigned char *il2_hdr;
+ union l3_hdr_info l3;
+ union l4_hdr_info l4;
+
+ l3.hdr = skb_network_header(skb);
+ l4.hdr = skb_transport_header(skb);
+
+	/* compute OL2 header size, defined in units of 2 bytes */
+ l2_len = l3.hdr - skb->data;
+ hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L2LEN_S, l2_len >> 1);
+
+	/* compute OL3 header size, defined in units of 4 bytes */
+ l3_len = l4.hdr - l3.hdr;
+ hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_S, l3_len >> 2);
+
+ il2_hdr = skb_inner_mac_header(skb);
+	/* compute OL4 header size, defined in units of 4 bytes */
+ l4_len = il2_hdr - l4.hdr;
+ hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_S, l4_len >> 2);
+
+ /* define outer network header type */
+ if (skb->protocol == htons(ETH_P_IP)) {
+ if (skb_is_gso(skb))
+ hns3_set_field(*ol_type_vlan_len_msec,
+ HNS3_TXD_OL3T_S,
+ HNS3_OL3T_IPV4_CSUM);
+ else
+ hns3_set_field(*ol_type_vlan_len_msec,
+ HNS3_TXD_OL3T_S,
+ HNS3_OL3T_IPV4_NO_CSUM);
+ } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_S,
+ HNS3_OL3T_IPV6);
+ }
+
+ if (ol4_proto == IPPROTO_UDP)
+ hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_TUNTYPE_S,
+ HNS3_TUN_MAC_IN_UDP);
+ else if (ol4_proto == IPPROTO_GRE)
+ hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_TUNTYPE_S,
+ HNS3_TUN_NVGRE);
+}
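
Worked example of the outer length encodings for a VXLAN-encapsulated IPv4 frame: a 14-byte outer Ethernet header, a 20-byte outer IPv4 header, and 8 bytes of UDP plus 8 bytes of VXLAN up to the inner MAC header give:

    /* L2LEN   = 14 >> 1 = 7
     * L3LEN   = 20 >> 2 = 5
     * L4LEN   = 16 >> 2 = 4
     * OL3T    = HNS3_OL3T_IPV4_NO_CSUM (or _CSUM for a GSO frame)
     * TUNTYPE = HNS3_TUN_MAC_IN_UDP
     */
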
+
+static void hns3_set_l3_type(struct sk_buff *skb, union l3_hdr_info l3,
+ u32 *type_cs_vlan_tso)
+{
+ if (l3.v4->version == 4) {
+ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S,
+ HNS3_L3T_IPV4);
+
+		/* the stack already computes the IP header checksum, the
+		 * only time we need the hardware to recompute it is in the
+		 * case of TSO.
+		 */
+ if (skb_is_gso(skb))
+ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1);
+ } else if (l3.v6->version == 6) {
+ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S,
+ HNS3_L3T_IPV6);
+ }
+}
+
+static int hns3_set_l4_csum_length(struct sk_buff *skb, union l4_hdr_info l4,
+ u32 l4_proto, u32 *type_cs_vlan_tso)
+{
+	/* compute inner(/normal) L4 header size, in units of 4 bytes */
+ switch (l4_proto) {
+ case IPPROTO_TCP:
+ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
+ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
+ HNS3_L4T_TCP);
+ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
+ l4.tcp->doff);
+ break;
+ case IPPROTO_UDP:
+ if (hns3_tunnel_csum_bug(skb)) {
+ int ret = skb_put_padto(skb, HNS3_MIN_TUN_PKT_LEN);
+
+ return ret ? ret : skb_checksum_help(skb);
+ }
+
+ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
+ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
+ HNS3_L4T_UDP);
+ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
+ (sizeof(struct udphdr) >> 2));
+ break;
+ case IPPROTO_SCTP:
+ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
+ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
+ HNS3_L4T_SCTP);
+ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
+ (sizeof(struct sctphdr) >> 2));
+ break;
+ default:
+		/* drop the tunnel packet if the hardware doesn't support
+		 * it, because the hardware can't calculate the csum when
+		 * doing TSO.
+		 */
+ if (skb_is_gso(skb))
+ return -EDOM;
+
+		/* the stack computes the IP header checksum already, the
+		 * driver calculates the l4 checksum when not doing TSO.
+		 */
+ return skb_checksum_help(skb);
+ }
+
+ return 0;
+}
+
+static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
+ u8 il4_proto, u32 *type_cs_vlan_tso,
+ u32 *ol_type_vlan_len_msec)
+{
+ unsigned char *l2_hdr = skb->data;
+ u32 l4_proto = ol4_proto;
+ union l4_hdr_info l4;
+ union l3_hdr_info l3;
+ u32 l2_len, l3_len;
+
+ l4.hdr = skb_transport_header(skb);
+ l3.hdr = skb_network_header(skb);
+
+ /* handle encapsulation skb */
+ if (skb->encapsulation) {
+		/* If this is not a UDP/GRE encapsulation skb */
+ if (!(ol4_proto == IPPROTO_UDP || ol4_proto == IPPROTO_GRE)) {
+			/* drop the tunnel packet if the hardware doesn't
+			 * support it, because the hardware can't calculate
+			 * the csum when doing TSO.
+			 */
+ if (skb_is_gso(skb))
+ return -EDOM;
+
+			/* the stack computes the IP header checksum already,
+			 * the driver calculates the l4 checksum when not
+			 * doing TSO.
+			 */
+ return skb_checksum_help(skb);
+ }
+
+ hns3_set_outer_l2l3l4(skb, ol4_proto, ol_type_vlan_len_msec);
+
+ /* switch to inner header */
+ l2_hdr = skb_inner_mac_header(skb);
+ l3.hdr = skb_inner_network_header(skb);
+ l4.hdr = skb_inner_transport_header(skb);
+ l4_proto = il4_proto;
+ }
+
+ hns3_set_l3_type(skb, l3, type_cs_vlan_tso);
+
+	/* compute inner(/normal) L2 header size, in units of 2 bytes */
+ l2_len = l3.hdr - l2_hdr;
+ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_S, l2_len >> 1);
+
+	/* compute inner(/normal) L3 header size, in units of 4 bytes */
+ l3_len = l4.hdr - l3.hdr;
+ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_S, l3_len >> 2);
+
+ return hns3_set_l4_csum_length(skb, l4, l4_proto, type_cs_vlan_tso);
+}
+
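+/* Parse the VLAN information of a TX skb. Returns a negative errno on
+ * error, 0 when no HW VLAN tag insertion is needed, or
+ * HNS3_INNER_VLAN_TAG/HNS3_OUTER_VLAN_TAG to tell the caller which
+ * descriptor VLAN tag field to fill from the skb's tag.
+ */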
+static int hns3_handle_vtags(struct hns3_enet_ring *tx_ring,
+ struct sk_buff *skb)
+{
+ struct hnae3_handle *handle = tx_ring->tqp->handle;
+ struct hnae3_ae_dev *ae_dev;
+ struct vlan_ethhdr *vhdr;
+ int rc;
+
+ if (!(skb->protocol == htons(ETH_P_8021Q) ||
+ skb_vlan_tag_present(skb)))
+ return 0;
+
+ /* Due to a HW limitation on HNAE3_DEVICE_VERSION_V2, if port based
+ * VLAN insertion is enabled, only one VLAN header is allowed in the
+ * skb, otherwise it will cause a RAS error.
+ */
+ ae_dev = pci_get_drvdata(handle->pdev);
+ if (unlikely(skb_vlan_tagged_multi(skb) &&
+ ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
+ handle->port_base_vlan_state ==
+ HNAE3_PORT_BASE_VLAN_ENABLE))
+ return -EINVAL;
+
+ if (skb->protocol == htons(ETH_P_8021Q) &&
+ !(handle->kinfo.netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
+ /* When HW VLAN acceleration is turned off, and the stack
+ * sets the protocol to 802.1q, the driver just needs to
+ * set the protocol to the encapsulated ethertype.
+ */
+ skb->protocol = vlan_get_protocol(skb);
+ return 0;
+ }
+
+ if (skb_vlan_tag_present(skb)) {
+ /* Based on the hw strategy, use out_vtag in the two-layer tag
+ * case, and use inner_vtag in the single tag case.
+ */
+ if (skb->protocol == htons(ETH_P_8021Q) &&
+ handle->port_base_vlan_state ==
+ HNAE3_PORT_BASE_VLAN_DISABLE)
+ rc = HNS3_OUTER_VLAN_TAG;
+ else
+ rc = HNS3_INNER_VLAN_TAG;
+
+ skb->protocol = vlan_get_protocol(skb);
+ return rc;
+ }
+
+ rc = skb_cow_head(skb, 0);
+ if (unlikely(rc < 0))
+ return rc;
+
+ vhdr = skb_vlan_eth_hdr(skb);
+ vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority << VLAN_PRIO_SHIFT)
+ & VLAN_PRIO_MASK);
+
+ skb->protocol = vlan_get_protocol(skb);
+ return 0;
+}
+
+/* check if the hardware is capable of checksum offloading */
+static bool hns3_check_hw_tx_csum(struct sk_buff *skb)
+{
+ struct hns3_nic_priv *priv = netdev_priv(skb->dev);
+
+ /* Note: due to backward compatibility of the TX descriptor, the HW
+ * checksum of non-IP packets and GSO packets is handled at a
+ * different place in the following code
+ */
+ if (skb_csum_is_sctp(skb) || skb_is_gso(skb) ||
+ !test_bit(HNS3_NIC_STATE_HW_TX_CSUM_ENABLE, &priv->state))
+ return false;
+
+ return true;
+}
+
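+/* Scratch copy of the per-packet TX descriptor fields, filled in by the
+ * helpers below and written to the hardware descriptor in one place by
+ * hns3_fill_skb_desc().
+ */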
+struct hns3_desc_param {
+ u32 paylen_ol4cs;
+ u32 ol_type_vlan_len_msec;
+ u32 type_cs_vlan_tso;
+ u16 mss_hw_csum;
+ u16 inner_vtag;
+ u16 out_vtag;
+};
+
+static void hns3_init_desc_data(struct sk_buff *skb, struct hns3_desc_param *pa)
+{
+ pa->paylen_ol4cs = skb->len;
+ pa->ol_type_vlan_len_msec = 0;
+ pa->type_cs_vlan_tso = 0;
+ pa->mss_hw_csum = 0;
+ pa->inner_vtag = 0;
+ pa->out_vtag = 0;
+}
+
+static int hns3_handle_vlan_info(struct hns3_enet_ring *ring,
+ struct sk_buff *skb,
+ struct hns3_desc_param *param)
+{
+ int ret;
+
+ ret = hns3_handle_vtags(ring, skb);
+ if (unlikely(ret < 0)) {
+ hns3_ring_stats_update(ring, tx_vlan_err);
+ return ret;
+ } else if (ret == HNS3_INNER_VLAN_TAG) {
+ param->inner_vtag = skb_vlan_tag_get(skb);
+ param->inner_vtag |= (skb->priority << VLAN_PRIO_SHIFT) &
+ VLAN_PRIO_MASK;
+ hns3_set_field(param->type_cs_vlan_tso, HNS3_TXD_VLAN_B, 1);
+ } else if (ret == HNS3_OUTER_VLAN_TAG) {
+ param->out_vtag = skb_vlan_tag_get(skb);
+ param->out_vtag |= (skb->priority << VLAN_PRIO_SHIFT) &
+ VLAN_PRIO_MASK;
+ hns3_set_field(param->ol_type_vlan_len_msec, HNS3_TXD_OVLAN_B,
+ 1);
+ }
+ return 0;
+}
+
+static int hns3_handle_csum_partial(struct hns3_enet_ring *ring,
+ struct sk_buff *skb,
+ struct hns3_desc_cb *desc_cb,
+ struct hns3_desc_param *param)
+{
+ u8 ol4_proto, il4_proto;
+ int ret;
+
+ if (hns3_check_hw_tx_csum(skb)) {
+ /* set checksum start and offset, in units of 2 bytes */
+ hns3_set_field(param->type_cs_vlan_tso, HNS3_TXD_CSUM_START_S,
+ skb_checksum_start_offset(skb) >> 1);
+ hns3_set_field(param->ol_type_vlan_len_msec,
+ HNS3_TXD_CSUM_OFFSET_S,
+ skb->csum_offset >> 1);
+ param->mss_hw_csum |= BIT(HNS3_TXD_HW_CS_B);
+ return 0;
+ }
+
+ skb_reset_mac_len(skb);
+
+ ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
+ if (unlikely(ret < 0)) {
+ hns3_ring_stats_update(ring, tx_l4_proto_err);
+ return ret;
+ }
+
+ ret = hns3_set_l2l3l4(skb, ol4_proto, il4_proto,
+ &param->type_cs_vlan_tso,
+ &param->ol_type_vlan_len_msec);
+ if (unlikely(ret < 0)) {
+ hns3_ring_stats_update(ring, tx_l2l3l4_err);
+ return ret;
+ }
+
+ ret = hns3_set_tso(skb, &param->paylen_ol4cs, &param->mss_hw_csum,
+ &param->type_cs_vlan_tso, &desc_cb->send_bytes);
+ if (unlikely(ret < 0)) {
+ hns3_ring_stats_update(ring, tx_tso_err);
+ return ret;
+ }
+ return 0;
+}
+
+static int hns3_fill_skb_desc(struct hns3_enet_ring *ring,
+ struct sk_buff *skb, struct hns3_desc *desc,
+ struct hns3_desc_cb *desc_cb)
+{
+ struct hns3_desc_param param;
+ int ret;
+
+ hns3_init_desc_data(skb, &param);
+ ret = hns3_handle_vlan_info(ring, skb, &param);
+ if (unlikely(ret < 0))
+ return ret;
+
+ desc_cb->send_bytes = skb->len;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ ret = hns3_handle_csum_partial(ring, skb, desc_cb, &param);
+ if (ret)
+ return ret;
+ }
+
+ /* Set txbd */
+ desc->tx.ol_type_vlan_len_msec =
+ cpu_to_le32(param.ol_type_vlan_len_msec);
+ desc->tx.type_cs_vlan_tso_len = cpu_to_le32(param.type_cs_vlan_tso);
+ desc->tx.paylen_ol4cs = cpu_to_le32(param.paylen_ol4cs);
+ desc->tx.mss_hw_csum = cpu_to_le16(param.mss_hw_csum);
+ desc->tx.vlan_tag = cpu_to_le16(param.inner_vtag);
+ desc->tx.outer_vlan_tag = cpu_to_le16(param.out_vtag);
+
+ return 0;
+}
+
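+/* Fill TX BDs for one DMA-mapped buffer. A buffer larger than
+ * HNS3_MAX_BD_SIZE is split across several BDs (e.g. assuming
+ * HNS3_MAX_BD_SIZE is 65535, a 70000-byte buffer becomes one 65535-byte
+ * BD plus one 4465-byte BD). Returns the number of BDs used.
+ */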
+static int hns3_fill_desc(struct hns3_enet_ring *ring, dma_addr_t dma,
+ unsigned int size)
+{
+#define HNS3_LIKELY_BD_NUM 1
+
+ struct hns3_desc *desc = &ring->desc[ring->next_to_use];
+ unsigned int frag_buf_num;
+ int k, sizeoflast;
+
+ if (likely(size <= HNS3_MAX_BD_SIZE)) {
+ desc->addr = cpu_to_le64(dma);
+ desc->tx.send_size = cpu_to_le16(size);
+ desc->tx.bdtp_fe_sc_vld_ra_ri =
+ cpu_to_le16(BIT(HNS3_TXD_VLD_B));
+
+ trace_hns3_tx_desc(ring, ring->next_to_use);
+ ring_ptr_move_fw(ring, next_to_use);
+ return HNS3_LIKELY_BD_NUM;
+ }
+
+ frag_buf_num = hns3_tx_bd_count(size);
+ sizeoflast = size % HNS3_MAX_BD_SIZE;
+ sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE;
+
+ /* When frag size is bigger than hardware limit, split this frag */
+ for (k = 0; k < frag_buf_num; k++) {
+ /* now, fill the descriptor */
+ desc->addr = cpu_to_le64(dma + HNS3_MAX_BD_SIZE * k);
+ desc->tx.send_size = cpu_to_le16((k == frag_buf_num - 1) ?
+ (u16)sizeoflast : (u16)HNS3_MAX_BD_SIZE);
+ desc->tx.bdtp_fe_sc_vld_ra_ri =
+ cpu_to_le16(BIT(HNS3_TXD_VLD_B));
+
+ trace_hns3_tx_desc(ring, ring->next_to_use);
+ /* move ring pointer to next */
+ ring_ptr_move_fw(ring, next_to_use);
+
+ desc = &ring->desc[ring->next_to_use];
+ }
+
+ return frag_buf_num;
+}
+
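+/* DMA-map the skb head or a page frag and fill its BDs. Data of type
+ * DESC_TYPE_BOUNCE_HEAD has already been copied to the tx spare buffer,
+ * so it needs no mapping here.
+ */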
+static int hns3_map_and_fill_desc(struct hns3_enet_ring *ring, void *priv,
+ unsigned int type)
+{
+ struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
+ struct device *dev = ring_to_dev(ring);
+ unsigned int size;
+ dma_addr_t dma;
+
+ if (type & (DESC_TYPE_FRAGLIST_SKB | DESC_TYPE_SKB)) {
+ struct sk_buff *skb = (struct sk_buff *)priv;
+
+ size = skb_headlen(skb);
+ if (!size)
+ return 0;
+
+ dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
+ } else if (type & DESC_TYPE_BOUNCE_HEAD) {
+ /* Head data has been filled in hns3_handle_tx_bounce(),
+ * just return 0 here.
+ */
+ return 0;
+ } else {
+ skb_frag_t *frag = (skb_frag_t *)priv;
+
+ size = skb_frag_size(frag);
+ if (!size)
+ return 0;
+
+ dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
+ }
+
+ if (unlikely(dma_mapping_error(dev, dma))) {
+ hns3_ring_stats_update(ring, sw_err_cnt);
+ return -ENOMEM;
+ }
+
+ desc_cb->priv = priv;
+ desc_cb->length = size;
+ desc_cb->dma = dma;
+ desc_cb->type = type;
+
+ return hns3_fill_desc(ring, dma, size);
+}
+
+static unsigned int hns3_skb_bd_num(struct sk_buff *skb, unsigned int *bd_size,
+ unsigned int bd_num)
+{
+ unsigned int size;
+ int i;
+
+ size = skb_headlen(skb);
+ while (size > HNS3_MAX_BD_SIZE) {
+ bd_size[bd_num++] = HNS3_MAX_BD_SIZE;
+ size -= HNS3_MAX_BD_SIZE;
+
+ if (bd_num > HNS3_MAX_TSO_BD_NUM)
+ return bd_num;
+ }
+
+ if (size) {
+ bd_size[bd_num++] = size;
+ if (bd_num > HNS3_MAX_TSO_BD_NUM)
+ return bd_num;
+ }
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ size = skb_frag_size(frag);
+ if (!size)
+ continue;
+
+ while (size > HNS3_MAX_BD_SIZE) {
+ bd_size[bd_num++] = HNS3_MAX_BD_SIZE;
+ size -= HNS3_MAX_BD_SIZE;
+
+ if (bd_num > HNS3_MAX_TSO_BD_NUM)
+ return bd_num;
+ }
+
+ bd_size[bd_num++] = size;
+ if (bd_num > HNS3_MAX_TSO_BD_NUM)
+ return bd_num;
+ }
+
+ return bd_num;
+}
+
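+/* Count the BDs needed for an skb, recursing into its frag list; the
+ * recursion depth is bounded by HNS3_MAX_RECURSION_LEVEL and UINT_MAX is
+ * returned when that limit is exceeded.
+ */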
+static unsigned int hns3_tx_bd_num(struct sk_buff *skb, unsigned int *bd_size,
+ u8 max_non_tso_bd_num, unsigned int bd_num,
+ unsigned int recursion_level)
+{
+#define HNS3_MAX_RECURSION_LEVEL 24
+
+ struct sk_buff *frag_skb;
+
+ /* If the total len is within the max bd limit */
+ if (likely(skb->len <= HNS3_MAX_BD_SIZE && !recursion_level &&
+ !skb_has_frag_list(skb) &&
+ skb_shinfo(skb)->nr_frags < max_non_tso_bd_num))
+ return skb_shinfo(skb)->nr_frags + 1U;
+
+ if (unlikely(recursion_level >= HNS3_MAX_RECURSION_LEVEL))
+ return UINT_MAX;
+
+ bd_num = hns3_skb_bd_num(skb, bd_size, bd_num);
+ if (!skb_has_frag_list(skb) || bd_num > HNS3_MAX_TSO_BD_NUM)
+ return bd_num;
+
+ skb_walk_frags(skb, frag_skb) {
+ bd_num = hns3_tx_bd_num(frag_skb, bd_size, max_non_tso_bd_num,
+ bd_num, recursion_level + 1);
+ if (bd_num > HNS3_MAX_TSO_BD_NUM)
+ return bd_num;
+ }
+
+ return bd_num;
+}
+
+static unsigned int hns3_gso_hdr_len(struct sk_buff *skb)
+{
+ if (!skb->encapsulation)
+ return skb_tcp_all_headers(skb);
+
+ return skb_inner_tcp_all_headers(skb);
+}
+
+/* HW requires the data in every run of max_non_tso_bd_num consecutive
+ * buffers to be larger than MSS. Simplify the check by ensuring that
+ * skb_headlen plus the first consecutive max_non_tso_bd_num - 1 frags are
+ * larger than the gso header len + mss, and that every other consecutive
+ * run of max_non_tso_bd_num - 1 frags is larger than MSS, except the last
+ * max_non_tso_bd_num - 1 frags.
+ */
+static bool hns3_skb_need_linearized(struct sk_buff *skb, unsigned int *bd_size,
+ unsigned int bd_num, u8 max_non_tso_bd_num)
+{
+ unsigned int tot_len = 0;
+ int i;
+
+ for (i = 0; i < max_non_tso_bd_num - 1U; i++)
+ tot_len += bd_size[i];
+
+ /* ensure the first max_non_tso_bd_num frags are greater than
+ * mss + header
+ */
+ if (tot_len + bd_size[max_non_tso_bd_num - 1U] <
+ skb_shinfo(skb)->gso_size + hns3_gso_hdr_len(skb))
+ return true;
+
+ /* ensure every run of max_non_tso_bd_num - 1 consecutive buffers
+ * is greater than mss, except the last one.
+ */
+ for (i = 0; i < bd_num - max_non_tso_bd_num; i++) {
+ tot_len -= bd_size[i];
+ tot_len += bd_size[i + max_non_tso_bd_num - 1U];
+
+ if (tot_len < skb_shinfo(skb)->gso_size)
+ return true;
+ }
+
+ return false;
+}
+
+void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size)
+{
+ int i;
+
+ for (i = 0; i < MAX_SKB_FRAGS; i++)
+ size[i] = skb_frag_size(&shinfo->frags[i]);
+}
+
+static int hns3_skb_linearize(struct hns3_enet_ring *ring,
+ struct sk_buff *skb,
+ unsigned int bd_num)
+{
+ /* 'bd_num == UINT_MAX' means the skb's fraglist has a
+ * recursion level of over HNS3_MAX_RECURSION_LEVEL.
+ */
+ if (bd_num == UINT_MAX) {
+ hns3_ring_stats_update(ring, over_max_recursion);
+ return -ENOMEM;
+ }
+
+ /* The skb->len has exceeded the hw limitation, linearization
+ * will not help.
+ */
+ if (skb->len > HNS3_MAX_TSO_SIZE ||
+ (!skb_is_gso(skb) && skb->len > HNS3_MAX_NON_TSO_SIZE)) {
+ hns3_ring_stats_update(ring, hw_limitation);
+ return -ENOMEM;
+ }
+
+ if (__skb_linearize(skb)) {
+ hns3_ring_stats_update(ring, sw_err_cnt);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
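+/* Check the skb against the hardware BD limits, linearizing it when
+ * necessary, and make sure the ring has enough space for it. Returns the
+ * BD count on success, -ENOMEM when linearizing fails, or -EBUSY when the
+ * queue has to be stopped.
+ */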
+static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
+ struct net_device *netdev,
+ struct sk_buff *skb)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ u8 max_non_tso_bd_num = priv->max_non_tso_bd_num;
+ unsigned int bd_size[HNS3_MAX_TSO_BD_NUM + 1U];
+ unsigned int bd_num;
+
+ bd_num = hns3_tx_bd_num(skb, bd_size, max_non_tso_bd_num, 0, 0);
+ if (unlikely(bd_num > max_non_tso_bd_num)) {
+ if (bd_num <= HNS3_MAX_TSO_BD_NUM && skb_is_gso(skb) &&
+ !hns3_skb_need_linearized(skb, bd_size, bd_num,
+ max_non_tso_bd_num)) {
+ trace_hns3_over_max_bd(skb);
+ goto out;
+ }
+
+ if (hns3_skb_linearize(ring, skb, bd_num))
+ return -ENOMEM;
+
+ bd_num = hns3_tx_bd_count(skb->len);
+
+ hns3_ring_stats_update(ring, tx_copy);
+ }
+
+out:
+ if (likely(ring_space(ring) >= bd_num))
+ return bd_num;
+
+ netif_stop_subqueue(netdev, ring->queue_index);
+ smp_mb(); /* Memory barrier before checking ring_space */
+
+ /* Start queue in case hns3_clean_tx_ring has just made room
+ * available and has not seen the queue stopped state set by
+ * netif_stop_subqueue above.
+ */
+ if (ring_space(ring) >= bd_num && netif_carrier_ok(netdev) &&
+ !test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) {
+ netif_start_subqueue(netdev, ring->queue_index);
+ return bd_num;
+ }
+
+ hns3_ring_stats_update(ring, tx_busy);
+
+ return -EBUSY;
+}
+
+static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig)
+{
+ struct device *dev = ring_to_dev(ring);
+ unsigned int i;
+
+ for (i = 0; i < ring->desc_num; i++) {
+ struct hns3_desc *desc = &ring->desc[ring->next_to_use];
+ struct hns3_desc_cb *desc_cb;
+
+ memset(desc, 0, sizeof(*desc));
+
+ /* check if this is where we started */
+ if (ring->next_to_use == next_to_use_orig)
+ break;
+
+ /* rollback one */
+ ring_ptr_move_bw(ring, next_to_use);
+
+ desc_cb = &ring->desc_cb[ring->next_to_use];
+
+ if (!desc_cb->dma)
+ continue;
+
+ /* unmap the descriptor dma address */
+ if (desc_cb->type & (DESC_TYPE_SKB | DESC_TYPE_FRAGLIST_SKB))
+ dma_unmap_single(dev, desc_cb->dma, desc_cb->length,
+ DMA_TO_DEVICE);
+ else if (desc_cb->type &
+ (DESC_TYPE_BOUNCE_HEAD | DESC_TYPE_BOUNCE_ALL))
+ hns3_tx_spare_rollback(ring, desc_cb->length);
+ else if (desc_cb->length)
+ dma_unmap_page(dev, desc_cb->dma, desc_cb->length,
+ DMA_TO_DEVICE);
+
+ desc_cb->length = 0;
+ desc_cb->dma = 0;
+ desc_cb->type = DESC_TYPE_UNKNOWN;
+ }
+}
+
+static int hns3_fill_skb_to_desc(struct hns3_enet_ring *ring,
+ struct sk_buff *skb, unsigned int type)
+{
+ struct sk_buff *frag_skb;
+ int i, ret, bd_num = 0;
+
+ ret = hns3_map_and_fill_desc(ring, skb, type);
+ if (unlikely(ret < 0))
+ return ret;
+
+ bd_num += ret;
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ ret = hns3_map_and_fill_desc(ring, frag, DESC_TYPE_PAGE);
+ if (unlikely(ret < 0))
+ return ret;
+
+ bd_num += ret;
+ }
+
+ skb_walk_frags(skb, frag_skb) {
+ ret = hns3_fill_skb_to_desc(ring, frag_skb,
+ DESC_TYPE_FRAGLIST_SKB);
+ if (unlikely(ret < 0))
+ return ret;
+
+ bd_num += ret;
+ }
+
+ return bd_num;
+}
+
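+/* TX push: copy the packet's BDs (at most HNS3_MAX_PUSH_BD_NUM) straight
+ * into the device's push memory region instead of only ringing the
+ * doorbell, presumably so the hardware can transmit a small packet
+ * without a separate descriptor fetch.
+ */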
+static void hns3_tx_push_bd(struct hns3_enet_ring *ring, int num)
+{
+#define HNS3_BYTES_PER_64BIT 8
+
+ struct hns3_desc desc[HNS3_MAX_PUSH_BD_NUM] = {};
+ int offset = 0;
+
+ /* make sure everything is visible to device before
+ * executing tx push or updating the doorbell
+ */
+ dma_wmb();
+
+ do {
+ int idx = (ring->next_to_use - num + ring->desc_num) %
+ ring->desc_num;
+
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.tx_push++;
+ u64_stats_update_end(&ring->syncp);
+ memcpy(&desc[offset], &ring->desc[idx],
+ sizeof(struct hns3_desc));
+ offset++;
+ } while (--num);
+
+ __iowrite64_copy(ring->tqp->mem_base, desc,
+ (sizeof(struct hns3_desc) * HNS3_MAX_PUSH_BD_NUM) /
+ HNS3_BYTES_PER_64BIT);
+
+ io_stop_wc();
+}
+
+static void hns3_tx_mem_doorbell(struct hns3_enet_ring *ring)
+{
+#define HNS3_MEM_DOORBELL_OFFSET 64
+
+ __le64 bd_num = cpu_to_le64((u64)ring->pending_buf);
+
+ /* make sure everything is visible to device before
+ * executing tx push or updating the doorbell
+ */
+ dma_wmb();
+
+ __iowrite64_copy(ring->tqp->mem_base + HNS3_MEM_DOORBELL_OFFSET,
+ &bd_num, 1);
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.tx_mem_doorbell += ring->pending_buf;
+ u64_stats_update_end(&ring->syncp);
+
+ io_stop_wc();
+}
+
+static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num,
+ bool doorbell)
+{
+ struct net_device *netdev = ring_to_netdev(ring);
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+
+ /* when tx push is enabled, a packet whose BD count is no more
+ * than HNS3_MAX_PUSH_BD_NUM can be pushed directly.
+ */
+ if (test_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state) && num &&
+ !ring->pending_buf && num <= HNS3_MAX_PUSH_BD_NUM && doorbell) {
+ /* This smp_store_release() pairs with smp_load_acquire() in
+ * hns3_nic_reclaim_desc(). Ensure that the BD valid bit
+ * is updated.
+ */
+ smp_store_release(&ring->last_to_use, ring->next_to_use);
+ hns3_tx_push_bd(ring, num);
+ return;
+ }
+
+ ring->pending_buf += num;
+
+ if (!doorbell) {
+ hns3_ring_stats_update(ring, tx_more);
+ return;
+ }
+
+ /* This smp_store_release() pairs with smp_load_acquire() in
+ * hns3_nic_reclaim_desc(). Ensure that the BD valid bit is updated.
+ */
+ smp_store_release(&ring->last_to_use, ring->next_to_use);
+
+ if (ring->tqp->mem_base)
+ hns3_tx_mem_doorbell(ring);
+ else
+ writel(ring->pending_buf,
+ ring->tqp->io_base + HNS3_RING_TX_RING_TAIL_REG);
+
+ ring->pending_buf = 0;
+}
+
+static void hns3_tsyn(struct net_device *netdev, struct sk_buff *skb,
+ struct hns3_desc *desc)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (!(h->ae_algo->ops->set_tx_hwts_info &&
+ h->ae_algo->ops->set_tx_hwts_info(h, skb)))
+ return;
+
+ desc->tx.bdtp_fe_sc_vld_ra_ri |= cpu_to_le16(BIT(HNS3_TXD_TSYN_B));
+}
+
+static int hns3_handle_tx_bounce(struct hns3_enet_ring *ring,
+ struct sk_buff *skb)
+{
+ struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
+ unsigned int type = DESC_TYPE_BOUNCE_HEAD;
+ unsigned int size = skb_headlen(skb);
+ dma_addr_t dma;
+ int bd_num = 0;
+ u32 cb_len;
+ void *buf;
+ int ret;
+
+ if (skb->len <= ring->tx_copybreak) {
+ size = skb->len;
+ type = DESC_TYPE_BOUNCE_ALL;
+ }
+
+ /* hns3_can_use_tx_bounce() is called to ensure the below
+ * function can always return the tx buffer.
+ */
+ buf = hns3_tx_spare_alloc(ring, size, &dma, &cb_len);
+
+ ret = skb_copy_bits(skb, 0, buf, size);
+ if (unlikely(ret < 0)) {
+ hns3_tx_spare_rollback(ring, cb_len);
+ hns3_ring_stats_update(ring, copy_bits_err);
+ return ret;
+ }
+
+ desc_cb->priv = skb;
+ desc_cb->length = cb_len;
+ desc_cb->dma = dma;
+ desc_cb->type = type;
+
+ bd_num += hns3_fill_desc(ring, dma, size);
+
+ if (type == DESC_TYPE_BOUNCE_HEAD) {
+ ret = hns3_fill_skb_to_desc(ring, skb,
+ DESC_TYPE_BOUNCE_HEAD);
+ if (unlikely(ret < 0))
+ return ret;
+
+ bd_num += ret;
+ }
+
+ dma_sync_single_for_device(ring_to_dev(ring), dma, size,
+ DMA_TO_DEVICE);
+
+ hns3_ring_stats_update(ring, tx_bounce);
+
+ return bd_num;
+}
+
+static int hns3_handle_tx_sgl(struct hns3_enet_ring *ring,
+ struct sk_buff *skb)
+{
+ struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
+ u32 nfrag = skb_shinfo(skb)->nr_frags + 1;
+ struct sg_table *sgt;
+ int i, bd_num = 0;
+ dma_addr_t dma;
+ u32 cb_len;
+ int nents;
+
+ if (skb_has_frag_list(skb))
+ nfrag = HNS3_MAX_TSO_BD_NUM;
+
+ /* hns3_can_use_tx_sgl() is called to ensure the below
+ * function can always return the tx buffer.
+ */
+ sgt = hns3_tx_spare_alloc(ring, HNS3_SGL_SIZE(nfrag),
+ &dma, &cb_len);
+
+ /* the scatterlist follows right after the sg table */
+ sgt->sgl = (struct scatterlist *)(sgt + 1);
+ sg_init_table(sgt->sgl, nfrag);
+ nents = skb_to_sgvec(skb, sgt->sgl, 0, skb->len);
+ if (unlikely(nents < 0)) {
+ hns3_tx_spare_rollback(ring, cb_len);
+ hns3_ring_stats_update(ring, skb2sgl_err);
+ return -ENOMEM;
+ }
+
+ sgt->orig_nents = nents;
+ sgt->nents = dma_map_sg(ring_to_dev(ring), sgt->sgl, sgt->orig_nents,
+ DMA_TO_DEVICE);
+ if (unlikely(!sgt->nents)) {
+ hns3_tx_spare_rollback(ring, cb_len);
+ hns3_ring_stats_update(ring, map_sg_err);
+ return -ENOMEM;
+ }
+
+ desc_cb->priv = skb;
+ desc_cb->length = cb_len;
+ desc_cb->dma = dma;
+ desc_cb->type = DESC_TYPE_SGL_SKB;
+
+ for (i = 0; i < sgt->nents; i++)
+ bd_num += hns3_fill_desc(ring, sg_dma_address(sgt->sgl + i),
+ sg_dma_len(sgt->sgl + i));
+ hns3_ring_stats_update(ring, tx_sgl);
+
+ return bd_num;
+}
+
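+/* Pick the TX filling path: use the tx spare buffer either as an SGL or
+ * as a bounce buffer when possible, otherwise DMA-map the skb directly.
+ */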
+static int hns3_handle_desc_filling(struct hns3_enet_ring *ring,
+ struct sk_buff *skb)
+{
+ u32 space;
+
+ if (!ring->tx_spare)
+ goto out;
+
+ space = hns3_tx_spare_space(ring);
+
+ if (hns3_can_use_tx_sgl(ring, skb, space))
+ return hns3_handle_tx_sgl(ring, skb);
+
+ if (hns3_can_use_tx_bounce(ring, skb, space))
+ return hns3_handle_tx_bounce(ring, skb);
+
+out:
+ return hns3_fill_skb_to_desc(ring, skb, DESC_TYPE_SKB);
+}
+
+static int hns3_handle_skb_desc(struct hns3_enet_ring *ring,
+ struct sk_buff *skb,
+ struct hns3_desc_cb *desc_cb,
+ int next_to_use_head)
+{
+ int ret;
+
+ ret = hns3_fill_skb_desc(ring, skb, &ring->desc[ring->next_to_use],
+ desc_cb);
+ if (unlikely(ret < 0))
+ goto fill_err;
+
+ /* 'ret < 0' means a filling error, 'ret == 0' means skb->len is
+ * zero (which is unlikely), and 'ret > 0' is the number of tx
+ * descriptors that need to be notified to the hw.
+ */
+ ret = hns3_handle_desc_filling(ring, skb);
+ if (likely(ret > 0))
+ return ret;
+
+fill_err:
+ hns3_clear_desc(ring, next_to_use_head);
+ return ret;
+}
+
+netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hns3_enet_ring *ring = &priv->ring[skb->queue_mapping];
+ struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
+ struct netdev_queue *dev_queue;
+ int pre_ntu, ret;
+ bool doorbell;
+
+ /* Hardware can only handle frames longer than 32 bytes, so pad short ones */
+ if (skb_put_padto(skb, HNS3_MIN_TX_LEN)) {
+ hns3_tx_doorbell(ring, 0, !netdev_xmit_more());
+
+ hns3_ring_stats_update(ring, sw_err_cnt);
+
+ return NETDEV_TX_OK;
+ }
+
+ /* Prefetch the data used later */
+ prefetch(skb->data);
+
+ ret = hns3_nic_maybe_stop_tx(ring, netdev, skb);
+ if (unlikely(ret <= 0)) {
+ if (ret == -EBUSY) {
+ hns3_tx_doorbell(ring, 0, true);
+ return NETDEV_TX_BUSY;
+ }
+
+ hns3_rl_err(netdev, "xmit error: %d!\n", ret);
+ goto out_err_tx_ok;
+ }
+
+ ret = hns3_handle_skb_desc(ring, skb, desc_cb, ring->next_to_use);
+ if (unlikely(ret <= 0))
+ goto out_err_tx_ok;
+
+ pre_ntu = ring->next_to_use ? (ring->next_to_use - 1) :
+ (ring->desc_num - 1);
+
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
+ hns3_tsyn(netdev, skb, &ring->desc[pre_ntu]);
+
+ ring->desc[pre_ntu].tx.bdtp_fe_sc_vld_ra_ri |=
+ cpu_to_le16(BIT(HNS3_TXD_FE_B));
+ trace_hns3_tx_desc(ring, pre_ntu);
+
+ skb_tx_timestamp(skb);
+
+ /* Complete translate all packets */
+ dev_queue = netdev_get_tx_queue(netdev, ring->queue_index);
+ doorbell = __netdev_tx_sent_queue(dev_queue, desc_cb->send_bytes,
+ netdev_xmit_more());
+ hns3_tx_doorbell(ring, ret, doorbell);
+
+ return NETDEV_TX_OK;
+
+out_err_tx_ok:
+ dev_kfree_skb_any(skb);
+ hns3_tx_doorbell(ring, 0, !netdev_xmit_more());
+ return NETDEV_TX_OK;
+}
+
+static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
+{
+ char format_mac_addr_perm[HNAE3_FORMAT_MAC_ADDR_LEN];
+ char format_mac_addr_sa[HNAE3_FORMAT_MAC_ADDR_LEN];
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ struct sockaddr *mac_addr = p;
+ int ret;
+
+ if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ if (ether_addr_equal(netdev->dev_addr, mac_addr->sa_data)) {
+ hnae3_format_mac_addr(format_mac_addr_sa, mac_addr->sa_data);
+ netdev_info(netdev, "already using mac address %s\n",
+ format_mac_addr_sa);
+ return 0;
+ }
+
+ /* For VF device, if there is a perm_addr, then the user will not
+ * be allowed to change the address.
+ */
+ if (!hns3_is_phys_func(h->pdev) &&
+ !is_zero_ether_addr(netdev->perm_addr)) {
+ hnae3_format_mac_addr(format_mac_addr_perm, netdev->perm_addr);
+ hnae3_format_mac_addr(format_mac_addr_sa, mac_addr->sa_data);
+ netdev_err(netdev, "has permanent MAC %s, user MAC %s not allow\n",
+ format_mac_addr_perm, format_mac_addr_sa);
+ return -EPERM;
+ }
+
+ ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data, false);
+ if (ret) {
+ netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret);
+ return ret;
+ }
+
+ eth_hw_addr_set(netdev, mac_addr->sa_data);
+
+ return 0;
+}
+
+static int hns3_nic_do_ioctl(struct net_device *netdev,
+ struct ifreq *ifr, int cmd)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (!netif_running(netdev))
+ return -EINVAL;
+
+ if (!h->ae_algo->ops->do_ioctl)
+ return -EOPNOTSUPP;
+
+ return h->ae_algo->ops->do_ioctl(h, ifr, cmd);
+}
+
+static int hns3_nic_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ netdev_features_t changed = netdev->features ^ features;
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = priv->ae_handle;
+ bool enable;
+ int ret;
+
+ if (changed & (NETIF_F_GRO_HW) && h->ae_algo->ops->set_gro_en) {
+ enable = !!(features & NETIF_F_GRO_HW);
+ ret = h->ae_algo->ops->set_gro_en(h, enable);
+ if (ret)
+ return ret;
+ }
+
+ if ((changed & NETIF_F_HW_VLAN_CTAG_RX) &&
+ h->ae_algo->ops->enable_hw_strip_rxvtag) {
+ enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
+ ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, enable);
+ if (ret)
+ return ret;
+ }
+
+ if ((changed & NETIF_F_NTUPLE) && h->ae_algo->ops->enable_fd) {
+ enable = !!(features & NETIF_F_NTUPLE);
+ h->ae_algo->ops->enable_fd(h, enable);
+ }
+
+ if ((netdev->features & NETIF_F_HW_TC) > (features & NETIF_F_HW_TC) &&
+ h->ae_algo->ops->cls_flower_active(h)) {
+ netdev_err(netdev,
+ "there are offloaded TC filters active, cannot disable HW TC offload");
+ return -EINVAL;
+ }
+
+ if ((changed & NETIF_F_HW_VLAN_CTAG_FILTER) &&
+ h->ae_algo->ops->enable_vlan_filter) {
+ enable = !!(features & NETIF_F_HW_VLAN_CTAG_FILTER);
+ ret = h->ae_algo->ops->enable_vlan_filter(h, enable);
+ if (ret)
+ return ret;
+ }
+
+ netdev->features = features;
+ return 0;
+}
+
+static netdev_features_t hns3_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features)
+{
+#define HNS3_MAX_HDR_LEN 480U
+#define HNS3_MAX_L4_HDR_LEN 60U
+
+ size_t len;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return features;
+
+ if (skb->encapsulation)
+ len = skb_inner_transport_header(skb) - skb->data;
+ else
+ len = skb_transport_header(skb) - skb->data;
+
+ /* Assume L4 is 60 bytes, as TCP is the only protocol with a
+ * flexible header length, and its max len is 60 bytes.
+ */
+ len += HNS3_MAX_L4_HDR_LEN;
+
+ /* Hardware only supports checksum on the skb with a max header
+ * len of 480 bytes.
+ */
+ if (len > HNS3_MAX_HDR_LEN)
+ features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+
+ return features;
+}
+
+static void hns3_fetch_stats(struct rtnl_link_stats64 *stats,
+ struct hns3_enet_ring *ring, bool is_tx)
+{
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin(&ring->syncp);
+ if (is_tx) {
+ stats->tx_bytes += ring->stats.tx_bytes;
+ stats->tx_packets += ring->stats.tx_pkts;
+ stats->tx_dropped += ring->stats.sw_err_cnt;
+ stats->tx_dropped += ring->stats.tx_vlan_err;
+ stats->tx_dropped += ring->stats.tx_l4_proto_err;
+ stats->tx_dropped += ring->stats.tx_l2l3l4_err;
+ stats->tx_dropped += ring->stats.tx_tso_err;
+ stats->tx_dropped += ring->stats.over_max_recursion;
+ stats->tx_dropped += ring->stats.hw_limitation;
+ stats->tx_dropped += ring->stats.copy_bits_err;
+ stats->tx_dropped += ring->stats.skb2sgl_err;
+ stats->tx_dropped += ring->stats.map_sg_err;
+ stats->tx_errors += ring->stats.sw_err_cnt;
+ stats->tx_errors += ring->stats.tx_vlan_err;
+ stats->tx_errors += ring->stats.tx_l4_proto_err;
+ stats->tx_errors += ring->stats.tx_l2l3l4_err;
+ stats->tx_errors += ring->stats.tx_tso_err;
+ stats->tx_errors += ring->stats.over_max_recursion;
+ stats->tx_errors += ring->stats.hw_limitation;
+ stats->tx_errors += ring->stats.copy_bits_err;
+ stats->tx_errors += ring->stats.skb2sgl_err;
+ stats->tx_errors += ring->stats.map_sg_err;
+ } else {
+ stats->rx_bytes += ring->stats.rx_bytes;
+ stats->rx_packets += ring->stats.rx_pkts;
+ stats->rx_dropped += ring->stats.l2_err;
+ stats->rx_errors += ring->stats.l2_err;
+ stats->rx_errors += ring->stats.l3l4_csum_err;
+ stats->rx_crc_errors += ring->stats.l2_err;
+ stats->multicast += ring->stats.rx_multicast;
+ stats->rx_length_errors += ring->stats.err_pkt_len;
+ }
+ } while (u64_stats_fetch_retry(&ring->syncp, start));
+}
+
+static void hns3_nic_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ int queue_num = priv->ae_handle->kinfo.num_tqps;
+ struct hnae3_handle *handle = priv->ae_handle;
+ struct rtnl_link_stats64 ring_total_stats;
+ struct hns3_enet_ring *ring;
+ unsigned int idx;
+
+ if (test_bit(HNS3_NIC_STATE_DOWN, &priv->state))
+ return;
+
+ handle->ae_algo->ops->update_stats(handle);
+
+ memset(&ring_total_stats, 0, sizeof(ring_total_stats));
+ for (idx = 0; idx < queue_num; idx++) {
+ /* fetch the tx stats */
+ ring = &priv->ring[idx];
+ hns3_fetch_stats(&ring_total_stats, ring, true);
+
+ /* fetch the rx stats */
+ ring = &priv->ring[idx + queue_num];
+ hns3_fetch_stats(&ring_total_stats, ring, false);
+ }
+
+ stats->tx_bytes = ring_total_stats.tx_bytes;
+ stats->tx_packets = ring_total_stats.tx_packets;
+ stats->rx_bytes = ring_total_stats.rx_bytes;
+ stats->rx_packets = ring_total_stats.rx_packets;
+
+ stats->rx_errors = ring_total_stats.rx_errors;
+ stats->multicast = ring_total_stats.multicast;
+ stats->rx_length_errors = ring_total_stats.rx_length_errors;
+ stats->rx_crc_errors = ring_total_stats.rx_crc_errors;
+ stats->rx_missed_errors = netdev->stats.rx_missed_errors;
+
+ stats->tx_errors = ring_total_stats.tx_errors;
+ stats->rx_dropped = ring_total_stats.rx_dropped;
+ stats->tx_dropped = ring_total_stats.tx_dropped;
+ stats->collisions = netdev->stats.collisions;
+ stats->rx_over_errors = netdev->stats.rx_over_errors;
+ stats->rx_frame_errors = netdev->stats.rx_frame_errors;
+ stats->rx_fifo_errors = netdev->stats.rx_fifo_errors;
+ stats->tx_aborted_errors = netdev->stats.tx_aborted_errors;
+ stats->tx_carrier_errors = netdev->stats.tx_carrier_errors;
+ stats->tx_fifo_errors = netdev->stats.tx_fifo_errors;
+ stats->tx_heartbeat_errors = netdev->stats.tx_heartbeat_errors;
+ stats->tx_window_errors = netdev->stats.tx_window_errors;
+ stats->rx_compressed = netdev->stats.rx_compressed;
+ stats->tx_compressed = netdev->stats.tx_compressed;
+}
+
+static int hns3_setup_tc(struct net_device *netdev, void *type_data)
+{
+ struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
+ struct hnae3_knic_private_info *kinfo;
+ u8 tc = mqprio_qopt->qopt.num_tc;
+ u16 mode = mqprio_qopt->mode;
+ u8 hw = mqprio_qopt->qopt.hw;
+ struct hnae3_handle *h;
+
+ if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS &&
+ mode == TC_MQPRIO_MODE_CHANNEL) || (!hw && tc == 0)))
+ return -EOPNOTSUPP;
+
+ if (tc > HNAE3_MAX_TC)
+ return -EINVAL;
+
+ if (!netdev)
+ return -EINVAL;
+
+ h = hns3_get_handle(netdev);
+ kinfo = &h->kinfo;
+
+ netif_dbg(h, drv, netdev, "setup tc: num_tc=%u\n", tc);
+
+ return (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ?
+ kinfo->dcb_ops->setup_tc(h, mqprio_qopt) : -EOPNOTSUPP;
+}
+
+static int hns3_setup_tc_cls_flower(struct hns3_nic_priv *priv,
+ struct flow_cls_offload *flow)
+{
+ int tc = tc_classid_to_hwtc(priv->netdev, flow->classid);
+ struct hnae3_handle *h = hns3_get_handle(priv->netdev);
+
+ switch (flow->command) {
+ case FLOW_CLS_REPLACE:
+ if (h->ae_algo->ops->add_cls_flower)
+ return h->ae_algo->ops->add_cls_flower(h, flow, tc);
+ break;
+ case FLOW_CLS_DESTROY:
+ if (h->ae_algo->ops->del_cls_flower)
+ return h->ae_algo->ops->del_cls_flower(h, flow);
+ break;
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int hns3_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
+{
+ struct hns3_nic_priv *priv = cb_priv;
+
+ if (!tc_cls_can_offload_and_chain0(priv->netdev, type_data))
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return hns3_setup_tc_cls_flower(priv, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static LIST_HEAD(hns3_block_cb_list);
+
+static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct hns3_nic_priv *priv = netdev_priv(dev);
+ int ret;
+
+ switch (type) {
+ case TC_SETUP_QDISC_MQPRIO:
+ ret = hns3_setup_tc(dev, type_data);
+ break;
+ case TC_SETUP_BLOCK:
+ ret = flow_block_cb_setup_simple(type_data,
+ &hns3_block_cb_list,
+ hns3_setup_tc_block_cb,
+ priv, priv, true);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static int hns3_vlan_rx_add_vid(struct net_device *netdev,
+ __be16 proto, u16 vid)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ int ret = -EIO;
+
+ if (h->ae_algo->ops->set_vlan_filter)
+ ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false);
+
+ return ret;
+}
+
+static int hns3_vlan_rx_kill_vid(struct net_device *netdev,
+ __be16 proto, u16 vid)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ int ret = -EIO;
+
+ if (h->ae_algo->ops->set_vlan_filter)
+ ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true);
+
+ return ret;
+}
+
+static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
+ u8 qos, __be16 vlan_proto)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ int ret = -EIO;
+
+ netif_dbg(h, drv, netdev,
+ "set vf vlan: vf=%d, vlan=%u, qos=%u, vlan_proto=0x%x\n",
+ vf, vlan, qos, ntohs(vlan_proto));
+
+ if (h->ae_algo->ops->set_vf_vlan_filter)
+ ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan,
+ qos, vlan_proto);
+
+ return ret;
+}
+
+static int hns3_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
+{
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
+
+ if (hns3_nic_resetting(netdev))
+ return -EBUSY;
+
+ if (!handle->ae_algo->ops->set_vf_spoofchk)
+ return -EOPNOTSUPP;
+
+ return handle->ae_algo->ops->set_vf_spoofchk(handle, vf, enable);
+}
+
+static int hns3_set_vf_trust(struct net_device *netdev, int vf, bool enable)
+{
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
+
+ if (!handle->ae_algo->ops->set_vf_trust)
+ return -EOPNOTSUPP;
+
+ return handle->ae_algo->ops->set_vf_trust(handle, vf, enable);
+}
+
+static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ int ret;
+
+ if (hns3_nic_resetting(netdev))
+ return -EBUSY;
+
+ if (!h->ae_algo->ops->set_mtu)
+ return -EOPNOTSUPP;
+
+ netif_dbg(h, drv, netdev,
+ "change mtu from %u to %d\n", netdev->mtu, new_mtu);
+
+ ret = h->ae_algo->ops->set_mtu(h, new_mtu);
+ if (ret)
+ netdev_err(netdev, "failed to change MTU in hardware %d\n",
+ ret);
+ else
+ netdev->mtu = new_mtu;
+
+ return ret;
+}
+
+static int hns3_get_timeout_queue(struct net_device *ndev)
+{
+ int i;
+
+ /* Find the stopped queue the same way the stack does */
+ for (i = 0; i < ndev->num_tx_queues; i++) {
+ struct netdev_queue *q;
+ unsigned long trans_start;
+
+ q = netdev_get_tx_queue(ndev, i);
+ trans_start = READ_ONCE(q->trans_start);
+ if (netif_xmit_stopped(q) &&
+ time_after(jiffies,
+ (trans_start + ndev->watchdog_timeo))) {
+#ifdef CONFIG_BQL
+ struct dql *dql = &q->dql;
+
+ netdev_info(ndev, "DQL info last_cnt: %u, queued: %u, adj_limit: %u, completed: %u\n",
+ dql->last_obj_cnt, dql->num_queued,
+ dql->adj_limit, dql->num_completed);
+#endif
+ netdev_info(ndev, "queue state: 0x%lx, delta msecs: %u\n",
+ q->state,
+ jiffies_to_msecs(jiffies - trans_start));
+ break;
+ }
+ }
+
+ return i;
+}
+
+static void hns3_dump_queue_stats(struct net_device *ndev,
+ struct hns3_enet_ring *tx_ring,
+ int timeout_queue)
+{
+ struct napi_struct *napi = &tx_ring->tqp_vector->napi;
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+
+ netdev_info(ndev,
+ "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, napi state: %lu\n",
+ priv->tx_timeout_count, timeout_queue, tx_ring->next_to_use,
+ tx_ring->next_to_clean, napi->state);
+
+ netdev_info(ndev,
+ "tx_pkts: %llu, tx_bytes: %llu, sw_err_cnt: %llu, tx_pending: %d\n",
+ tx_ring->stats.tx_pkts, tx_ring->stats.tx_bytes,
+ tx_ring->stats.sw_err_cnt, tx_ring->pending_buf);
+
+ netdev_info(ndev,
+ "seg_pkt_cnt: %llu, tx_more: %llu, restart_queue: %llu, tx_busy: %llu\n",
+ tx_ring->stats.seg_pkt_cnt, tx_ring->stats.tx_more,
+ tx_ring->stats.restart_queue, tx_ring->stats.tx_busy);
+
+ netdev_info(ndev, "tx_push: %llu, tx_mem_doorbell: %llu\n",
+ tx_ring->stats.tx_push, tx_ring->stats.tx_mem_doorbell);
+}
+
+static void hns3_dump_queue_reg(struct net_device *ndev,
+ struct hns3_enet_ring *tx_ring)
+{
+ netdev_info(ndev,
+ "BD_NUM: 0x%x HW_HEAD: 0x%x, HW_TAIL: 0x%x, BD_ERR: 0x%x, INT: 0x%x\n",
+ hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_BD_NUM_REG),
+ hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_HEAD_REG),
+ hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_TAIL_REG),
+ hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_BD_ERR_REG),
+ readl(tx_ring->tqp_vector->mask_addr));
+ netdev_info(ndev,
+ "RING_EN: 0x%x, TC: 0x%x, FBD_NUM: 0x%x FBD_OFT: 0x%x, EBD_NUM: 0x%x, EBD_OFT: 0x%x\n",
+ hns3_tqp_read_reg(tx_ring, HNS3_RING_EN_REG),
+ hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_TC_REG),
+ hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_FBDNUM_REG),
+ hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_OFFSET_REG),
+ hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_EBDNUM_REG),
+ hns3_tqp_read_reg(tx_ring,
+ HNS3_RING_TX_RING_EBD_OFFSET_REG));
+}
+
+static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
+{
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ struct hnae3_handle *h = hns3_get_handle(ndev);
+ struct hns3_enet_ring *tx_ring;
+ int timeout_queue;
+
+ timeout_queue = hns3_get_timeout_queue(ndev);
+ if (timeout_queue >= ndev->num_tx_queues) {
+ netdev_info(ndev,
+ "no netdev TX timeout queue found, timeout count: %llu\n",
+ priv->tx_timeout_count);
+ return false;
+ }
+
+ priv->tx_timeout_count++;
+
+ tx_ring = &priv->ring[timeout_queue];
+ hns3_dump_queue_stats(ndev, tx_ring, timeout_queue);
+
+ /* When the MAC receives many pause frames continuously, it is unable
+ * to send packets, which may cause a tx timeout
+ */
+ if (h->ae_algo->ops->get_mac_stats) {
+ struct hns3_mac_stats mac_stats;
+
+ h->ae_algo->ops->get_mac_stats(h, &mac_stats);
+ netdev_info(ndev, "tx_pause_cnt: %llu, rx_pause_cnt: %llu\n",
+ mac_stats.tx_pause_cnt, mac_stats.rx_pause_cnt);
+ }
+
+ hns3_dump_queue_reg(ndev, tx_ring);
+
+ return true;
+}
+
+static void hns3_nic_net_timeout(struct net_device *ndev, unsigned int txqueue)
+{
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ struct hnae3_handle *h = priv->ae_handle;
+
+ if (!hns3_get_tx_timeo_queue_info(ndev))
+ return;
+
+ /* request the reset, and let the hclge to determine
+ * which reset level should be done
+ */
+ if (h->ae_algo->ops->reset_event)
+ h->ae_algo->ops->reset_event(h->pdev, h);
+}
+
+#ifdef CONFIG_RFS_ACCEL
+static int hns3_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
+ u16 rxq_index, u32 flow_id)
+{
+ struct hnae3_handle *h = hns3_get_handle(dev);
+ struct flow_keys fkeys;
+
+ if (!h->ae_algo->ops->add_arfs_entry)
+ return -EOPNOTSUPP;
+
+ if (skb->encapsulation)
+ return -EPROTONOSUPPORT;
+
+ if (!skb_flow_dissect_flow_keys(skb, &fkeys, 0))
+ return -EPROTONOSUPPORT;
+
+ if ((fkeys.basic.n_proto != htons(ETH_P_IP) &&
+ fkeys.basic.n_proto != htons(ETH_P_IPV6)) ||
+ (fkeys.basic.ip_proto != IPPROTO_TCP &&
+ fkeys.basic.ip_proto != IPPROTO_UDP))
+ return -EPROTONOSUPPORT;
+
+ return h->ae_algo->ops->add_arfs_entry(h, rxq_index, flow_id, &fkeys);
+}
+#endif
+
+static int hns3_nic_get_vf_config(struct net_device *ndev, int vf,
+ struct ifla_vf_info *ivf)
+{
+ struct hnae3_handle *h = hns3_get_handle(ndev);
+
+ if (!h->ae_algo->ops->get_vf_config)
+ return -EOPNOTSUPP;
+
+ return h->ae_algo->ops->get_vf_config(h, vf, ivf);
+}
+
+static int hns3_nic_set_vf_link_state(struct net_device *ndev, int vf,
+ int link_state)
+{
+ struct hnae3_handle *h = hns3_get_handle(ndev);
+
+ if (!h->ae_algo->ops->set_vf_link_state)
+ return -EOPNOTSUPP;
+
+ return h->ae_algo->ops->set_vf_link_state(h, vf, link_state);
+}
+
+static int hns3_nic_set_vf_rate(struct net_device *ndev, int vf,
+ int min_tx_rate, int max_tx_rate)
+{
+ struct hnae3_handle *h = hns3_get_handle(ndev);
+
+ if (!h->ae_algo->ops->set_vf_rate)
+ return -EOPNOTSUPP;
+
+ return h->ae_algo->ops->set_vf_rate(h, vf, min_tx_rate, max_tx_rate,
+ false);
+}
+
+static int hns3_nic_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
+
+ if (!h->ae_algo->ops->set_vf_mac)
+ return -EOPNOTSUPP;
+
+ if (is_multicast_ether_addr(mac)) {
+ hnae3_format_mac_addr(format_mac_addr, mac);
+ netdev_err(netdev,
+ "Invalid MAC:%s specified. Could not set MAC\n",
+ format_mac_addr);
+ return -EINVAL;
+ }
+
+ return h->ae_algo->ops->set_vf_mac(h, vf_id, mac);
+}
+
+#define HNS3_INVALID_DSCP 0xff
+#define HNS3_DSCP_SHIFT 2
+
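+/* Extract the DSCP value (the upper six bits of the IP DS/TOS field) from
+ * an IPv4 or IPv6 header; used by hns3_nic_select_queue() for DSCP based
+ * priority mapping.
+ */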
+static u8 hns3_get_skb_dscp(struct sk_buff *skb)
+{
+ __be16 protocol = skb->protocol;
+ u8 dscp = HNS3_INVALID_DSCP;
+
+ if (protocol == htons(ETH_P_8021Q))
+ protocol = vlan_get_protocol(skb);
+
+ if (protocol == htons(ETH_P_IP))
+ dscp = ipv4_get_dsfield(ip_hdr(skb)) >> HNS3_DSCP_SHIFT;
+ else if (protocol == htons(ETH_P_IPV6))
+ dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> HNS3_DSCP_SHIFT;
+
+ return dscp;
+}
+
+static u16 hns3_nic_select_queue(struct net_device *netdev,
+ struct sk_buff *skb,
+ struct net_device *sb_dev)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ u8 dscp;
+
+ if (h->kinfo.tc_map_mode != HNAE3_TC_MAP_MODE_DSCP ||
+ !h->ae_algo->ops->get_dscp_prio)
+ goto out;
+
+ dscp = hns3_get_skb_dscp(skb);
+ if (unlikely(dscp >= HNAE3_MAX_DSCP))
+ goto out;
+
+ skb->priority = h->kinfo.dscp_prio[dscp];
+ if (skb->priority == HNAE3_PRIO_ID_INVALID)
+ skb->priority = 0;
+
+out:
+ return netdev_pick_tx(netdev, skb, sb_dev);
+}
+
+static const struct net_device_ops hns3_nic_netdev_ops = {
+ .ndo_open = hns3_nic_net_open,
+ .ndo_stop = hns3_nic_net_stop,
+ .ndo_start_xmit = hns3_nic_net_xmit,
+ .ndo_tx_timeout = hns3_nic_net_timeout,
+ .ndo_set_mac_address = hns3_nic_net_set_mac_address,
+ .ndo_eth_ioctl = hns3_nic_do_ioctl,
+ .ndo_change_mtu = hns3_nic_change_mtu,
+ .ndo_set_features = hns3_nic_set_features,
+ .ndo_features_check = hns3_features_check,
+ .ndo_get_stats64 = hns3_nic_get_stats64,
+ .ndo_setup_tc = hns3_nic_setup_tc,
+ .ndo_set_rx_mode = hns3_nic_set_rx_mode,
+ .ndo_vlan_rx_add_vid = hns3_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = hns3_vlan_rx_kill_vid,
+ .ndo_set_vf_vlan = hns3_ndo_set_vf_vlan,
+ .ndo_set_vf_spoofchk = hns3_set_vf_spoofchk,
+ .ndo_set_vf_trust = hns3_set_vf_trust,
+#ifdef CONFIG_RFS_ACCEL
+ .ndo_rx_flow_steer = hns3_rx_flow_steer,
+#endif
+ .ndo_get_vf_config = hns3_nic_get_vf_config,
+ .ndo_set_vf_link_state = hns3_nic_set_vf_link_state,
+ .ndo_set_vf_rate = hns3_nic_set_vf_rate,
+ .ndo_set_vf_mac = hns3_nic_set_vf_mac,
+ .ndo_select_queue = hns3_nic_select_queue,
+};
+
+bool hns3_is_phys_func(struct pci_dev *pdev)
+{
+ u32 dev_id = pdev->device;
+
+ switch (dev_id) {
+ case HNAE3_DEV_ID_GE:
+ case HNAE3_DEV_ID_25GE:
+ case HNAE3_DEV_ID_25GE_RDMA:
+ case HNAE3_DEV_ID_25GE_RDMA_MACSEC:
+ case HNAE3_DEV_ID_50GE_RDMA:
+ case HNAE3_DEV_ID_50GE_RDMA_MACSEC:
+ case HNAE3_DEV_ID_100G_RDMA_MACSEC:
+ case HNAE3_DEV_ID_200G_RDMA:
+ return true;
+ case HNAE3_DEV_ID_VF:
+ case HNAE3_DEV_ID_RDMA_DCB_PFC_VF:
+ return false;
+ default:
+ dev_warn(&pdev->dev, "un-recognized pci device-id %u",
+ dev_id);
+ }
+
+ return false;
+}
+
+static void hns3_disable_sriov(struct pci_dev *pdev)
+{
+ /* If our VFs are assigned we cannot shut down SR-IOV
+ * without causing issues, so just leave the hardware
+ * available but disabled
+ */
+ if (pci_vfs_assigned(pdev)) {
+ dev_warn(&pdev->dev,
+ "disabling driver while VFs are assigned\n");
+ return;
+ }
+
+ pci_disable_sriov(pdev);
+}
+
+/* hns3_probe - Device initialization routine
+ * @pdev: PCI device information struct
+ * @ent: entry in hns3_pci_tbl
+ *
+ * hns3_probe initializes a PF identified by a pci_dev structure.
+ * The OS initialization, configuring of the PF private structure,
+ * and a hardware reset occur.
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct hnae3_ae_dev *ae_dev;
+ int ret;
+
+ ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev), GFP_KERNEL);
+ if (!ae_dev)
+ return -ENOMEM;
+
+ ae_dev->pdev = pdev;
+ ae_dev->flag = ent->driver_data;
+ pci_set_drvdata(pdev, ae_dev);
+
+ ret = hnae3_register_ae_dev(ae_dev);
+ if (ret)
+ pci_set_drvdata(pdev, NULL);
+
+ return ret;
+}
+
+/**
+ * hns3_clean_vf_config
+ * @pdev: pointer to a pci_dev structure
+ * @num_vfs: number of VFs allocated
+ *
+ * Clean up residual VF config after disabling SR-IOV
+ **/
+static void hns3_clean_vf_config(struct pci_dev *pdev, int num_vfs)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
+
+ if (ae_dev->ops->clean_vf_config)
+ ae_dev->ops->clean_vf_config(ae_dev, num_vfs);
+}
+
+/* hns3_remove - Device removal routine
+ * @pdev: PCI device information struct
+ */
+static void hns3_remove(struct pci_dev *pdev)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
+
+ if (hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))
+ hns3_disable_sriov(pdev);
+
+ hnae3_unregister_ae_dev(ae_dev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+/**
+ * hns3_pci_sriov_configure
+ * @pdev: pointer to a pci_dev structure
+ * @num_vfs: number of VFs to allocate
+ *
+ * Enable or change the number of VFs. Called when the user updates the number
+ * of VFs in sysfs.
+ **/
+static int hns3_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+ int ret;
+
+ if (!(hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))) {
+ dev_warn(&pdev->dev, "Can not config SRIOV\n");
+ return -EINVAL;
+ }
+
+ if (num_vfs) {
+ ret = pci_enable_sriov(pdev, num_vfs);
+ if (ret)
+ dev_err(&pdev->dev, "SRIOV enable failed %d\n", ret);
+ else
+ return num_vfs;
+ } else if (!pci_vfs_assigned(pdev)) {
+ int num_vfs_pre = pci_num_vf(pdev);
+
+ pci_disable_sriov(pdev);
+ hns3_clean_vf_config(pdev, num_vfs_pre);
+ } else {
+ dev_warn(&pdev->dev,
+ "Unable to free VFs because some are assigned to VMs.\n");
+ }
+
+ return 0;
+}
+
+static void hns3_shutdown(struct pci_dev *pdev)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
+
+ hnae3_unregister_ae_dev(ae_dev);
+ pci_set_drvdata(pdev, NULL);
+
+ if (system_state == SYSTEM_POWER_OFF)
+ pci_set_power_state(pdev, PCI_D3hot);
+}
+
+static int __maybe_unused hns3_suspend(struct device *dev)
+{
+ struct hnae3_ae_dev *ae_dev = dev_get_drvdata(dev);
+
+ if (ae_dev && hns3_is_phys_func(ae_dev->pdev)) {
+ dev_info(dev, "Begin to suspend.\n");
+ if (ae_dev->ops && ae_dev->ops->reset_prepare)
+ ae_dev->ops->reset_prepare(ae_dev, HNAE3_FUNC_RESET);
+ }
+
+ return 0;
+}
+
+static int __maybe_unused hns3_resume(struct device *dev)
+{
+ struct hnae3_ae_dev *ae_dev = dev_get_drvdata(dev);
+
+ if (ae_dev && hns3_is_phys_func(ae_dev->pdev)) {
+ dev_info(dev, "Begin to resume.\n");
+ if (ae_dev->ops && ae_dev->ops->reset_done)
+ ae_dev->ops->reset_done(ae_dev);
+ }
+
+ return 0;
+}
+
+static pci_ers_result_t hns3_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
+ pci_ers_result_t ret;
+
+ dev_info(&pdev->dev, "PCI error detected, state(=%u)!!\n", state);
+
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ if (!ae_dev || !ae_dev->ops) {
+ dev_err(&pdev->dev,
+ "Can't recover - error happened before device initialized\n");
+ return PCI_ERS_RESULT_NONE;
+ }
+
+ if (ae_dev->ops->handle_hw_ras_error)
+ ret = ae_dev->ops->handle_hw_ras_error(ae_dev);
+ else
+ return PCI_ERS_RESULT_NONE;
+
+ return ret;
+}
+
+static pci_ers_result_t hns3_slot_reset(struct pci_dev *pdev)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
+ const struct hnae3_ae_ops *ops;
+ enum hnae3_reset_type reset_type;
+ struct device *dev = &pdev->dev;
+
+ if (!ae_dev || !ae_dev->ops)
+ return PCI_ERS_RESULT_NONE;
+
+ ops = ae_dev->ops;
+ /* request the reset */
+ if (ops->reset_event && ops->get_reset_level &&
+ ops->set_default_reset_request) {
+ if (ae_dev->hw_err_reset_req) {
+ reset_type = ops->get_reset_level(ae_dev,
+ &ae_dev->hw_err_reset_req);
+ ops->set_default_reset_request(ae_dev, reset_type);
+ dev_info(dev, "requesting reset due to PCI error\n");
+ ops->reset_event(pdev, NULL);
+ }
+
+ return PCI_ERS_RESULT_RECOVERED;
+ }
+
+ return PCI_ERS_RESULT_DISCONNECT;
+}
+
+static void hns3_reset_prepare(struct pci_dev *pdev)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
+
+ dev_info(&pdev->dev, "FLR prepare\n");
+ if (ae_dev && ae_dev->ops && ae_dev->ops->reset_prepare)
+ ae_dev->ops->reset_prepare(ae_dev, HNAE3_FLR_RESET);
+}
+
+static void hns3_reset_done(struct pci_dev *pdev)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
+
+ dev_info(&pdev->dev, "FLR done\n");
+ if (ae_dev && ae_dev->ops && ae_dev->ops->reset_done)
+ ae_dev->ops->reset_done(ae_dev);
+}
+
+static const struct pci_error_handlers hns3_err_handler = {
+ .error_detected = hns3_error_detected,
+ .slot_reset = hns3_slot_reset,
+ .reset_prepare = hns3_reset_prepare,
+ .reset_done = hns3_reset_done,
+};
+
+static SIMPLE_DEV_PM_OPS(hns3_pm_ops, hns3_suspend, hns3_resume);
+
+static struct pci_driver hns3_driver = {
+ .name = hns3_driver_name,
+ .id_table = hns3_pci_tbl,
+ .probe = hns3_probe,
+ .remove = hns3_remove,
+ .shutdown = hns3_shutdown,
+ .driver.pm = &hns3_pm_ops,
+ .sriov_configure = hns3_pci_sriov_configure,
+ .err_handler = &hns3_err_handler,
+};
+
+/* set default feature to hns3 */
+static void hns3_set_default_feature(struct net_device *netdev)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ struct pci_dev *pdev = h->pdev;
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
+
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+
+ netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
+ NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
+ NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_SCTP_CRC | NETIF_F_FRAGLIST;
+
+ if (hnae3_ae_dev_gro_supported(ae_dev))
+ netdev->features |= NETIF_F_GRO_HW;
+
+ if (hnae3_ae_dev_fd_supported(ae_dev))
+ netdev->features |= NETIF_F_NTUPLE;
+
+ if (test_bit(HNAE3_DEV_SUPPORT_UDP_GSO_B, ae_dev->caps))
+ netdev->features |= NETIF_F_GSO_UDP_L4;
+
+ if (test_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps))
+ netdev->features |= NETIF_F_HW_CSUM;
+ else
+ netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+
+ if (test_bit(HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B, ae_dev->caps))
+ netdev->features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
+
+ if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps))
+ netdev->features |= NETIF_F_HW_TC;
+
+ netdev->hw_features |= netdev->features;
+ if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps))
+ netdev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
+
+ netdev->vlan_features |= netdev->features &
+ ~(NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_GRO_HW | NETIF_F_NTUPLE |
+ NETIF_F_HW_TC);
+
+ netdev->hw_enc_features |= netdev->vlan_features | NETIF_F_TSO_MANGLEID;
+
+ /* The device_version V3 hardware can't offload the checksum for IP in
+ * GRE packets, but can do it for NvGRE. So disable the checksum and
+ * GSO offload for GRE by default.
+ */
+ if (ae_dev->dev_version > HNAE3_DEVICE_VERSION_V2) {
+ netdev->features &= ~NETIF_F_GSO_GRE;
+ netdev->features &= ~NETIF_F_GSO_GRE_CSUM;
+ }
+}
+
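+/* Allocate an RX buffer, either as a page pool fragment or as a plain
+ * page allocation. In the latter case USHRT_MAX page references are taken
+ * up front and mirrored in pagecnt_bias, so buffer reuse can be tracked
+ * through this local counter rather than the atomic page refcount on
+ * every packet.
+ */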
+static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
+ struct hns3_desc_cb *cb)
+{
+ unsigned int order = hns3_page_order(ring);
+ struct page *p;
+
+ if (ring->page_pool) {
+ p = page_pool_dev_alloc_frag(ring->page_pool,
+ &cb->page_offset,
+ hns3_buf_size(ring));
+ if (unlikely(!p))
+ return -ENOMEM;
+
+ cb->priv = p;
+ cb->buf = page_address(p);
+ cb->dma = page_pool_get_dma_addr(p);
+ cb->type = DESC_TYPE_PP_FRAG;
+ cb->reuse_flag = 0;
+ return 0;
+ }
+
+ p = dev_alloc_pages(order);
+ if (!p)
+ return -ENOMEM;
+
+ cb->priv = p;
+ cb->page_offset = 0;
+ cb->reuse_flag = 0;
+ cb->buf = page_address(p);
+ cb->length = hns3_page_size(ring);
+ cb->type = DESC_TYPE_PAGE;
+ page_ref_add(p, USHRT_MAX - 1);
+ cb->pagecnt_bias = USHRT_MAX;
+
+ return 0;
+}
+
+static void hns3_free_buffer(struct hns3_enet_ring *ring,
+ struct hns3_desc_cb *cb, int budget)
+{
+ if (cb->type & (DESC_TYPE_SKB | DESC_TYPE_BOUNCE_HEAD |
+ DESC_TYPE_BOUNCE_ALL | DESC_TYPE_SGL_SKB))
+ napi_consume_skb(cb->priv, budget);
+ else if (!HNAE3_IS_TX_RING(ring)) {
+ if (cb->type & DESC_TYPE_PAGE && cb->pagecnt_bias)
+ __page_frag_cache_drain(cb->priv, cb->pagecnt_bias);
+ else if (cb->type & DESC_TYPE_PP_FRAG)
+ page_pool_put_full_page(ring->page_pool, cb->priv,
+ false);
+ }
+ memset(cb, 0, sizeof(*cb));
+}
+
+static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb)
+{
+ cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0,
+ cb->length, ring_to_dma_dir(ring));
+
+ if (unlikely(dma_mapping_error(ring_to_dev(ring), cb->dma)))
+ return -EIO;
+
+ return 0;
+}
+
+static void hns3_unmap_buffer(struct hns3_enet_ring *ring,
+ struct hns3_desc_cb *cb)
+{
+ if (cb->type & (DESC_TYPE_SKB | DESC_TYPE_FRAGLIST_SKB))
+ dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
+ ring_to_dma_dir(ring));
+ else if ((cb->type & DESC_TYPE_PAGE) && cb->length)
+ dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
+ ring_to_dma_dir(ring));
+ else if (cb->type & (DESC_TYPE_BOUNCE_ALL | DESC_TYPE_BOUNCE_HEAD |
+ DESC_TYPE_SGL_SKB))
+ hns3_tx_spare_reclaim_cb(ring, cb);
+}
+
+static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i)
+{
+ hns3_unmap_buffer(ring, &ring->desc_cb[i]);
+ ring->desc[i].addr = 0;
+ ring->desc_cb[i].refill = 0;
+}
+
+static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i,
+ int budget)
+{
+ struct hns3_desc_cb *cb = &ring->desc_cb[i];
+
+ if (!ring->desc_cb[i].dma)
+ return;
+
+ hns3_buffer_detach(ring, i);
+ hns3_free_buffer(ring, cb, budget);
+}
+
+static void hns3_free_buffers(struct hns3_enet_ring *ring)
+{
+ int i;
+
+ for (i = 0; i < ring->desc_num; i++)
+ hns3_free_buffer_detach(ring, i, 0);
+}
+
+/* free desc along with its attached buffer */
+static void hns3_free_desc(struct hns3_enet_ring *ring)
+{
+ int size = ring->desc_num * sizeof(ring->desc[0]);
+
+ hns3_free_buffers(ring);
+
+ if (ring->desc) {
+ dma_free_coherent(ring_to_dev(ring), size,
+ ring->desc, ring->desc_dma_addr);
+ ring->desc = NULL;
+ }
+}
+
+static int hns3_alloc_desc(struct hns3_enet_ring *ring)
+{
+ int size = ring->desc_num * sizeof(ring->desc[0]);
+
+ ring->desc = dma_alloc_coherent(ring_to_dev(ring), size,
+ &ring->desc_dma_addr, GFP_KERNEL);
+ if (!ring->desc)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int hns3_alloc_and_map_buffer(struct hns3_enet_ring *ring,
+ struct hns3_desc_cb *cb)
+{
+ int ret;
+
+ ret = hns3_alloc_buffer(ring, cb);
+ if (ret || ring->page_pool)
+ goto out;
+
+ ret = hns3_map_buffer(ring, cb);
+ if (ret)
+ goto out_with_buf;
+
+ return 0;
+
+out_with_buf:
+ hns3_free_buffer(ring, cb, 0);
+out:
+ return ret;
+}
+
+static int hns3_alloc_and_attach_buffer(struct hns3_enet_ring *ring, int i)
+{
+ int ret = hns3_alloc_and_map_buffer(ring, &ring->desc_cb[i]);
+
+ if (ret)
+ return ret;
+
+ ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma +
+ ring->desc_cb[i].page_offset);
+ ring->desc_cb[i].refill = 1;
+
+ return 0;
+}
+
+/* Allocate memory for raw packets, and map it for DMA */
+static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring)
+{
+ int i, j, ret;
+
+ for (i = 0; i < ring->desc_num; i++) {
+ ret = hns3_alloc_and_attach_buffer(ring, i);
+ if (ret)
+ goto out_buffer_fail;
+ }
+
+ return 0;
+
+out_buffer_fail:
+ for (j = i - 1; j >= 0; j--)
+ hns3_free_buffer_detach(ring, j, 0);
+ return ret;
+}
+
+/* detach an in-use buffer and replace it with a reserved one */
+static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
+ struct hns3_desc_cb *res_cb)
+{
+ hns3_unmap_buffer(ring, &ring->desc_cb[i]);
+ ring->desc_cb[i] = *res_cb;
+ ring->desc_cb[i].refill = 1;
+ ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma +
+ ring->desc_cb[i].page_offset);
+ ring->desc[i].rx.bd_base_info = 0;
+}
+
+static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
+{
+ ring->desc_cb[i].reuse_flag = 0;
+ ring->desc_cb[i].refill = 1;
+ ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma +
+ ring->desc_cb[i].page_offset);
+ ring->desc[i].rx.bd_base_info = 0;
+
+ dma_sync_single_for_device(ring_to_dev(ring),
+ ring->desc_cb[i].dma + ring->desc_cb[i].page_offset,
+ hns3_buf_size(ring),
+ DMA_FROM_DEVICE);
+}
+
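+/* Reclaim the TX descriptors between next_to_clean and last_to_use,
+ * stopping at the first BD whose valid bit is still set and which is
+ * therefore still owned by the hardware.
+ */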
+static bool hns3_nic_reclaim_desc(struct hns3_enet_ring *ring,
+ int *bytes, int *pkts, int budget)
+{
+ /* This smp_load_acquire() pairs with smp_store_release() in
+ * hns3_tx_doorbell().
+ */
+ int ltu = smp_load_acquire(&ring->last_to_use);
+ int ntc = ring->next_to_clean;
+ struct hns3_desc_cb *desc_cb;
+ bool reclaimed = false;
+ struct hns3_desc *desc;
+
+ while (ltu != ntc) {
+ desc = &ring->desc[ntc];
+
+ if (le16_to_cpu(desc->tx.bdtp_fe_sc_vld_ra_ri) &
+ BIT(HNS3_TXD_VLD_B))
+ break;
+
+ desc_cb = &ring->desc_cb[ntc];
+
+ if (desc_cb->type & (DESC_TYPE_SKB | DESC_TYPE_BOUNCE_ALL |
+ DESC_TYPE_BOUNCE_HEAD |
+ DESC_TYPE_SGL_SKB)) {
+ (*pkts)++;
+ (*bytes) += desc_cb->send_bytes;
+ }
+
+ /* desc_cb will be cleaned after hns3_free_buffer_detach */
+ hns3_free_buffer_detach(ring, ntc, budget);
+
+ if (++ntc == ring->desc_num)
+ ntc = 0;
+
+ /* Issue prefetch for next Tx descriptor */
+ prefetch(&ring->desc_cb[ntc]);
+ reclaimed = true;
+ }
+
+ if (unlikely(!reclaimed))
+ return false;
+
+ /* This smp_store_release() pairs with smp_load_acquire() in
+ * ring_space called by hns3_nic_net_xmit.
+ */
+ smp_store_release(&ring->next_to_clean, ntc);
+
+ hns3_tx_spare_update(ring);
+
+ return true;
+}
+
+void hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget)
+{
+ struct net_device *netdev = ring_to_netdev(ring);
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct netdev_queue *dev_queue;
+ int bytes, pkts;
+
+ bytes = 0;
+ pkts = 0;
+
+ if (unlikely(!hns3_nic_reclaim_desc(ring, &bytes, &pkts, budget)))
+ return;
+
+ ring->tqp_vector->tx_group.total_bytes += bytes;
+ ring->tqp_vector->tx_group.total_packets += pkts;
+
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.tx_bytes += bytes;
+ ring->stats.tx_pkts += pkts;
+ u64_stats_update_end(&ring->syncp);
+
+ dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index);
+ netdev_tx_completed_queue(dev_queue, pkts, bytes);
+
+ if (unlikely(netif_carrier_ok(netdev) &&
+ ring_space(ring) > HNS3_MAX_TSO_BD_NUM)) {
+ /* Make sure that anybody stopping the queue after this
+ * sees the new next_to_clean.
+ */
+ smp_mb();
+ if (netif_tx_queue_stopped(dev_queue) &&
+ !test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) {
+ netif_tx_wake_queue(dev_queue);
+ ring->stats.restart_queue++;
+ }
+ }
+}
+
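+/* Number of descriptors that can be refilled: the slots between
+ * next_to_use and next_to_clean in this circular ring. When the two
+ * indices are equal and the slot has not been refilled, the whole ring
+ * is considered unused.
+ */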
+static int hns3_desc_unused(struct hns3_enet_ring *ring)
+{
+ int ntc = ring->next_to_clean;
+ int ntu = ring->next_to_use;
+
+ if (unlikely(ntc == ntu && !ring->desc_cb[ntc].refill))
+ return ring->desc_num;
+
+ return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
+}
+
+/* Return true if there is any allocation failure */
+static bool hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
+ int cleaned_count)
+{
+ struct hns3_desc_cb *desc_cb;
+ struct hns3_desc_cb res_cbs;
+ int i, ret;
+
+ for (i = 0; i < cleaned_count; i++) {
+ desc_cb = &ring->desc_cb[ring->next_to_use];
+ if (desc_cb->reuse_flag) {
+ hns3_ring_stats_update(ring, reuse_pg_cnt);
+
+ hns3_reuse_buffer(ring, ring->next_to_use);
+ } else {
+ ret = hns3_alloc_and_map_buffer(ring, &res_cbs);
+ if (ret) {
+ hns3_ring_stats_update(ring, sw_err_cnt);
+
+ hns3_rl_err(ring_to_netdev(ring),
+ "alloc rx buffer failed: %d\n",
+ ret);
+
+ writel(i, ring->tqp->io_base +
+ HNS3_RING_RX_RING_HEAD_REG);
+ return true;
+ }
+ hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
+
+ hns3_ring_stats_update(ring, non_reuse_pg);
+ }
+
+ ring_ptr_move_fw(ring, next_to_use);
+ }
+
+ writel(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
+ return false;
+}
+
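+/* The page can only be recycled by the driver when no one else holds a
+ * reference to it, i.e. when the page refcount equals the bias the
+ * driver has reserved for itself.
+ */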
+static bool hns3_can_reuse_page(struct hns3_desc_cb *cb)
+{
+ return page_count(cb->priv) == cb->pagecnt_bias;
+}
+
+static int hns3_handle_rx_copybreak(struct sk_buff *skb, int i,
+ struct hns3_enet_ring *ring,
+ int pull_len,
+ struct hns3_desc_cb *desc_cb)
+{
+ struct hns3_desc *desc = &ring->desc[ring->next_to_clean];
+ u32 frag_offset = desc_cb->page_offset + pull_len;
+ int size = le16_to_cpu(desc->rx.size);
+ u32 frag_size = size - pull_len;
+ void *frag = napi_alloc_frag(frag_size);
+
+ if (unlikely(!frag)) {
+ hns3_ring_stats_update(ring, frag_alloc_err);
+
+ hns3_rl_err(ring_to_netdev(ring),
+ "failed to allocate rx frag\n");
+ return -ENOMEM;
+ }
+
+ desc_cb->reuse_flag = 1;
+ memcpy(frag, desc_cb->buf + frag_offset, frag_size);
+ skb_add_rx_frag(skb, i, virt_to_page(frag),
+ offset_in_page(frag), frag_size, frag_size);
+
+ hns3_ring_stats_update(ring, frag_alloc);
+ return 0;
+}
+
+static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
+ struct hns3_enet_ring *ring, int pull_len,
+ struct hns3_desc_cb *desc_cb)
+{
+ struct hns3_desc *desc = &ring->desc[ring->next_to_clean];
+ u32 frag_offset = desc_cb->page_offset + pull_len;
+ int size = le16_to_cpu(desc->rx.size);
+ u32 truesize = hns3_buf_size(ring);
+ u32 frag_size = size - pull_len;
+ int ret = 0;
+ bool reused;
+
+ if (ring->page_pool) {
+ skb_add_rx_frag(skb, i, desc_cb->priv, frag_offset,
+ frag_size, truesize);
+ return;
+ }
+
+ /* Avoid re-using remote or pfmem page */
+ if (unlikely(!dev_page_is_reusable(desc_cb->priv)))
+ goto out;
+
+ reused = hns3_can_reuse_page(desc_cb);
+
+ /* The rx page can be reused when:
+ * 1. the page is only owned by the driver and page_offset is
+ * zero, which means the region [0, truesize) will be used by
+ * the stack after skb_add_rx_frag() is called, and the rest
+ * of the page can be reused by the driver;
+ * or
+ * 2. the page is only owned by the driver and page_offset is
+ * non-zero, which means [page_offset, page_offset + truesize)
+ * will be used by the stack after skb_add_rx_frag() is
+ * called, and [0, truesize) can be reused by the driver.
+ */
+ if ((!desc_cb->page_offset && reused) ||
+ ((desc_cb->page_offset + truesize + truesize) <=
+ hns3_page_size(ring) && desc_cb->page_offset)) {
+ desc_cb->page_offset += truesize;
+ desc_cb->reuse_flag = 1;
+ } else if (desc_cb->page_offset && reused) {
+ desc_cb->page_offset = 0;
+ desc_cb->reuse_flag = 1;
+ } else if (frag_size <= ring->rx_copybreak) {
+ ret = hns3_handle_rx_copybreak(skb, i, ring, pull_len, desc_cb);
+ if (!ret)
+ return;
+ }
+
+out:
+ desc_cb->pagecnt_bias--;
+
+ if (unlikely(!desc_cb->pagecnt_bias)) {
+ page_ref_add(desc_cb->priv, USHRT_MAX);
+ desc_cb->pagecnt_bias = USHRT_MAX;
+ }
+
+ skb_add_rx_frag(skb, i, desc_cb->priv, frag_offset,
+ frag_size, truesize);
+
+ if (unlikely(!desc_cb->reuse_flag))
+ __page_frag_cache_drain(desc_cb->priv, desc_cb->pagecnt_bias);
+}
+
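+/* Finish an skb that the hardware has already coalesced (HW GRO): skip
+ * any VLAN headers, recompute the TCP pseudo-header checksum and mark
+ * the skb CHECKSUM_PARTIAL with the proper GSO type so the stack can
+ * resegment it when forwarding.
+ */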
+static int hns3_gro_complete(struct sk_buff *skb, u32 l234info)
+{
+ __be16 type = skb->protocol;
+ struct tcphdr *th;
+ int depth = 0;
+
+ while (eth_type_vlan(type)) {
+ struct vlan_hdr *vh;
+
+ if ((depth + VLAN_HLEN) > skb_headlen(skb))
+ return -EFAULT;
+
+ vh = (struct vlan_hdr *)(skb->data + depth);
+ type = vh->h_vlan_encapsulated_proto;
+ depth += VLAN_HLEN;
+ }
+
+ skb_set_network_header(skb, depth);
+
+ if (type == htons(ETH_P_IP)) {
+ const struct iphdr *iph = ip_hdr(skb);
+
+ depth += sizeof(struct iphdr);
+ skb_set_transport_header(skb, depth);
+ th = tcp_hdr(skb);
+ th->check = ~tcp_v4_check(skb->len - depth, iph->saddr,
+ iph->daddr, 0);
+ } else if (type == htons(ETH_P_IPV6)) {
+ const struct ipv6hdr *iph = ipv6_hdr(skb);
+
+ depth += sizeof(struct ipv6hdr);
+ skb_set_transport_header(skb, depth);
+ th = tcp_hdr(skb);
+ th->check = ~tcp_v6_check(skb->len - depth, &iph->saddr,
+ &iph->daddr, 0);
+ } else {
+ hns3_rl_err(skb->dev,
+ "Error: FW GRO supports only IPv4/IPv6, not 0x%04x, depth: %d\n",
+ be16_to_cpu(type), depth);
+ return -EFAULT;
+ }
+
+ skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
+ if (th->cwr)
+ skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
+
+ if (l234info & BIT(HNS3_RXD_GRO_FIXID_B))
+ skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID;
+
+ skb->csum_start = (unsigned char *)th - skb->head;
+ skb->csum_offset = offsetof(struct tcphdr, check);
+ skb->ip_summed = CHECKSUM_PARTIAL;
+
+ trace_hns3_gro(skb);
+
+ return 0;
+}
+
+static void hns3_checksum_complete(struct hns3_enet_ring *ring,
+ struct sk_buff *skb, u32 ptype, u16 csum)
+{
+ if (ptype == HNS3_INVALID_PTYPE ||
+ hns3_rx_ptype_tbl[ptype].ip_summed != CHECKSUM_COMPLETE)
+ return;
+
+ hns3_ring_stats_update(ring, csum_complete);
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ skb->csum = csum_unfold((__force __sum16)csum);
+}
+
+static void hns3_rx_handle_csum(struct sk_buff *skb, u32 l234info,
+ u32 ol_info, u32 ptype)
+{
+ int l3_type, l4_type;
+ int ol4_type;
+
+ if (ptype != HNS3_INVALID_PTYPE) {
+ skb->csum_level = hns3_rx_ptype_tbl[ptype].csum_level;
+ skb->ip_summed = hns3_rx_ptype_tbl[ptype].ip_summed;
+
+ return;
+ }
+
+ ol4_type = hnae3_get_field(ol_info, HNS3_RXD_OL4ID_M,
+ HNS3_RXD_OL4ID_S);
+ switch (ol4_type) {
+ case HNS3_OL4_TYPE_MAC_IN_UDP:
+ case HNS3_OL4_TYPE_NVGRE:
+ skb->csum_level = 1;
+ fallthrough;
+ case HNS3_OL4_TYPE_NO_TUN:
+ l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
+ HNS3_RXD_L3ID_S);
+ l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M,
+ HNS3_RXD_L4ID_S);
+ /* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */
+ if ((l3_type == HNS3_L3_TYPE_IPV4 ||
+ l3_type == HNS3_L3_TYPE_IPV6) &&
+ (l4_type == HNS3_L4_TYPE_UDP ||
+ l4_type == HNS3_L4_TYPE_TCP ||
+ l4_type == HNS3_L4_TYPE_SCTP))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ break;
+ default:
+ break;
+ }
+}
+
+static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
+ u32 l234info, u32 bd_base_info, u32 ol_info,
+ u16 csum)
+{
+ struct net_device *netdev = ring_to_netdev(ring);
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ u32 ptype = HNS3_INVALID_PTYPE;
+
+ skb->ip_summed = CHECKSUM_NONE;
+
+ skb_checksum_none_assert(skb);
+
+ if (!(netdev->features & NETIF_F_RXCSUM))
+ return;
+
+ if (test_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state))
+ ptype = hnae3_get_field(ol_info, HNS3_RXD_PTYPE_M,
+ HNS3_RXD_PTYPE_S);
+
+ hns3_checksum_complete(ring, skb, ptype, csum);
+
+ /* check if hardware has done checksum */
+ if (!(bd_base_info & BIT(HNS3_RXD_L3L4P_B)))
+ return;
+
+ if (unlikely(l234info & (BIT(HNS3_RXD_L3E_B) | BIT(HNS3_RXD_L4E_B) |
+ BIT(HNS3_RXD_OL3E_B) |
+ BIT(HNS3_RXD_OL4E_B)))) {
+ skb->ip_summed = CHECKSUM_NONE;
+ hns3_ring_stats_update(ring, l3l4_csum_err);
+
+ return;
+ }
+
+ hns3_rx_handle_csum(skb, l234info, ol_info, ptype);
+}
+
+static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb)
+{
+ if (skb_has_frag_list(skb))
+ napi_gro_flush(&ring->tqp_vector->napi, false);
+
+ napi_gro_receive(&ring->tqp_vector->napi, skb);
+}
+
+static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
+ struct hns3_desc *desc, u32 l234info,
+ u16 *vlan_tag)
+{
+ struct hnae3_handle *handle = ring->tqp->handle;
+ struct pci_dev *pdev = ring->tqp->handle->pdev;
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
+
+ if (unlikely(ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)) {
+ *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
+ if (!(*vlan_tag & VLAN_VID_MASK))
+ *vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
+
+ return (*vlan_tag != 0);
+ }
+
+#define HNS3_STRP_OUTER_VLAN 0x1
+#define HNS3_STRP_INNER_VLAN 0x2
+#define HNS3_STRP_BOTH 0x3
+
+ /* Hardware always inserts the VLAN tag into the RX descriptor
+ * when it strips the tag from the packet, so the driver needs
+ * to determine which tag to report to the stack.
+ */
+ switch (hnae3_get_field(l234info, HNS3_RXD_STRP_TAGP_M,
+ HNS3_RXD_STRP_TAGP_S)) {
+ case HNS3_STRP_OUTER_VLAN:
+ if (handle->port_base_vlan_state !=
+ HNAE3_PORT_BASE_VLAN_DISABLE)
+ return false;
+
+ *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
+ return true;
+ case HNS3_STRP_INNER_VLAN:
+ if (handle->port_base_vlan_state !=
+ HNAE3_PORT_BASE_VLAN_DISABLE)
+ return false;
+
+ *vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
+ return true;
+ case HNS3_STRP_BOTH:
+ if (handle->port_base_vlan_state ==
+ HNAE3_PORT_BASE_VLAN_DISABLE)
+ *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
+ else
+ *vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
+
+ return true;
+ default:
+ return false;
+ }
+}
+
+static void hns3_rx_ring_move_fw(struct hns3_enet_ring *ring)
+{
+ ring->desc[ring->next_to_clean].rx.bd_base_info &=
+ cpu_to_le32(~BIT(HNS3_RXD_VLD_B));
+ ring->desc_cb[ring->next_to_clean].refill = 0;
+ ring->next_to_clean += 1;
+
+ if (unlikely(ring->next_to_clean == ring->desc_num))
+ ring->next_to_clean = 0;
+}
+
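+/* Start an skb for a new packet. Short packets (up to
+ * HNS3_RX_HEAD_SIZE) are copied entirely into the skb head so the
+ * buffer can be reused or released right away; longer packets copy only
+ * the pulled header and attach the rest of the buffer as a fragment.
+ */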
+static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length,
+ unsigned char *va)
+{
+ struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
+ struct net_device *netdev = ring_to_netdev(ring);
+ struct sk_buff *skb;
+
+ ring->skb = napi_alloc_skb(&ring->tqp_vector->napi, HNS3_RX_HEAD_SIZE);
+ skb = ring->skb;
+ if (unlikely(!skb)) {
+ hns3_rl_err(netdev, "alloc rx skb fail\n");
+ hns3_ring_stats_update(ring, sw_err_cnt);
+
+ return -ENOMEM;
+ }
+
+ trace_hns3_rx_desc(ring);
+ prefetchw(skb->data);
+
+ ring->pending_buf = 1;
+ ring->frag_num = 0;
+ ring->tail_skb = NULL;
+ if (length <= HNS3_RX_HEAD_SIZE) {
+ memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
+
+ /* We can reuse buffer as-is, just make sure it is reusable */
+ if (dev_page_is_reusable(desc_cb->priv))
+ desc_cb->reuse_flag = 1;
+ else if (desc_cb->type & DESC_TYPE_PP_FRAG)
+ page_pool_put_full_page(ring->page_pool, desc_cb->priv,
+ false);
+ else /* This page cannot be reused so discard it */
+ __page_frag_cache_drain(desc_cb->priv,
+ desc_cb->pagecnt_bias);
+
+ hns3_rx_ring_move_fw(ring);
+ return 0;
+ }
+
+ if (ring->page_pool)
+ skb_mark_for_recycle(skb);
+
+ hns3_ring_stats_update(ring, seg_pkt_cnt);
+
+ ring->pull_len = eth_get_headlen(netdev, va, HNS3_RX_HEAD_SIZE);
+ __skb_put(skb, ring->pull_len);
+ hns3_nic_reuse_page(skb, ring->frag_num++, ring, ring->pull_len,
+ desc_cb);
+ hns3_rx_ring_move_fw(ring);
+
+ return 0;
+}
+
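+/* Chain the remaining buffer descriptors of a multi-BD packet onto the
+ * skb, starting a new fraglist skb whenever MAX_SKB_FRAGS is exceeded,
+ * until the descriptor carrying the frame-end (FE) bit is reached.
+ */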
+static int hns3_add_frag(struct hns3_enet_ring *ring)
+{
+ struct sk_buff *skb = ring->skb;
+ struct sk_buff *head_skb = skb;
+ struct sk_buff *new_skb;
+ struct hns3_desc_cb *desc_cb;
+ struct hns3_desc *desc;
+ u32 bd_base_info;
+
+ do {
+ desc = &ring->desc[ring->next_to_clean];
+ desc_cb = &ring->desc_cb[ring->next_to_clean];
+ bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
+ /* make sure HW write desc complete */
+ dma_rmb();
+ if (!(bd_base_info & BIT(HNS3_RXD_VLD_B)))
+ return -ENXIO;
+
+ if (unlikely(ring->frag_num >= MAX_SKB_FRAGS)) {
+ new_skb = napi_alloc_skb(&ring->tqp_vector->napi, 0);
+ if (unlikely(!new_skb)) {
+ hns3_rl_err(ring_to_netdev(ring),
+ "alloc rx fraglist skb fail\n");
+ return -ENXIO;
+ }
+
+ if (ring->page_pool)
+ skb_mark_for_recycle(new_skb);
+
+ ring->frag_num = 0;
+
+ if (ring->tail_skb) {
+ ring->tail_skb->next = new_skb;
+ ring->tail_skb = new_skb;
+ } else {
+ skb_shinfo(skb)->frag_list = new_skb;
+ ring->tail_skb = new_skb;
+ }
+ }
+
+ if (ring->tail_skb) {
+ head_skb->truesize += hns3_buf_size(ring);
+ head_skb->data_len += le16_to_cpu(desc->rx.size);
+ head_skb->len += le16_to_cpu(desc->rx.size);
+ skb = ring->tail_skb;
+ }
+
+ dma_sync_single_for_cpu(ring_to_dev(ring),
+ desc_cb->dma + desc_cb->page_offset,
+ hns3_buf_size(ring),
+ DMA_FROM_DEVICE);
+
+ hns3_nic_reuse_page(skb, ring->frag_num++, ring, 0, desc_cb);
+ trace_hns3_rx_desc(ring);
+ hns3_rx_ring_move_fw(ring);
+ ring->pending_buf++;
+ } while (!(bd_base_info & BIT(HNS3_RXD_FE_B)));
+
+ return 0;
+}
+
+static int hns3_set_gro_and_checksum(struct hns3_enet_ring *ring,
+ struct sk_buff *skb, u32 l234info,
+ u32 bd_base_info, u32 ol_info, u16 csum)
+{
+ struct net_device *netdev = ring_to_netdev(ring);
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ u32 l3_type;
+
+ skb_shinfo(skb)->gso_size = hnae3_get_field(bd_base_info,
+ HNS3_RXD_GRO_SIZE_M,
+ HNS3_RXD_GRO_SIZE_S);
+ /* if there is no HW GRO, do not set gro params */
+ if (!skb_shinfo(skb)->gso_size) {
+ hns3_rx_checksum(ring, skb, l234info, bd_base_info, ol_info,
+ csum);
+ return 0;
+ }
+
+ NAPI_GRO_CB(skb)->count = hnae3_get_field(l234info,
+ HNS3_RXD_GRO_COUNT_M,
+ HNS3_RXD_GRO_COUNT_S);
+
+ if (test_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state)) {
+ u32 ptype = hnae3_get_field(ol_info, HNS3_RXD_PTYPE_M,
+ HNS3_RXD_PTYPE_S);
+
+ l3_type = hns3_rx_ptype_tbl[ptype].l3_type;
+ } else {
+ l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
+ HNS3_RXD_L3ID_S);
+ }
+
+ if (l3_type == HNS3_L3_TYPE_IPV4)
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+ else if (l3_type == HNS3_L3_TYPE_IPV6)
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+ else
+ return -EFAULT;
+
+ return hns3_gro_complete(skb, l234info);
+}
+
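+/* Report the hardware RSS hash to the stack, classifying it as an L3 or
+ * L4 hash either from the parsed packet type (advanced RXD layout) or
+ * from the L3/L4 id fields of the descriptor.
+ */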
+static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring,
+ struct sk_buff *skb, u32 rss_hash,
+ u32 l234info, u32 ol_info)
+{
+ enum pkt_hash_types rss_type = PKT_HASH_TYPE_NONE;
+ struct net_device *netdev = ring_to_netdev(ring);
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+
+ if (test_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state)) {
+ u32 ptype = hnae3_get_field(ol_info, HNS3_RXD_PTYPE_M,
+ HNS3_RXD_PTYPE_S);
+
+ rss_type = hns3_rx_ptype_tbl[ptype].hash_type;
+ } else {
+ int l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
+ HNS3_RXD_L3ID_S);
+ int l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M,
+ HNS3_RXD_L4ID_S);
+
+ if (l3_type == HNS3_L3_TYPE_IPV4 ||
+ l3_type == HNS3_L3_TYPE_IPV6) {
+ if (l4_type == HNS3_L4_TYPE_UDP ||
+ l4_type == HNS3_L4_TYPE_TCP ||
+ l4_type == HNS3_L4_TYPE_SCTP)
+ rss_type = PKT_HASH_TYPE_L4;
+ else if (l4_type == HNS3_L4_TYPE_IGMP ||
+ l4_type == HNS3_L4_TYPE_ICMP)
+ rss_type = PKT_HASH_TYPE_L3;
+ }
+ }
+
+ skb_set_hash(skb, rss_hash, rss_type);
+}
+
+static void hns3_handle_rx_ts_info(struct net_device *netdev,
+ struct hns3_desc *desc, struct sk_buff *skb,
+ u32 bd_base_info)
+{
+ if (unlikely(bd_base_info & BIT(HNS3_RXD_TS_VLD_B))) {
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ u32 nsec = le32_to_cpu(desc->ts_nsec);
+ u32 sec = le32_to_cpu(desc->ts_sec);
+
+ if (h->ae_algo->ops->get_rx_hwts)
+ h->ae_algo->ops->get_rx_hwts(h, skb, nsec, sec);
+ }
+}
+
+static void hns3_handle_rx_vlan_tag(struct hns3_enet_ring *ring,
+ struct hns3_desc *desc, struct sk_buff *skb,
+ u32 l234info)
+{
+ struct net_device *netdev = ring_to_netdev(ring);
+
+ /* Based on the hardware strategy, the offloaded tag is stored
+ * in ot_vlan_tag in the two-layer tag case and in vlan_tag in
+ * the one-layer tag case.
+ */
+ if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
+ u16 vlan_tag;
+
+ if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag))
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ vlan_tag);
+ }
+}
+
+static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb)
+{
+ struct net_device *netdev = ring_to_netdev(ring);
+ enum hns3_pkt_l2t_type l2_frame_type;
+ u32 bd_base_info, l234info, ol_info;
+ struct hns3_desc *desc;
+ unsigned int len;
+ int pre_ntc, ret;
+ u16 csum;
+
+ /* The bdinfo handled below is only valid on the last BD of the
+ * current packet, and ring->next_to_clean already points at the
+ * first descriptor of the next packet, hence the - 1 below.
+ */
+ pre_ntc = ring->next_to_clean ? (ring->next_to_clean - 1) :
+ (ring->desc_num - 1);
+ desc = &ring->desc[pre_ntc];
+ bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
+ l234info = le32_to_cpu(desc->rx.l234_info);
+ ol_info = le32_to_cpu(desc->rx.ol_info);
+ csum = le16_to_cpu(desc->csum);
+
+ hns3_handle_rx_ts_info(netdev, desc, skb, bd_base_info);
+
+ hns3_handle_rx_vlan_tag(ring, desc, skb, l234info);
+
+ if (unlikely(!desc->rx.pkt_len || (l234info & (BIT(HNS3_RXD_TRUNCAT_B) |
+ BIT(HNS3_RXD_L2E_B))))) {
+ u64_stats_update_begin(&ring->syncp);
+ if (l234info & BIT(HNS3_RXD_L2E_B))
+ ring->stats.l2_err++;
+ else
+ ring->stats.err_pkt_len++;
+ u64_stats_update_end(&ring->syncp);
+
+ return -EFAULT;
+ }
+
+ len = skb->len;
+
+ /* Set the protocol before handing the skb to the IP stack */
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ /* This is needed in order to enable forwarding support */
+ ret = hns3_set_gro_and_checksum(ring, skb, l234info,
+ bd_base_info, ol_info, csum);
+ if (unlikely(ret)) {
+ hns3_ring_stats_update(ring, rx_err_cnt);
+ return ret;
+ }
+
+ l2_frame_type = hnae3_get_field(l234info, HNS3_RXD_DMAC_M,
+ HNS3_RXD_DMAC_S);
+
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.rx_pkts++;
+ ring->stats.rx_bytes += len;
+
+ if (l2_frame_type == HNS3_L2_TYPE_MULTICAST)
+ ring->stats.rx_multicast++;
+
+ u64_stats_update_end(&ring->syncp);
+
+ ring->tqp_vector->rx_group.total_bytes += len;
+
+ hns3_set_rx_skb_rss_type(ring, skb, le32_to_cpu(desc->rx.rss_hash),
+ l234info, ol_info);
+ return 0;
+}
+
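+/* Process one receive packet: start a new skb if none is pending,
+ * append fragments until the frame is complete, then fill in VLAN,
+ * timestamp, checksum and RSS information from the last BD.
+ */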
+static int hns3_handle_rx_bd(struct hns3_enet_ring *ring)
+{
+ struct sk_buff *skb = ring->skb;
+ struct hns3_desc_cb *desc_cb;
+ struct hns3_desc *desc;
+ unsigned int length;
+ u32 bd_base_info;
+ int ret;
+
+ desc = &ring->desc[ring->next_to_clean];
+ desc_cb = &ring->desc_cb[ring->next_to_clean];
+
+ prefetch(desc);
+
+ if (!skb) {
+ bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
+ /* Check valid BD */
+ if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B))))
+ return -ENXIO;
+
+ dma_rmb();
+ length = le16_to_cpu(desc->rx.size);
+
+ ring->va = desc_cb->buf + desc_cb->page_offset;
+
+ dma_sync_single_for_cpu(ring_to_dev(ring),
+ desc_cb->dma + desc_cb->page_offset,
+ hns3_buf_size(ring),
+ DMA_FROM_DEVICE);
+
+ /* Prefetch the first cache line of the first page.
+ * The idea is to cache a few bytes of the packet header. Our L1
+ * cache line size is 64B, so we need to prefetch twice to cover
+ * 128B; on CPUs with 128B L1 cache lines a single fetch would
+ * suffice to cache the relevant part of the header.
+ */
+ net_prefetch(ring->va);
+
+ ret = hns3_alloc_skb(ring, length, ring->va);
+ skb = ring->skb;
+
+ if (ret < 0) /* alloc buffer fail */
+ return ret;
+ if (!(bd_base_info & BIT(HNS3_RXD_FE_B))) { /* need add frag */
+ ret = hns3_add_frag(ring);
+ if (ret)
+ return ret;
+ }
+ } else {
+ ret = hns3_add_frag(ring);
+ if (ret)
+ return ret;
+ }
+
+ /* As the head data may be changed when GRO is enabled, copy
+ * the head data in after the rest of the data has been received
+ */
+ if (skb->len > HNS3_RX_HEAD_SIZE)
+ memcpy(skb->data, ring->va,
+ ALIGN(ring->pull_len, sizeof(long)));
+
+ ret = hns3_handle_bdinfo(ring, skb);
+ if (unlikely(ret)) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ skb_record_rx_queue(skb, ring->tqp->tqp_index);
+ return 0;
+}
+
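+/* RX NAPI worker: refill buffers in batches of
+ * RCB_NOF_ALLOC_RX_BUFF_ONCE, handle up to @budget packets and pass
+ * each completed skb to @rx_fn. Returns @budget when a buffer refill
+ * failed so that polling continues.
+ */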
+int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
+ void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *))
+{
+#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
+ int unused_count = hns3_desc_unused(ring);
+ bool failure = false;
+ int recv_pkts = 0;
+ int err;
+
+ unused_count -= ring->pending_buf;
+
+ while (recv_pkts < budget) {
+ /* Reuse or realloc buffers */
+ if (unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
+ failure = failure ||
+ hns3_nic_alloc_rx_buffers(ring, unused_count);
+ unused_count = 0;
+ }
+
+ /* Poll one pkt */
+ err = hns3_handle_rx_bd(ring);
+ /* No FE (frame end) for the packet yet, or skb allocation failed */
+ if (unlikely(!ring->skb || err == -ENXIO)) {
+ goto out;
+ } else if (likely(!err)) {
+ rx_fn(ring, ring->skb);
+ recv_pkts++;
+ }
+
+ unused_count += ring->pending_buf;
+ ring->skb = NULL;
+ ring->pending_buf = 0;
+ }
+
+out:
+ /* sync head pointer before exiting, since hardware will calculate
+ * FBD number with head pointer
+ */
+ if (unused_count > 0)
+ failure = failure ||
+ hns3_nic_alloc_rx_buffers(ring, unused_count);
+
+ return failure ? budget : recv_pkts;
+}
+
+static void hns3_update_rx_int_coalesce(struct hns3_enet_tqp_vector *tqp_vector)
+{
+ struct hns3_enet_ring_group *rx_group = &tqp_vector->rx_group;
+ struct dim_sample sample = {};
+
+ if (!rx_group->coal.adapt_enable)
+ return;
+
+ dim_update_sample(tqp_vector->event_cnt, rx_group->total_packets,
+ rx_group->total_bytes, &sample);
+ net_dim(&rx_group->dim, sample);
+}
+
+static void hns3_update_tx_int_coalesce(struct hns3_enet_tqp_vector *tqp_vector)
+{
+ struct hns3_enet_ring_group *tx_group = &tqp_vector->tx_group;
+ struct dim_sample sample = {};
+
+ if (!tx_group->coal.adapt_enable)
+ return;
+
+ dim_update_sample(tqp_vector->event_cnt, tx_group->total_packets,
+ tx_group->total_bytes, &sample);
+ net_dim(&tx_group->dim, sample);
+}
+
+static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
+{
+ struct hns3_nic_priv *priv = netdev_priv(napi->dev);
+ struct hns3_enet_ring *ring;
+ int rx_pkt_total = 0;
+
+ struct hns3_enet_tqp_vector *tqp_vector =
+ container_of(napi, struct hns3_enet_tqp_vector, napi);
+ bool clean_complete = true;
+ int rx_budget = budget;
+
+ if (unlikely(test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) {
+ napi_complete(napi);
+ return 0;
+ }
+
+ /* Since the actual Tx work is minimal, we can give the Tx a larger
+ * budget and be more aggressive about cleaning up the Tx descriptors.
+ */
+ hns3_for_each_ring(ring, tqp_vector->tx_group)
+ hns3_clean_tx_ring(ring, budget);
+
+ /* make sure rx ring budget not smaller than 1 */
+ if (tqp_vector->num_tqps > 1)
+ rx_budget = max(budget / tqp_vector->num_tqps, 1);
+
+ hns3_for_each_ring(ring, tqp_vector->rx_group) {
+ int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget,
+ hns3_rx_skb);
+ if (rx_cleaned >= rx_budget)
+ clean_complete = false;
+
+ rx_pkt_total += rx_cleaned;
+ }
+
+ tqp_vector->rx_group.total_packets += rx_pkt_total;
+
+ if (!clean_complete)
+ return budget;
+
+ if (napi_complete(napi) &&
+ likely(!test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) {
+ hns3_update_rx_int_coalesce(tqp_vector);
+ hns3_update_tx_int_coalesce(tqp_vector);
+
+ hns3_mask_vector_irq(tqp_vector, 1);
+ }
+
+ return rx_pkt_total;
+}
+
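+/* Append every TX or RX ring served by this vector to the ring chain
+ * that is later handed to the AE layer when mapping rings to the
+ * interrupt vector.
+ */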
+static int hns3_create_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
+ struct hnae3_ring_chain_node **head,
+ bool is_tx)
+{
+ u32 bit_value = is_tx ? HNAE3_RING_TYPE_TX : HNAE3_RING_TYPE_RX;
+ u32 field_value = is_tx ? HNAE3_RING_GL_TX : HNAE3_RING_GL_RX;
+ struct hnae3_ring_chain_node *cur_chain = *head;
+ struct pci_dev *pdev = tqp_vector->handle->pdev;
+ struct hnae3_ring_chain_node *chain;
+ struct hns3_enet_ring *ring;
+
+ ring = is_tx ? tqp_vector->tx_group.ring : tqp_vector->rx_group.ring;
+
+ if (cur_chain) {
+ while (cur_chain->next)
+ cur_chain = cur_chain->next;
+ }
+
+ while (ring) {
+ chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL);
+ if (!chain)
+ return -ENOMEM;
+ if (cur_chain)
+ cur_chain->next = chain;
+ else
+ *head = chain;
+ chain->tqp_index = ring->tqp->tqp_index;
+ hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
+ bit_value);
+ hnae3_set_field(chain->int_gl_idx,
+ HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S, field_value);
+
+ cur_chain = chain;
+
+ ring = ring->next;
+ }
+
+ return 0;
+}
+
+static struct hnae3_ring_chain_node *
+hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector)
+{
+ struct pci_dev *pdev = tqp_vector->handle->pdev;
+ struct hnae3_ring_chain_node *cur_chain = NULL;
+ struct hnae3_ring_chain_node *chain;
+
+ if (hns3_create_ring_chain(tqp_vector, &cur_chain, true))
+ goto err_free_chain;
+
+ if (hns3_create_ring_chain(tqp_vector, &cur_chain, false))
+ goto err_free_chain;
+
+ return cur_chain;
+
+err_free_chain:
+ while (cur_chain) {
+ chain = cur_chain->next;
+ devm_kfree(&pdev->dev, cur_chain);
+ cur_chain = chain;
+ }
+
+ return NULL;
+}
+
+static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
+ struct hnae3_ring_chain_node *head)
+{
+ struct pci_dev *pdev = tqp_vector->handle->pdev;
+ struct hnae3_ring_chain_node *chain_tmp, *chain;
+
+ chain = head;
+
+ while (chain) {
+ chain_tmp = chain->next;
+ devm_kfree(&pdev->dev, chain);
+ chain = chain_tmp;
+ }
+}
+
+static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group,
+ struct hns3_enet_ring *ring)
+{
+ ring->next = group->ring;
+ group->ring = ring;
+
+ group->count++;
+}
+
+static void hns3_nic_set_cpumask(struct hns3_nic_priv *priv)
+{
+ struct pci_dev *pdev = priv->ae_handle->pdev;
+ struct hns3_enet_tqp_vector *tqp_vector;
+ int num_vectors = priv->vector_num;
+ int numa_node;
+ int vector_i;
+
+ numa_node = dev_to_node(&pdev->dev);
+
+ for (vector_i = 0; vector_i < num_vectors; vector_i++) {
+ tqp_vector = &priv->tqp_vector[vector_i];
+ cpumask_set_cpu(cpumask_local_spread(vector_i, numa_node),
+ &tqp_vector->affinity_mask);
+ }
+}
+
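+/* DIM worker: apply the RX moderation profile chosen by net_dim to the
+ * vector's GL (gap limiter) and, when supported, QL (quantity limiter)
+ * coalescing settings.
+ */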
+static void hns3_rx_dim_work(struct work_struct *work)
+{
+ struct dim *dim = container_of(work, struct dim, work);
+ struct hns3_enet_ring_group *group = container_of(dim,
+ struct hns3_enet_ring_group, dim);
+ struct hns3_enet_tqp_vector *tqp_vector = group->ring->tqp_vector;
+ struct dim_cq_moder cur_moder =
+ net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
+
+ hns3_set_vector_coalesce_rx_gl(group->ring->tqp_vector, cur_moder.usec);
+ tqp_vector->rx_group.coal.int_gl = cur_moder.usec;
+
+ if (cur_moder.pkts < tqp_vector->rx_group.coal.int_ql_max) {
+ hns3_set_vector_coalesce_rx_ql(tqp_vector, cur_moder.pkts);
+ tqp_vector->rx_group.coal.int_ql = cur_moder.pkts;
+ }
+
+ dim->state = DIM_START_MEASURE;
+}
+
+static void hns3_tx_dim_work(struct work_struct *work)
+{
+ struct dim *dim = container_of(work, struct dim, work);
+ struct hns3_enet_ring_group *group = container_of(dim,
+ struct hns3_enet_ring_group, dim);
+ struct hns3_enet_tqp_vector *tqp_vector = group->ring->tqp_vector;
+ struct dim_cq_moder cur_moder =
+ net_dim_get_tx_moderation(dim->mode, dim->profile_ix);
+
+ hns3_set_vector_coalesce_tx_gl(tqp_vector, cur_moder.usec);
+ tqp_vector->tx_group.coal.int_gl = cur_moder.usec;
+
+ if (cur_moder.pkts < tqp_vector->tx_group.coal.int_ql_max) {
+ hns3_set_vector_coalesce_tx_ql(tqp_vector, cur_moder.pkts);
+ tqp_vector->tx_group.coal.int_ql = cur_moder.pkts;
+ }
+
+ dim->state = DIM_START_MEASURE;
+}
+
+static void hns3_nic_init_dim(struct hns3_enet_tqp_vector *tqp_vector)
+{
+ INIT_WORK(&tqp_vector->rx_group.dim.work, hns3_rx_dim_work);
+ INIT_WORK(&tqp_vector->tx_group.dim.work, hns3_tx_dim_work);
+}
+
+static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
+{
+ struct hnae3_handle *h = priv->ae_handle;
+ struct hns3_enet_tqp_vector *tqp_vector;
+ int ret;
+ int i;
+
+ hns3_nic_set_cpumask(priv);
+
+ for (i = 0; i < priv->vector_num; i++) {
+ tqp_vector = &priv->tqp_vector[i];
+ hns3_vector_coalesce_init_hw(tqp_vector, priv);
+ tqp_vector->num_tqps = 0;
+ hns3_nic_init_dim(tqp_vector);
+ }
+
+ for (i = 0; i < h->kinfo.num_tqps; i++) {
+ u16 vector_i = i % priv->vector_num;
+ u16 tqp_num = h->kinfo.num_tqps;
+
+ tqp_vector = &priv->tqp_vector[vector_i];
+
+ hns3_add_ring_to_group(&tqp_vector->tx_group,
+ &priv->ring[i]);
+
+ hns3_add_ring_to_group(&tqp_vector->rx_group,
+ &priv->ring[i + tqp_num]);
+
+ priv->ring[i].tqp_vector = tqp_vector;
+ priv->ring[i + tqp_num].tqp_vector = tqp_vector;
+ tqp_vector->num_tqps++;
+ }
+
+ for (i = 0; i < priv->vector_num; i++) {
+ struct hnae3_ring_chain_node *vector_ring_chain;
+
+ tqp_vector = &priv->tqp_vector[i];
+
+ tqp_vector->rx_group.total_bytes = 0;
+ tqp_vector->rx_group.total_packets = 0;
+ tqp_vector->tx_group.total_bytes = 0;
+ tqp_vector->tx_group.total_packets = 0;
+ tqp_vector->handle = h;
+
+ vector_ring_chain = hns3_get_vector_ring_chain(tqp_vector);
+ if (!vector_ring_chain) {
+ ret = -ENOMEM;
+ goto map_ring_fail;
+ }
+
+ ret = h->ae_algo->ops->map_ring_to_vector(h,
+ tqp_vector->vector_irq, vector_ring_chain);
+
+ hns3_free_vector_ring_chain(tqp_vector, vector_ring_chain);
+
+ if (ret)
+ goto map_ring_fail;
+
+ netif_napi_add(priv->netdev, &tqp_vector->napi,
+ hns3_nic_common_poll);
+ }
+
+ return 0;
+
+map_ring_fail:
+ while (i--)
+ netif_napi_del(&priv->tqp_vector[i].napi);
+
+ return ret;
+}
+
+static void hns3_nic_init_coal_cfg(struct hns3_nic_priv *priv)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
+ struct hns3_enet_coalesce *tx_coal = &priv->tx_coal;
+ struct hns3_enet_coalesce *rx_coal = &priv->rx_coal;
+
+ /* initialize the configuration for interrupt coalescing.
+ * 1. GL (Interrupt Gap Limiter)
+ * 2. RL (Interrupt Rate Limiter)
+ * 3. QL (Interrupt Quantity Limiter)
+ *
+ * Default: enable interrupt coalescing self-adaptive and GL
+ */
+ tx_coal->adapt_enable = 1;
+ rx_coal->adapt_enable = 1;
+
+ tx_coal->int_gl = HNS3_INT_GL_50K;
+ rx_coal->int_gl = HNS3_INT_GL_50K;
+
+ rx_coal->flow_level = HNS3_FLOW_LOW;
+ tx_coal->flow_level = HNS3_FLOW_LOW;
+
+ if (ae_dev->dev_specs.int_ql_max) {
+ tx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG;
+ rx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG;
+ }
+}
+
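+/* Request interrupt vectors from the AE layer (at most one per online
+ * CPU and never more than the number of TQPs) and initialize the
+ * per-vector coalescing settings.
+ */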
+static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv)
+{
+ struct hnae3_handle *h = priv->ae_handle;
+ struct hns3_enet_tqp_vector *tqp_vector;
+ struct hnae3_vector_info *vector;
+ struct pci_dev *pdev = h->pdev;
+ u16 tqp_num = h->kinfo.num_tqps;
+ u16 vector_num;
+ int ret = 0;
+ u16 i;
+
+ /* RSS size, cpu online and vector_num should be the same */
+ /* Should consider 2p/4p later */
+ vector_num = min_t(u16, num_online_cpus(), tqp_num);
+
+ vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector),
+ GFP_KERNEL);
+ if (!vector)
+ return -ENOMEM;
+
+ /* save the actual available vector number */
+ vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector);
+
+ priv->vector_num = vector_num;
+ priv->tqp_vector = (struct hns3_enet_tqp_vector *)
+ devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector),
+ GFP_KERNEL);
+ if (!priv->tqp_vector) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; i < priv->vector_num; i++) {
+ tqp_vector = &priv->tqp_vector[i];
+ tqp_vector->idx = i;
+ tqp_vector->mask_addr = vector[i].io_addr;
+ tqp_vector->vector_irq = vector[i].vector;
+ hns3_vector_coalesce_init(tqp_vector, priv);
+ }
+
+out:
+ devm_kfree(&pdev->dev, vector);
+ return ret;
+}
+
+static void hns3_clear_ring_group(struct hns3_enet_ring_group *group)
+{
+ group->ring = NULL;
+ group->count = 0;
+}
+
+static void hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
+{
+ struct hnae3_ring_chain_node *vector_ring_chain;
+ struct hnae3_handle *h = priv->ae_handle;
+ struct hns3_enet_tqp_vector *tqp_vector;
+ int i;
+
+ for (i = 0; i < priv->vector_num; i++) {
+ tqp_vector = &priv->tqp_vector[i];
+
+ if (!tqp_vector->rx_group.ring && !tqp_vector->tx_group.ring)
+ continue;
+
+ /* Since the mapping can be overwritten, if we fail to get the
+ * chain between the vector and the ring we still go on to deal
+ * with the remaining cleanup steps.
+ */
+ vector_ring_chain = hns3_get_vector_ring_chain(tqp_vector);
+ if (!vector_ring_chain)
+ dev_warn(priv->dev, "failed to get ring chain\n");
+
+ h->ae_algo->ops->unmap_ring_from_vector(h,
+ tqp_vector->vector_irq, vector_ring_chain);
+
+ hns3_free_vector_ring_chain(tqp_vector, vector_ring_chain);
+
+ hns3_clear_ring_group(&tqp_vector->rx_group);
+ hns3_clear_ring_group(&tqp_vector->tx_group);
+ netif_napi_del(&priv->tqp_vector[i].napi);
+ }
+}
+
+static void hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv)
+{
+ struct hnae3_handle *h = priv->ae_handle;
+ struct pci_dev *pdev = h->pdev;
+ int i, ret;
+
+ for (i = 0; i < priv->vector_num; i++) {
+ struct hns3_enet_tqp_vector *tqp_vector;
+
+ tqp_vector = &priv->tqp_vector[i];
+ ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq);
+ if (ret)
+ return;
+ }
+
+ devm_kfree(&pdev->dev, priv->tqp_vector);
+}
+
+static void hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
+ unsigned int ring_type)
+{
+ int queue_num = priv->ae_handle->kinfo.num_tqps;
+ struct hns3_enet_ring *ring;
+ int desc_num;
+
+ if (ring_type == HNAE3_RING_TYPE_TX) {
+ ring = &priv->ring[q->tqp_index];
+ desc_num = priv->ae_handle->kinfo.num_tx_desc;
+ ring->queue_index = q->tqp_index;
+ ring->tx_copybreak = priv->tx_copybreak;
+ ring->last_to_use = 0;
+ } else {
+ ring = &priv->ring[q->tqp_index + queue_num];
+ desc_num = priv->ae_handle->kinfo.num_rx_desc;
+ ring->queue_index = q->tqp_index;
+ ring->rx_copybreak = priv->rx_copybreak;
+ }
+
+ hnae3_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type);
+
+ ring->tqp = q;
+ ring->desc = NULL;
+ ring->desc_cb = NULL;
+ ring->dev = priv->dev;
+ ring->desc_dma_addr = 0;
+ ring->buf_size = q->buf_size;
+ ring->desc_num = desc_num;
+ ring->next_to_use = 0;
+ ring->next_to_clean = 0;
+}
+
+static void hns3_queue_to_ring(struct hnae3_queue *tqp,
+ struct hns3_nic_priv *priv)
+{
+ hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX);
+ hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX);
+}
+
+static int hns3_get_ring_config(struct hns3_nic_priv *priv)
+{
+ struct hnae3_handle *h = priv->ae_handle;
+ struct pci_dev *pdev = h->pdev;
+ int i;
+
+ priv->ring = devm_kzalloc(&pdev->dev,
+ array3_size(h->kinfo.num_tqps,
+ sizeof(*priv->ring), 2),
+ GFP_KERNEL);
+ if (!priv->ring)
+ return -ENOMEM;
+
+ for (i = 0; i < h->kinfo.num_tqps; i++)
+ hns3_queue_to_ring(h->kinfo.tqp[i], priv);
+
+ return 0;
+}
+
+static void hns3_put_ring_config(struct hns3_nic_priv *priv)
+{
+ if (!priv->ring)
+ return;
+
+ devm_kfree(priv->dev, priv->ring);
+ priv->ring = NULL;
+}
+
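+/* Create a per-ring page pool for RX buffers. Failure is not fatal:
+ * with ring->page_pool left NULL the driver falls back to its own
+ * page-frag allocation and reuse scheme.
+ */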
+static void hns3_alloc_page_pool(struct hns3_enet_ring *ring)
+{
+ struct page_pool_params pp_params = {
+ .flags = PP_FLAG_DMA_MAP | PP_FLAG_PAGE_FRAG |
+ PP_FLAG_DMA_SYNC_DEV,
+ .order = hns3_page_order(ring),
+ .pool_size = ring->desc_num * hns3_buf_size(ring) /
+ (PAGE_SIZE << hns3_page_order(ring)),
+ .nid = dev_to_node(ring_to_dev(ring)),
+ .dev = ring_to_dev(ring),
+ .dma_dir = DMA_FROM_DEVICE,
+ .offset = 0,
+ .max_len = PAGE_SIZE << hns3_page_order(ring),
+ };
+
+ ring->page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(ring->page_pool)) {
+ dev_warn(ring_to_dev(ring), "page pool creation failed: %ld\n",
+ PTR_ERR(ring->page_pool));
+ ring->page_pool = NULL;
+ }
+}
+
+static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
+{
+ int ret;
+
+ if (ring->desc_num <= 0 || ring->buf_size <= 0)
+ return -EINVAL;
+
+ ring->desc_cb = devm_kcalloc(ring_to_dev(ring), ring->desc_num,
+ sizeof(ring->desc_cb[0]), GFP_KERNEL);
+ if (!ring->desc_cb) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = hns3_alloc_desc(ring);
+ if (ret)
+ goto out_with_desc_cb;
+
+ if (!HNAE3_IS_TX_RING(ring)) {
+ if (page_pool_enabled)
+ hns3_alloc_page_pool(ring);
+
+ ret = hns3_alloc_ring_buffers(ring);
+ if (ret)
+ goto out_with_desc;
+ } else {
+ hns3_init_tx_spare_buffer(ring);
+ }
+
+ return 0;
+
+out_with_desc:
+ hns3_free_desc(ring);
+out_with_desc_cb:
+ devm_kfree(ring_to_dev(ring), ring->desc_cb);
+ ring->desc_cb = NULL;
+out:
+ return ret;
+}
+
+void hns3_fini_ring(struct hns3_enet_ring *ring)
+{
+ hns3_free_desc(ring);
+ devm_kfree(ring_to_dev(ring), ring->desc_cb);
+ ring->desc_cb = NULL;
+ ring->next_to_clean = 0;
+ ring->next_to_use = 0;
+ ring->last_to_use = 0;
+ ring->pending_buf = 0;
+ if (!HNAE3_IS_TX_RING(ring) && ring->skb) {
+ dev_kfree_skb_any(ring->skb);
+ ring->skb = NULL;
+ } else if (HNAE3_IS_TX_RING(ring) && ring->tx_spare) {
+ struct hns3_tx_spare *tx_spare = ring->tx_spare;
+
+ dma_unmap_page(ring_to_dev(ring), tx_spare->dma, tx_spare->len,
+ DMA_TO_DEVICE);
+ free_pages((unsigned long)tx_spare->buf,
+ get_order(tx_spare->len));
+ devm_kfree(ring_to_dev(ring), tx_spare);
+ ring->tx_spare = NULL;
+ }
+
+ if (!HNAE3_IS_TX_RING(ring) && ring->page_pool) {
+ page_pool_destroy(ring->page_pool);
+ ring->page_pool = NULL;
+ }
+}
+
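+/* Translate the RX buffer size into the BD size type expected by the
+ * hardware; unsupported sizes fall back to the 2048-byte encoding.
+ */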
+static int hns3_buf_size2type(u32 buf_size)
+{
+ int bd_size_type;
+
+ switch (buf_size) {
+ case 512:
+ bd_size_type = HNS3_BD_SIZE_512_TYPE;
+ break;
+ case 1024:
+ bd_size_type = HNS3_BD_SIZE_1024_TYPE;
+ break;
+ case 2048:
+ bd_size_type = HNS3_BD_SIZE_2048_TYPE;
+ break;
+ case 4096:
+ bd_size_type = HNS3_BD_SIZE_4096_TYPE;
+ break;
+ default:
+ bd_size_type = HNS3_BD_SIZE_2048_TYPE;
+ }
+
+ return bd_size_type;
+}
+
+static void hns3_init_ring_hw(struct hns3_enet_ring *ring)
+{
+ dma_addr_t dma = ring->desc_dma_addr;
+ struct hnae3_queue *q = ring->tqp;
+
+ if (!HNAE3_IS_TX_RING(ring)) {
+ hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG, (u32)dma);
+ hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG,
+ (u32)((dma >> 31) >> 1));
+
+ hns3_write_dev(q, HNS3_RING_RX_RING_BD_LEN_REG,
+ hns3_buf_size2type(ring->buf_size));
+ hns3_write_dev(q, HNS3_RING_RX_RING_BD_NUM_REG,
+ ring->desc_num / 8 - 1);
+ } else {
+ hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_L_REG,
+ (u32)dma);
+ hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG,
+ (u32)((dma >> 31) >> 1));
+
+ hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG,
+ ring->desc_num / 8 - 1);
+ }
+}
+
+static void hns3_init_tx_ring_tc(struct hns3_nic_priv *priv)
+{
+ struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo;
+ struct hnae3_tc_info *tc_info = &kinfo->tc_info;
+ int i;
+
+ for (i = 0; i < tc_info->num_tc; i++) {
+ int j;
+
+ for (j = 0; j < tc_info->tqp_count[i]; j++) {
+ struct hnae3_queue *q;
+
+ q = priv->ring[tc_info->tqp_offset[i] + j].tqp;
+ hns3_write_dev(q, HNS3_RING_TX_RING_TC_REG, i);
+ }
+ }
+}
+
+int hns3_init_all_ring(struct hns3_nic_priv *priv)
+{
+ struct hnae3_handle *h = priv->ae_handle;
+ int ring_num = h->kinfo.num_tqps * 2;
+ int i, j;
+ int ret;
+
+ for (i = 0; i < ring_num; i++) {
+ ret = hns3_alloc_ring_memory(&priv->ring[i]);
+ if (ret) {
+ dev_err(priv->dev,
+ "Alloc ring memory fail! ret=%d\n", ret);
+ goto out_when_alloc_ring_memory;
+ }
+
+ u64_stats_init(&priv->ring[i].syncp);
+ }
+
+ return 0;
+
+out_when_alloc_ring_memory:
+ for (j = i - 1; j >= 0; j--)
+ hns3_fini_ring(&priv->ring[j]);
+
+ return -ENOMEM;
+}
+
+static void hns3_uninit_all_ring(struct hns3_nic_priv *priv)
+{
+ struct hnae3_handle *h = priv->ae_handle;
+ int i;
+
+ for (i = 0; i < h->kinfo.num_tqps; i++) {
+ hns3_fini_ring(&priv->ring[i]);
+ hns3_fini_ring(&priv->ring[i + h->kinfo.num_tqps]);
+ }
+}
+
+/* Set the MAC address if it is configured, or leave it to the AE driver */
+static int hns3_init_mac_addr(struct net_device *netdev)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
+ struct hnae3_handle *h = priv->ae_handle;
+ u8 mac_addr_temp[ETH_ALEN] = {0};
+ int ret = 0;
+
+ if (h->ae_algo->ops->get_mac_addr)
+ h->ae_algo->ops->get_mac_addr(h, mac_addr_temp);
+
+ /* Check if the MAC address is valid, if not get a random one */
+ if (!is_valid_ether_addr(mac_addr_temp)) {
+ eth_hw_addr_random(netdev);
+ hnae3_format_mac_addr(format_mac_addr, netdev->dev_addr);
+ dev_warn(priv->dev, "using random MAC address %s\n",
+ format_mac_addr);
+ } else if (!ether_addr_equal(netdev->dev_addr, mac_addr_temp)) {
+ eth_hw_addr_set(netdev, mac_addr_temp);
+ ether_addr_copy(netdev->perm_addr, mac_addr_temp);
+ } else {
+ return 0;
+ }
+
+ if (h->ae_algo->ops->set_mac_addr)
+ ret = h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr, true);
+
+ return ret;
+}
+
+static int hns3_init_phy(struct net_device *netdev)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ int ret = 0;
+
+ if (h->ae_algo->ops->mac_connect_phy)
+ ret = h->ae_algo->ops->mac_connect_phy(h);
+
+ return ret;
+}
+
+static void hns3_uninit_phy(struct net_device *netdev)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (h->ae_algo->ops->mac_disconnect_phy)
+ h->ae_algo->ops->mac_disconnect_phy(h);
+}
+
+static int hns3_client_start(struct hnae3_handle *handle)
+{
+ if (!handle->ae_algo->ops->client_start)
+ return 0;
+
+ return handle->ae_algo->ops->client_start(handle);
+}
+
+static void hns3_client_stop(struct hnae3_handle *handle)
+{
+ if (!handle->ae_algo->ops->client_stop)
+ return;
+
+ handle->ae_algo->ops->client_stop(handle);
+}
+
+static void hns3_info_show(struct hns3_nic_priv *priv)
+{
+ struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo;
+ char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
+
+ hnae3_format_mac_addr(format_mac_addr, priv->netdev->dev_addr);
+ dev_info(priv->dev, "MAC address: %s\n", format_mac_addr);
+ dev_info(priv->dev, "Task queue pairs numbers: %u\n", kinfo->num_tqps);
+ dev_info(priv->dev, "RSS size: %u\n", kinfo->rss_size);
+ dev_info(priv->dev, "Allocated RSS size: %u\n", kinfo->req_rss_size);
+ dev_info(priv->dev, "RX buffer length: %u\n", kinfo->rx_buf_len);
+ dev_info(priv->dev, "Desc num per TX queue: %u\n", kinfo->num_tx_desc);
+ dev_info(priv->dev, "Desc num per RX queue: %u\n", kinfo->num_rx_desc);
+ dev_info(priv->dev, "Total number of enabled TCs: %u\n",
+ kinfo->tc_info.num_tc);
+ dev_info(priv->dev, "Max mtu size: %u\n", priv->netdev->max_mtu);
+}
+
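+/* Record the DIM CQ period mode for the TX or RX direction and, on
+ * hardware that supports it, program the corresponding CQ mode
+ * register.
+ */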
+static void hns3_set_cq_period_mode(struct hns3_nic_priv *priv,
+ enum dim_cq_period_mode mode, bool is_tx)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
+ struct hnae3_handle *handle = priv->ae_handle;
+ int i;
+
+ if (is_tx) {
+ priv->tx_cqe_mode = mode;
+
+ for (i = 0; i < priv->vector_num; i++)
+ priv->tqp_vector[i].tx_group.dim.mode = mode;
+ } else {
+ priv->rx_cqe_mode = mode;
+
+ for (i = 0; i < priv->vector_num; i++)
+ priv->tqp_vector[i].rx_group.dim.mode = mode;
+ }
+
+ if (hnae3_ae_dev_cq_supported(ae_dev)) {
+ u32 new_mode;
+ u64 reg;
+
+ new_mode = (mode == DIM_CQ_PERIOD_MODE_START_FROM_CQE) ?
+ HNS3_CQ_MODE_CQE : HNS3_CQ_MODE_EQE;
+ reg = is_tx ? HNS3_GL1_CQ_MODE_REG : HNS3_GL0_CQ_MODE_REG;
+
+ writel(new_mode, handle->kinfo.io_base + reg);
+ }
+}
+
+void hns3_cq_period_mode_init(struct hns3_nic_priv *priv,
+ enum dim_cq_period_mode tx_mode,
+ enum dim_cq_period_mode rx_mode)
+{
+ hns3_set_cq_period_mode(priv, tx_mode, true);
+ hns3_set_cq_period_mode(priv, rx_mode, false);
+}
+
+static void hns3_state_init(struct hnae3_handle *handle)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
+ struct net_device *netdev = handle->kinfo.netdev;
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+
+ set_bit(HNS3_NIC_STATE_INITED, &priv->state);
+
+ if (test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, ae_dev->caps))
+ set_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state);
+
+ if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
+ set_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->supported_pflags);
+
+ if (test_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps))
+ set_bit(HNS3_NIC_STATE_HW_TX_CSUM_ENABLE, &priv->state);
+
+ if (hnae3_ae_dev_rxd_adv_layout_supported(ae_dev))
+ set_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state);
+}
+
+static void hns3_state_uninit(struct hnae3_handle *handle)
+{
+ struct hns3_nic_priv *priv = handle->priv;
+
+ clear_bit(HNS3_NIC_STATE_INITED, &priv->state);
+}
+
+static int hns3_client_init(struct hnae3_handle *handle)
+{
+ struct pci_dev *pdev = handle->pdev;
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
+ u16 alloc_tqps, max_rss_size;
+ struct hns3_nic_priv *priv;
+ struct net_device *netdev;
+ int ret;
+
+ handle->ae_algo->ops->get_tqps_and_rss_info(handle, &alloc_tqps,
+ &max_rss_size);
+ netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv), alloc_tqps);
+ if (!netdev)
+ return -ENOMEM;
+
+ priv = netdev_priv(netdev);
+ priv->dev = &pdev->dev;
+ priv->netdev = netdev;
+ priv->ae_handle = handle;
+ priv->tx_timeout_count = 0;
+ priv->max_non_tso_bd_num = ae_dev->dev_specs.max_non_tso_bd_num;
+ set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
+
+ handle->msg_enable = netif_msg_init(debug, DEFAULT_MSG_LEVEL);
+
+ handle->kinfo.netdev = netdev;
+ handle->priv = (void *)priv;
+
+ hns3_init_mac_addr(netdev);
+
+ hns3_set_default_feature(netdev);
+
+ netdev->watchdog_timeo = HNS3_TX_TIMEOUT;
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+ netdev->netdev_ops = &hns3_nic_netdev_ops;
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+ hns3_ethtool_set_ops(netdev);
+
+ /* Carrier off reporting is important to ethtool even BEFORE open */
+ netif_carrier_off(netdev);
+
+ ret = hns3_get_ring_config(priv);
+ if (ret) {
+ ret = -ENOMEM;
+ goto out_get_ring_cfg;
+ }
+
+ hns3_nic_init_coal_cfg(priv);
+
+ ret = hns3_nic_alloc_vector_data(priv);
+ if (ret) {
+ ret = -ENOMEM;
+ goto out_alloc_vector_data;
+ }
+
+ ret = hns3_nic_init_vector_data(priv);
+ if (ret) {
+ ret = -ENOMEM;
+ goto out_init_vector_data;
+ }
+
+ ret = hns3_init_all_ring(priv);
+ if (ret) {
+ ret = -ENOMEM;
+ goto out_init_ring;
+ }
+
+ hns3_cq_period_mode_init(priv, DIM_CQ_PERIOD_MODE_START_FROM_EQE,
+ DIM_CQ_PERIOD_MODE_START_FROM_EQE);
+
+ ret = hns3_init_phy(netdev);
+ if (ret)
+ goto out_init_phy;
+
+ /* the device can work without cpu rmap, only aRFS needs it */
+ ret = hns3_set_rx_cpu_rmap(netdev);
+ if (ret)
+ dev_warn(priv->dev, "set rx cpu rmap fail, ret=%d\n", ret);
+
+ ret = hns3_nic_init_irq(priv);
+ if (ret) {
+ dev_err(priv->dev, "init irq failed! ret=%d\n", ret);
+ hns3_free_rx_cpu_rmap(netdev);
+ goto out_init_irq_fail;
+ }
+
+ ret = hns3_client_start(handle);
+ if (ret) {
+ dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret);
+ goto out_client_start;
+ }
+
+ hns3_dcbnl_setup(handle);
+
+ ret = hns3_dbg_init(handle);
+ if (ret) {
+ dev_err(priv->dev, "failed to init debugfs, ret = %d\n",
+ ret);
+ goto out_client_start;
+ }
+
+ netdev->max_mtu = HNS3_MAX_MTU(ae_dev->dev_specs.max_frm_size);
+
+ hns3_state_init(handle);
+
+ ret = register_netdev(netdev);
+ if (ret) {
+ dev_err(priv->dev, "probe register netdev fail!\n");
+ goto out_reg_netdev_fail;
+ }
+
+ if (netif_msg_drv(handle))
+ hns3_info_show(priv);
+
+ return ret;
+
+out_reg_netdev_fail:
+ hns3_state_uninit(handle);
+ hns3_dbg_uninit(handle);
+ hns3_client_stop(handle);
+out_client_start:
+ hns3_free_rx_cpu_rmap(netdev);
+ hns3_nic_uninit_irq(priv);
+out_init_irq_fail:
+ hns3_uninit_phy(netdev);
+out_init_phy:
+ hns3_uninit_all_ring(priv);
+out_init_ring:
+ hns3_nic_uninit_vector_data(priv);
+out_init_vector_data:
+ hns3_nic_dealloc_vector_data(priv);
+out_alloc_vector_data:
+ priv->ring = NULL;
+out_get_ring_cfg:
+ priv->ae_handle = NULL;
+ free_netdev(netdev);
+ return ret;
+}
+
+static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
+{
+ struct net_device *netdev = handle->kinfo.netdev;
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+
+ if (netdev->reg_state != NETREG_UNINITIALIZED)
+ unregister_netdev(netdev);
+
+ hns3_client_stop(handle);
+
+ hns3_uninit_phy(netdev);
+
+ if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
+ netdev_warn(netdev, "already uninitialized\n");
+ goto out_netdev_free;
+ }
+
+ hns3_free_rx_cpu_rmap(netdev);
+
+ hns3_nic_uninit_irq(priv);
+
+ hns3_clear_all_ring(handle, true);
+
+ hns3_nic_uninit_vector_data(priv);
+
+ hns3_nic_dealloc_vector_data(priv);
+
+ hns3_uninit_all_ring(priv);
+
+ hns3_put_ring_config(priv);
+
+out_netdev_free:
+ hns3_dbg_uninit(handle);
+ free_netdev(netdev);
+}
+
+static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup)
+{
+ struct net_device *netdev = handle->kinfo.netdev;
+
+ if (!netdev)
+ return;
+
+ if (linkup) {
+ netif_tx_wake_all_queues(netdev);
+ netif_carrier_on(netdev);
+ if (netif_msg_link(handle))
+ netdev_info(netdev, "link up\n");
+ } else {
+ netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(netdev);
+ if (netif_msg_link(handle))
+ netdev_info(netdev, "link down\n");
+ }
+}
+
+static void hns3_clear_tx_ring(struct hns3_enet_ring *ring)
+{
+ while (ring->next_to_clean != ring->next_to_use) {
+ ring->desc[ring->next_to_clean].tx.bdtp_fe_sc_vld_ra_ri = 0;
+ hns3_free_buffer_detach(ring, ring->next_to_clean, 0);
+ ring_ptr_move_fw(ring, next_to_clean);
+ }
+
+ ring->pending_buf = 0;
+}
+
+static int hns3_clear_rx_ring(struct hns3_enet_ring *ring)
+{
+ struct hns3_desc_cb res_cbs;
+ int ret;
+
+ while (ring->next_to_use != ring->next_to_clean) {
+ /* When a buffer is not reused, its memory has been
+ * freed in hns3_handle_rx_bd or will be freed by the
+ * stack, so we need to replace the buffer here.
+ */
+ if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
+ ret = hns3_alloc_and_map_buffer(ring, &res_cbs);
+ if (ret) {
+ hns3_ring_stats_update(ring, sw_err_cnt);
+ /* if allocating a new buffer fails, exit directly
+ * and re-clear in the up flow.
+ */
+ netdev_warn(ring_to_netdev(ring),
+ "reserve buffer map failed, ret = %d\n",
+ ret);
+ return ret;
+ }
+ hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
+ }
+ ring_ptr_move_fw(ring, next_to_use);
+ }
+
+ /* Free the pending skb in rx ring */
+ if (ring->skb) {
+ dev_kfree_skb_any(ring->skb);
+ ring->skb = NULL;
+ ring->pending_buf = 0;
+ }
+
+ return 0;
+}
+
+static void hns3_force_clear_rx_ring(struct hns3_enet_ring *ring)
+{
+ while (ring->next_to_use != ring->next_to_clean) {
+ /* When a buffer is not reused, its memory has been
+ * freed in hns3_handle_rx_bd or will be freed by the
+ * stack, so we only need to unmap the buffer here.
+ */
+ */
+ if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
+ hns3_unmap_buffer(ring,
+ &ring->desc_cb[ring->next_to_use]);
+ ring->desc_cb[ring->next_to_use].dma = 0;
+ }
+
+ ring_ptr_move_fw(ring, next_to_use);
+ }
+}
+
+static void hns3_clear_all_ring(struct hnae3_handle *h, bool force)
+{
+ struct net_device *ndev = h->kinfo.netdev;
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ u32 i;
+
+ for (i = 0; i < h->kinfo.num_tqps; i++) {
+ struct hns3_enet_ring *ring;
+
+ ring = &priv->ring[i];
+ hns3_clear_tx_ring(ring);
+
+ ring = &priv->ring[i + h->kinfo.num_tqps];
+ /* Continue to clear other rings even if clearing some
+ * rings failed.
+ */
+ if (force)
+ hns3_force_clear_rx_ring(ring);
+ else
+ hns3_clear_rx_ring(ring);
+ }
+}
+
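+/* Reinitialize every TX and RX ring after a queue reset: reprogram the
+ * ring registers, drop stale TX buffers, mark all RX buffers for reuse
+ * and restore the per-TC TX queue mapping.
+ */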
+int hns3_nic_reset_all_ring(struct hnae3_handle *h)
+{
+ struct net_device *ndev = h->kinfo.netdev;
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ struct hns3_enet_ring *rx_ring;
+ int i, j;
+ int ret;
+
+ ret = h->ae_algo->ops->reset_queue(h);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < h->kinfo.num_tqps; i++) {
+ hns3_init_ring_hw(&priv->ring[i]);
+
+ /* We need to clear the TX ring here because the self test
+ * uses the ring without going through a down/up cycle first
+ */
+ hns3_clear_tx_ring(&priv->ring[i]);
+ priv->ring[i].next_to_clean = 0;
+ priv->ring[i].next_to_use = 0;
+ priv->ring[i].last_to_use = 0;
+
+ rx_ring = &priv->ring[i + h->kinfo.num_tqps];
+ hns3_init_ring_hw(rx_ring);
+ ret = hns3_clear_rx_ring(rx_ring);
+ if (ret)
+ return ret;
+
+ /* We cannot know the hardware head and tail when this
+ * function is called in the reset flow, so we reuse all
+ * descriptors.
+ */
+ for (j = 0; j < rx_ring->desc_num; j++)
+ hns3_reuse_buffer(rx_ring, j);
+
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+ }
+
+ hns3_init_tx_ring_tc(priv);
+
+ return 0;
+}
+
+static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
+{
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ struct net_device *ndev = kinfo->netdev;
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+
+ if (test_and_set_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
+ return 0;
+
+ if (!netif_running(ndev))
+ return 0;
+
+ return hns3_nic_net_stop(ndev);
+}
+
+static int hns3_reset_notify_up_enet(struct hnae3_handle *handle)
+{
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ struct hns3_nic_priv *priv = netdev_priv(kinfo->netdev);
+ int ret = 0;
+
+ if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
+ netdev_err(kinfo->netdev, "device is not initialized yet\n");
+ return -EFAULT;
+ }
+
+ clear_bit(HNS3_NIC_STATE_RESETTING, &priv->state);
+
+ if (netif_running(kinfo->netdev)) {
+ ret = hns3_nic_net_open(kinfo->netdev);
+ if (ret) {
+ set_bit(HNS3_NIC_STATE_RESETTING, &priv->state);
+ netdev_err(kinfo->netdev,
+ "net up fail, ret=%d!\n", ret);
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
+{
+ struct net_device *netdev = handle->kinfo.netdev;
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ int ret;
+
+ /* Carrier off reporting is important to ethtool even BEFORE open */
+ netif_carrier_off(netdev);
+
+ ret = hns3_get_ring_config(priv);
+ if (ret)
+ return ret;
+
+ ret = hns3_nic_alloc_vector_data(priv);
+ if (ret)
+ goto err_put_ring;
+
+ ret = hns3_nic_init_vector_data(priv);
+ if (ret)
+ goto err_dealloc_vector;
+
+ ret = hns3_init_all_ring(priv);
+ if (ret)
+ goto err_uninit_vector;
+
+ hns3_cq_period_mode_init(priv, priv->tx_cqe_mode, priv->rx_cqe_mode);
+
+ /* the device can work without cpu rmap, only aRFS needs it */
+ ret = hns3_set_rx_cpu_rmap(netdev);
+ if (ret)
+ dev_warn(priv->dev, "set rx cpu rmap fail, ret=%d\n", ret);
+
+ ret = hns3_nic_init_irq(priv);
+ if (ret) {
+ dev_err(priv->dev, "init irq failed! ret=%d\n", ret);
+ hns3_free_rx_cpu_rmap(netdev);
+ goto err_init_irq_fail;
+ }
+
+ if (!hns3_is_phys_func(handle->pdev))
+ hns3_init_mac_addr(netdev);
+
+ ret = hns3_client_start(handle);
+ if (ret) {
+ dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret);
+ goto err_client_start_fail;
+ }
+
+ set_bit(HNS3_NIC_STATE_INITED, &priv->state);
+
+ return ret;
+
+err_client_start_fail:
+ hns3_free_rx_cpu_rmap(netdev);
+ hns3_nic_uninit_irq(priv);
+err_init_irq_fail:
+ hns3_uninit_all_ring(priv);
+err_uninit_vector:
+ hns3_nic_uninit_vector_data(priv);
+err_dealloc_vector:
+ hns3_nic_dealloc_vector_data(priv);
+err_put_ring:
+ hns3_put_ring_config(priv);
+
+ return ret;
+}
+
+static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
+{
+ struct net_device *netdev = handle->kinfo.netdev;
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+
+ if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
+ netdev_warn(netdev, "already uninitialized\n");
+ return 0;
+ }
+
+ hns3_free_rx_cpu_rmap(netdev);
+ hns3_nic_uninit_irq(priv);
+ hns3_clear_all_ring(handle, true);
+ hns3_reset_tx_queue(priv->ae_handle);
+
+ hns3_nic_uninit_vector_data(priv);
+
+ hns3_nic_dealloc_vector_data(priv);
+
+ hns3_uninit_all_ring(priv);
+
+ hns3_put_ring_config(priv);
+
+ return 0;
+}
+
+int hns3_reset_notify(struct hnae3_handle *handle,
+ enum hnae3_reset_notify_type type)
+{
+ int ret = 0;
+
+ switch (type) {
+ case HNAE3_UP_CLIENT:
+ ret = hns3_reset_notify_up_enet(handle);
+ break;
+ case HNAE3_DOWN_CLIENT:
+ ret = hns3_reset_notify_down_enet(handle);
+ break;
+ case HNAE3_INIT_CLIENT:
+ ret = hns3_reset_notify_init_enet(handle);
+ break;
+ case HNAE3_UNINIT_CLIENT:
+ ret = hns3_reset_notify_uninit_enet(handle);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
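+/* Apply the new TQP count via the ae_algo ops, then re-init and bring the
+ * client back up; tear the client down again if bringing it up fails.
+ */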
+static int hns3_change_channels(struct hnae3_handle *handle, u32 new_tqp_num,
+ bool rxfh_configured)
+{
+ int ret;
+
+ ret = handle->ae_algo->ops->set_channels(handle, new_tqp_num,
+ rxfh_configured);
+ if (ret) {
+ dev_err(&handle->pdev->dev,
+ "Change tqp num(%u) fail.\n", new_tqp_num);
+ return ret;
+ }
+
+ ret = hns3_reset_notify(handle, HNAE3_INIT_CLIENT);
+ if (ret)
+ return ret;
+
+ ret = hns3_reset_notify(handle, HNAE3_UP_CLIENT);
+ if (ret)
+ hns3_reset_notify(handle, HNAE3_UNINIT_CLIENT);
+
+ return ret;
+}
+
+int hns3_set_channels(struct net_device *netdev,
+ struct ethtool_channels *ch)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ struct hnae3_knic_private_info *kinfo = &h->kinfo;
+ bool rxfh_configured = netif_is_rxfh_configured(netdev);
+ u32 new_tqp_num = ch->combined_count;
+ u16 org_tqp_num;
+ int ret;
+
+ if (hns3_nic_resetting(netdev))
+ return -EBUSY;
+
+ if (ch->rx_count || ch->tx_count)
+ return -EINVAL;
+
+ if (kinfo->tc_info.mqprio_active) {
+ dev_err(&netdev->dev,
+ "it's not allowed to set channels via ethtool when MQPRIO mode is on\n");
+ return -EINVAL;
+ }
+
+ if (new_tqp_num > hns3_get_max_available_channels(h) ||
+ new_tqp_num < 1) {
+ dev_err(&netdev->dev,
+ "Change tqps fail, the tqp range is from 1 to %u",
+ hns3_get_max_available_channels(h));
+ return -EINVAL;
+ }
+
+ if (kinfo->rss_size == new_tqp_num)
+ return 0;
+
+ netif_dbg(h, drv, netdev,
+ "set channels: tqp_num=%u, rxfh=%d\n",
+ new_tqp_num, rxfh_configured);
+
+ ret = hns3_reset_notify(h, HNAE3_DOWN_CLIENT);
+ if (ret)
+ return ret;
+
+ ret = hns3_reset_notify(h, HNAE3_UNINIT_CLIENT);
+ if (ret)
+ return ret;
+
+ org_tqp_num = h->kinfo.num_tqps;
+ ret = hns3_change_channels(h, new_tqp_num, rxfh_configured);
+ if (ret) {
+ int ret1;
+
+ netdev_warn(netdev,
+ "Change channels fail, revert to old value\n");
+ ret1 = hns3_change_channels(h, org_tqp_num, rxfh_configured);
+ if (ret1) {
+ netdev_err(netdev,
+ "revert to old channel fail\n");
+ return ret1;
+ }
+
+ return ret;
+ }
+
+ return 0;
+}
+
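+/* Quiesce the netdev before an external loopback test: stop the TX queues,
+ * disable the TQP vectors and TQPs, and reset the rings when the device is
+ * not already resetting.
+ */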
+void hns3_external_lb_prepare(struct net_device *ndev, bool if_running)
+{
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ struct hnae3_handle *h = priv->ae_handle;
+ int i;
+
+ if (!if_running)
+ return;
+
+ if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
+ return;
+
+ netif_carrier_off(ndev);
+ netif_tx_disable(ndev);
+
+ for (i = 0; i < priv->vector_num; i++)
+ hns3_vector_disable(&priv->tqp_vector[i]);
+
+ for (i = 0; i < h->kinfo.num_tqps; i++)
+ hns3_tqp_disable(h->kinfo.tqp[i]);
+
+ /* Delay ring buffer clearing to hns3_reset_notify_uninit_enet
+ * during the reset process, because the driver may not be able
+ * to disable the ring through firmware when downing the netdev.
+ */
+ if (!hns3_nic_resetting(ndev))
+ hns3_nic_reset_all_ring(priv->ae_handle);
+
+ hns3_reset_tx_queue(priv->ae_handle);
+}
+
+void hns3_external_lb_restore(struct net_device *ndev, bool if_running)
+{
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ struct hnae3_handle *h = priv->ae_handle;
+ int i;
+
+ if (!if_running)
+ return;
+
+ if (hns3_nic_resetting(ndev))
+ return;
+
+ if (!test_bit(HNS3_NIC_STATE_DOWN, &priv->state))
+ return;
+
+ if (hns3_nic_reset_all_ring(priv->ae_handle))
+ return;
+
+ clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);
+
+ for (i = 0; i < priv->vector_num; i++)
+ hns3_vector_enable(&priv->tqp_vector[i]);
+
+ for (i = 0; i < h->kinfo.num_tqps; i++)
+ hns3_tqp_enable(h->kinfo.tqp[i]);
+
+ netif_tx_wake_all_queues(ndev);
+
+ if (h->ae_algo->ops->get_status(h))
+ netif_carrier_on(ndev);
+}
+
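+/* Hardware error types and the messages logged by hns3_process_hw_error() */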
+static const struct hns3_hw_error_info hns3_hw_err[] = {
+ { .type = HNAE3_PPU_POISON_ERROR,
+ .msg = "PPU poison" },
+ { .type = HNAE3_CMDQ_ECC_ERROR,
+ .msg = "IMP CMDQ error" },
+ { .type = HNAE3_IMP_RD_POISON_ERROR,
+ .msg = "IMP RD poison" },
+ { .type = HNAE3_ROCEE_AXI_RESP_ERROR,
+ .msg = "ROCEE AXI RESP error" },
+};
+
+static void hns3_process_hw_error(struct hnae3_handle *handle,
+ enum hnae3_hw_error_type type)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hns3_hw_err); i++) {
+ if (hns3_hw_err[i].type == type) {
+ dev_err(&handle->pdev->dev, "Detected %s!\n",
+ hns3_hw_err[i].msg);
+ break;
+ }
+ }
+}
+
+static const struct hnae3_client_ops client_ops = {
+ .init_instance = hns3_client_init,
+ .uninit_instance = hns3_client_uninit,
+ .link_status_change = hns3_link_status_change,
+ .reset_notify = hns3_reset_notify,
+ .process_hw_error = hns3_process_hw_error,
+};
+
+/* hns3_init_module - Driver registration routine
+ * hns3_init_module is the first routine called when the driver is
+ * loaded. All it does is register with the PCI subsystem.
+ */
+static int __init hns3_init_module(void)
+{
+ int ret;
+
+ pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string);
+ pr_info("%s: %s\n", hns3_driver_name, hns3_copyright);
+
+ client.type = HNAE3_CLIENT_KNIC;
+ snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH, "%s",
+ hns3_driver_name);
+
+ client.ops = &client_ops;
+
+ INIT_LIST_HEAD(&client.node);
+
+ hns3_dbg_register_debugfs(hns3_driver_name);
+
+ ret = hnae3_register_client(&client);
+ if (ret)
+ goto err_reg_client;
+
+ ret = pci_register_driver(&hns3_driver);
+ if (ret)
+ goto err_reg_driver;
+
+ return ret;
+
+err_reg_driver:
+ hnae3_unregister_client(&client);
+err_reg_client:
+ hns3_dbg_unregister_debugfs();
+ return ret;
+}
+module_init(hns3_init_module);
+
+/* hns3_exit_module - Driver exit cleanup routine
+ * hns3_exit_module is called just before the driver is removed
+ * from memory.
+ */
+static void __exit hns3_exit_module(void)
+{
+ pci_unregister_driver(&hns3_driver);
+ hnae3_unregister_client(&client);
+ hns3_dbg_unregister_debugfs();
+}
+module_exit(hns3_exit_module);
+
+MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver");
+MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("pci:hns-nic");
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
new file mode 100644
index 0000000000..acd756b0c7
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -0,0 +1,757 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+// Copyright (c) 2016-2017 Hisilicon Limited.
+
+#ifndef __HNS3_ENET_H
+#define __HNS3_ENET_H
+
+#include <linux/dim.h>
+#include <linux/if_vlan.h>
+#include <net/page_pool/types.h>
+#include <asm/barrier.h>
+
+#include "hnae3.h"
+
+struct iphdr;
+struct ipv6hdr;
+
+enum hns3_nic_state {
+ HNS3_NIC_STATE_TESTING,
+ HNS3_NIC_STATE_RESETTING,
+ HNS3_NIC_STATE_INITED,
+ HNS3_NIC_STATE_DOWN,
+ HNS3_NIC_STATE_DISABLED,
+ HNS3_NIC_STATE_REMOVING,
+ HNS3_NIC_STATE_SERVICE_INITED,
+ HNS3_NIC_STATE_SERVICE_SCHED,
+ HNS3_NIC_STATE2_RESET_REQUESTED,
+ HNS3_NIC_STATE_HW_TX_CSUM_ENABLE,
+ HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE,
+ HNS3_NIC_STATE_TX_PUSH_ENABLE,
+ HNS3_NIC_STATE_MAX
+};
+
+#define HNS3_MAX_PUSH_BD_NUM 2
+
+#define HNS3_RING_RX_RING_BASEADDR_L_REG 0x00000
+#define HNS3_RING_RX_RING_BASEADDR_H_REG 0x00004
+#define HNS3_RING_RX_RING_BD_NUM_REG 0x00008
+#define HNS3_RING_RX_RING_BD_LEN_REG 0x0000C
+#define HNS3_RING_RX_RING_TAIL_REG 0x00018
+#define HNS3_RING_RX_RING_HEAD_REG 0x0001C
+#define HNS3_RING_RX_RING_FBDNUM_REG 0x00020
+#define HNS3_RING_RX_RING_PKTNUM_RECORD_REG 0x0002C
+
+#define HNS3_RING_TX_RING_BASEADDR_L_REG 0x00040
+#define HNS3_RING_TX_RING_BASEADDR_H_REG 0x00044
+#define HNS3_RING_TX_RING_BD_NUM_REG 0x00048
+#define HNS3_RING_TX_RING_TC_REG 0x00050
+#define HNS3_RING_TX_RING_TAIL_REG 0x00058
+#define HNS3_RING_TX_RING_HEAD_REG 0x0005C
+#define HNS3_RING_TX_RING_FBDNUM_REG 0x00060
+#define HNS3_RING_TX_RING_OFFSET_REG 0x00064
+#define HNS3_RING_TX_RING_EBDNUM_REG 0x00068
+#define HNS3_RING_TX_RING_PKTNUM_RECORD_REG 0x0006C
+#define HNS3_RING_TX_RING_EBD_OFFSET_REG 0x00070
+#define HNS3_RING_TX_RING_BD_ERR_REG 0x00074
+#define HNS3_RING_EN_REG 0x00090
+#define HNS3_RING_RX_EN_REG 0x00098
+#define HNS3_RING_TX_EN_REG 0x000D4
+
+#define HNS3_RX_HEAD_SIZE 256
+
+#define HNS3_TX_TIMEOUT (5 * HZ)
+#define HNS3_RING_NAME_LEN 16
+#define HNS3_BUFFER_SIZE_2048 2048
+#define HNS3_RING_MAX_PENDING 32760
+#define HNS3_RING_MIN_PENDING 72
+#define HNS3_RING_BD_MULTIPLE 8
+/* max frame size of mac */
+#define HNS3_MAX_MTU(max_frm_size) \
+ ((max_frm_size) - (ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN))
+
+#define HNS3_BD_SIZE_512_TYPE 0
+#define HNS3_BD_SIZE_1024_TYPE 1
+#define HNS3_BD_SIZE_2048_TYPE 2
+#define HNS3_BD_SIZE_4096_TYPE 3
+
+#define HNS3_RX_FLAG_VLAN_PRESENT 0x1
+#define HNS3_RX_FLAG_L3ID_IPV4 0x0
+#define HNS3_RX_FLAG_L3ID_IPV6 0x1
+#define HNS3_RX_FLAG_L4ID_UDP 0x0
+#define HNS3_RX_FLAG_L4ID_TCP 0x1
+
+#define HNS3_RXD_DMAC_S 0
+#define HNS3_RXD_DMAC_M (0x3 << HNS3_RXD_DMAC_S)
+#define HNS3_RXD_VLAN_S 2
+#define HNS3_RXD_VLAN_M (0x3 << HNS3_RXD_VLAN_S)
+#define HNS3_RXD_L3ID_S 4
+#define HNS3_RXD_L3ID_M (0xf << HNS3_RXD_L3ID_S)
+#define HNS3_RXD_L4ID_S 8
+#define HNS3_RXD_L4ID_M (0xf << HNS3_RXD_L4ID_S)
+#define HNS3_RXD_FRAG_B 12
+#define HNS3_RXD_STRP_TAGP_S 13
+#define HNS3_RXD_STRP_TAGP_M (0x3 << HNS3_RXD_STRP_TAGP_S)
+
+#define HNS3_RXD_L2E_B 16
+#define HNS3_RXD_L3E_B 17
+#define HNS3_RXD_L4E_B 18
+#define HNS3_RXD_TRUNCAT_B 19
+#define HNS3_RXD_HOI_B 20
+#define HNS3_RXD_DOI_B 21
+#define HNS3_RXD_OL3E_B 22
+#define HNS3_RXD_OL4E_B 23
+#define HNS3_RXD_GRO_COUNT_S 24
+#define HNS3_RXD_GRO_COUNT_M (0x3f << HNS3_RXD_GRO_COUNT_S)
+#define HNS3_RXD_GRO_FIXID_B 30
+#define HNS3_RXD_GRO_ECN_B 31
+
+#define HNS3_RXD_ODMAC_S 0
+#define HNS3_RXD_ODMAC_M (0x3 << HNS3_RXD_ODMAC_S)
+#define HNS3_RXD_OVLAN_S 2
+#define HNS3_RXD_OVLAN_M (0x3 << HNS3_RXD_OVLAN_S)
+#define HNS3_RXD_OL3ID_S 4
+#define HNS3_RXD_OL3ID_M (0xf << HNS3_RXD_OL3ID_S)
+#define HNS3_RXD_OL4ID_S 8
+#define HNS3_RXD_OL4ID_M (0xf << HNS3_RXD_OL4ID_S)
+#define HNS3_RXD_FBHI_S 12
+#define HNS3_RXD_FBHI_M (0x3 << HNS3_RXD_FBHI_S)
+#define HNS3_RXD_FBLI_S 14
+#define HNS3_RXD_FBLI_M (0x3 << HNS3_RXD_FBLI_S)
+
+#define HNS3_RXD_PTYPE_S 4
+#define HNS3_RXD_PTYPE_M GENMASK(11, 4)
+
+#define HNS3_RXD_BDTYPE_S 0
+#define HNS3_RXD_BDTYPE_M (0xf << HNS3_RXD_BDTYPE_S)
+#define HNS3_RXD_VLD_B 4
+#define HNS3_RXD_UDP0_B 5
+#define HNS3_RXD_EXTEND_B 7
+#define HNS3_RXD_FE_B 8
+#define HNS3_RXD_LUM_B 9
+#define HNS3_RXD_CRCP_B 10
+#define HNS3_RXD_L3L4P_B 11
+#define HNS3_RXD_TSIDX_S 12
+#define HNS3_RXD_TSIDX_M (0x3 << HNS3_RXD_TSIDX_S)
+#define HNS3_RXD_TS_VLD_B 14
+#define HNS3_RXD_LKBK_B 15
+#define HNS3_RXD_GRO_SIZE_S 16
+#define HNS3_RXD_GRO_SIZE_M (0x3fff << HNS3_RXD_GRO_SIZE_S)
+
+#define HNS3_TXD_L3T_S 0
+#define HNS3_TXD_L3T_M (0x3 << HNS3_TXD_L3T_S)
+#define HNS3_TXD_L4T_S 2
+#define HNS3_TXD_L4T_M (0x3 << HNS3_TXD_L4T_S)
+#define HNS3_TXD_L3CS_B 4
+#define HNS3_TXD_L4CS_B 5
+#define HNS3_TXD_VLAN_B 6
+#define HNS3_TXD_TSO_B 7
+
+#define HNS3_TXD_L2LEN_S 8
+#define HNS3_TXD_L2LEN_M (0xff << HNS3_TXD_L2LEN_S)
+#define HNS3_TXD_L3LEN_S 16
+#define HNS3_TXD_L3LEN_M (0xff << HNS3_TXD_L3LEN_S)
+#define HNS3_TXD_L4LEN_S 24
+#define HNS3_TXD_L4LEN_M (0xff << HNS3_TXD_L4LEN_S)
+
+#define HNS3_TXD_CSUM_START_S 8
+#define HNS3_TXD_CSUM_START_M (0xffff << HNS3_TXD_CSUM_START_S)
+
+#define HNS3_TXD_OL3T_S 0
+#define HNS3_TXD_OL3T_M (0x3 << HNS3_TXD_OL3T_S)
+#define HNS3_TXD_OVLAN_B 2
+#define HNS3_TXD_MACSEC_B 3
+#define HNS3_TXD_TUNTYPE_S 4
+#define HNS3_TXD_TUNTYPE_M (0xf << HNS3_TXD_TUNTYPE_S)
+
+#define HNS3_TXD_CSUM_OFFSET_S 8
+#define HNS3_TXD_CSUM_OFFSET_M (0xffff << HNS3_TXD_CSUM_OFFSET_S)
+
+#define HNS3_TXD_BDTYPE_S 0
+#define HNS3_TXD_BDTYPE_M (0xf << HNS3_TXD_BDTYPE_S)
+#define HNS3_TXD_FE_B 4
+#define HNS3_TXD_SC_S 5
+#define HNS3_TXD_SC_M (0x3 << HNS3_TXD_SC_S)
+#define HNS3_TXD_EXTEND_B 7
+#define HNS3_TXD_VLD_B 8
+#define HNS3_TXD_RI_B 9
+#define HNS3_TXD_RA_B 10
+#define HNS3_TXD_TSYN_B 11
+#define HNS3_TXD_DECTTL_S 12
+#define HNS3_TXD_DECTTL_M (0xf << HNS3_TXD_DECTTL_S)
+
+#define HNS3_TXD_OL4CS_B 22
+
+#define HNS3_TXD_MSS_S 0
+#define HNS3_TXD_MSS_M (0x3fff << HNS3_TXD_MSS_S)
+#define HNS3_TXD_HW_CS_B 14
+
+#define HNS3_VECTOR_TX_IRQ BIT_ULL(0)
+#define HNS3_VECTOR_RX_IRQ BIT_ULL(1)
+
+#define HNS3_VECTOR_NOT_INITED 0
+#define HNS3_VECTOR_INITED 1
+
+#define HNS3_MAX_BD_SIZE 65535
+#define HNS3_MAX_TSO_BD_NUM 63U
+#define HNS3_MAX_TSO_SIZE 1048576U
+#define HNS3_MAX_NON_TSO_SIZE 9728U
+
+#define HNS3_VECTOR_GL_MASK GENMASK(11, 0)
+#define HNS3_VECTOR_GL0_OFFSET 0x100
+#define HNS3_VECTOR_GL1_OFFSET 0x200
+#define HNS3_VECTOR_GL2_OFFSET 0x300
+#define HNS3_VECTOR_RL_OFFSET 0x900
+#define HNS3_VECTOR_RL_EN_B 6
+#define HNS3_VECTOR_QL_MASK GENMASK(9, 0)
+#define HNS3_VECTOR_TX_QL_OFFSET 0xe00
+#define HNS3_VECTOR_RX_QL_OFFSET 0xf00
+
+#define HNS3_RING_EN_B 0
+
+#define HNS3_GL0_CQ_MODE_REG 0x20d00
+#define HNS3_GL1_CQ_MODE_REG 0x20d04
+#define HNS3_GL2_CQ_MODE_REG 0x20d08
+#define HNS3_CQ_MODE_EQE 1U
+#define HNS3_CQ_MODE_CQE 0U
+
+enum hns3_pkt_l2t_type {
+ HNS3_L2_TYPE_UNICAST,
+ HNS3_L2_TYPE_MULTICAST,
+ HNS3_L2_TYPE_BROADCAST,
+ HNS3_L2_TYPE_INVALID,
+};
+
+enum hns3_pkt_l3t_type {
+ HNS3_L3T_NONE,
+ HNS3_L3T_IPV6,
+ HNS3_L3T_IPV4,
+ HNS3_L3T_RESERVED
+};
+
+enum hns3_pkt_l4t_type {
+ HNS3_L4T_UNKNOWN,
+ HNS3_L4T_TCP,
+ HNS3_L4T_UDP,
+ HNS3_L4T_SCTP
+};
+
+enum hns3_pkt_ol3t_type {
+ HNS3_OL3T_NONE,
+ HNS3_OL3T_IPV6,
+ HNS3_OL3T_IPV4_NO_CSUM,
+ HNS3_OL3T_IPV4_CSUM
+};
+
+enum hns3_pkt_tun_type {
+ HNS3_TUN_NONE,
+ HNS3_TUN_MAC_IN_UDP,
+ HNS3_TUN_NVGRE,
+ HNS3_TUN_OTHER
+};
+
+/* hardware spec ring buffer format */
+struct __packed hns3_desc {
+ union {
+ __le64 addr;
+ __le16 csum;
+ struct {
+ __le32 ts_nsec;
+ __le32 ts_sec;
+ };
+ };
+ union {
+ struct {
+ __le16 vlan_tag;
+ __le16 send_size;
+ union {
+ __le32 type_cs_vlan_tso_len;
+ struct {
+ __u8 type_cs_vlan_tso;
+ __u8 l2_len;
+ __u8 l3_len;
+ __u8 l4_len;
+ };
+ };
+ __le16 outer_vlan_tag;
+ __le16 tv;
+
+ union {
+ __le32 ol_type_vlan_len_msec;
+ struct {
+ __u8 ol_type_vlan_msec;
+ __u8 ol2_len;
+ __u8 ol3_len;
+ __u8 ol4_len;
+ };
+ };
+
+ __le32 paylen_ol4cs;
+ __le16 bdtp_fe_sc_vld_ra_ri;
+ __le16 mss_hw_csum;
+ } tx;
+
+ struct {
+ __le32 l234_info;
+ __le16 pkt_len;
+ __le16 size;
+
+ __le32 rss_hash;
+ __le16 fd_id;
+ __le16 vlan_tag;
+
+ union {
+ __le32 ol_info;
+ struct {
+ __le16 o_dm_vlan_id_fb;
+ __le16 ot_vlan_tag;
+ };
+ };
+
+ __le32 bd_base_info;
+ } rx;
+ };
+};
+
+enum hns3_desc_type {
+ DESC_TYPE_UNKNOWN = 0,
+ DESC_TYPE_SKB = 1 << 0,
+ DESC_TYPE_FRAGLIST_SKB = 1 << 1,
+ DESC_TYPE_PAGE = 1 << 2,
+ DESC_TYPE_BOUNCE_ALL = 1 << 3,
+ DESC_TYPE_BOUNCE_HEAD = 1 << 4,
+ DESC_TYPE_SGL_SKB = 1 << 5,
+ DESC_TYPE_PP_FRAG = 1 << 6,
+};
+
+struct hns3_desc_cb {
+ dma_addr_t dma; /* dma address of this desc */
+ void *buf; /* cpu addr for a desc */
+
+ /* priv data for the desc, e.g. the skb when used with the ip stack */
+ void *priv;
+
+ union {
+ u32 page_offset; /* for rx */
+ u32 send_bytes; /* for tx */
+ };
+
+ u32 length; /* length of the buffer */
+
+ u16 reuse_flag;
+ u16 refill;
+
+ /* desc type, used by the ring user to mark the type of the priv data */
+ u16 type;
+ u16 pagecnt_bias;
+};
+
+enum hns3_pkt_l3type {
+ HNS3_L3_TYPE_IPV4,
+ HNS3_L3_TYPE_IPV6,
+ HNS3_L3_TYPE_ARP,
+ HNS3_L3_TYPE_RARP,
+ HNS3_L3_TYPE_IPV4_OPT,
+ HNS3_L3_TYPE_IPV6_EXT,
+ HNS3_L3_TYPE_LLDP,
+ HNS3_L3_TYPE_BPDU,
+ HNS3_L3_TYPE_MAC_PAUSE,
+ HNS3_L3_TYPE_PFC_PAUSE, /* 0x9 */
+
+ /* reserved for 0xA~0xB */
+
+ HNS3_L3_TYPE_CNM = 0xc,
+
+ /* reserved for 0xD~0xE */
+
+ HNS3_L3_TYPE_PARSE_FAIL = 0xf /* must be last */
+};
+
+enum hns3_pkt_l4type {
+ HNS3_L4_TYPE_UDP,
+ HNS3_L4_TYPE_TCP,
+ HNS3_L4_TYPE_GRE,
+ HNS3_L4_TYPE_SCTP,
+ HNS3_L4_TYPE_IGMP,
+ HNS3_L4_TYPE_ICMP,
+
+ /* reserved for 0x6~0xE */
+
+ HNS3_L4_TYPE_PARSE_FAIL = 0xf /* must be last */
+};
+
+enum hns3_pkt_ol3type {
+ HNS3_OL3_TYPE_IPV4 = 0,
+ HNS3_OL3_TYPE_IPV6,
+ /* reserved for 0x2~0x3 */
+ HNS3_OL3_TYPE_IPV4_OPT = 4,
+ HNS3_OL3_TYPE_IPV6_EXT,
+
+ /* reserved for 0x6~0xE */
+
+ HNS3_OL3_TYPE_PARSE_FAIL = 0xf /* must be last */
+};
+
+enum hns3_pkt_ol4type {
+ HNS3_OL4_TYPE_NO_TUN,
+ HNS3_OL4_TYPE_MAC_IN_UDP,
+ HNS3_OL4_TYPE_NVGRE,
+ HNS3_OL4_TYPE_UNKNOWN
+};
+
+struct hns3_rx_ptype {
+ u32 ptype : 8;
+ u32 csum_level : 2;
+ u32 ip_summed : 2;
+ u32 l3_type : 4;
+ u32 valid : 1;
+ u32 hash_type: 3;
+};
+
+struct ring_stats {
+ u64 sw_err_cnt;
+ u64 seg_pkt_cnt;
+ union {
+ struct {
+ u64 tx_pkts;
+ u64 tx_bytes;
+ u64 tx_more;
+ u64 tx_push;
+ u64 tx_mem_doorbell;
+ u64 restart_queue;
+ u64 tx_busy;
+ u64 tx_copy;
+ u64 tx_vlan_err;
+ u64 tx_l4_proto_err;
+ u64 tx_l2l3l4_err;
+ u64 tx_tso_err;
+ u64 over_max_recursion;
+ u64 hw_limitation;
+ u64 tx_bounce;
+ u64 tx_spare_full;
+ u64 copy_bits_err;
+ u64 tx_sgl;
+ u64 skb2sgl_err;
+ u64 map_sg_err;
+ };
+ struct {
+ u64 rx_pkts;
+ u64 rx_bytes;
+ u64 rx_err_cnt;
+ u64 reuse_pg_cnt;
+ u64 err_pkt_len;
+ u64 err_bd_num;
+ u64 l2_err;
+ u64 l3l4_csum_err;
+ u64 csum_complete;
+ u64 rx_multicast;
+ u64 non_reuse_pg;
+ u64 frag_alloc_err;
+ u64 frag_alloc;
+ };
+ __le16 csum;
+ };
+};
+
+struct hns3_tx_spare {
+ dma_addr_t dma;
+ void *buf;
+ u32 next_to_use;
+ u32 next_to_clean;
+ u32 last_to_clean;
+ u32 len;
+};
+
+struct hns3_enet_ring {
+ struct hns3_desc *desc; /* dma map address space */
+ struct hns3_desc_cb *desc_cb;
+ struct hns3_enet_ring *next;
+ struct hns3_enet_tqp_vector *tqp_vector;
+ struct hnae3_queue *tqp;
+ int queue_index;
+ struct device *dev; /* will be used for DMA mapping of descriptors */
+ struct page_pool *page_pool;
+
+ /* statistic */
+ struct ring_stats stats;
+ struct u64_stats_sync syncp;
+
+ dma_addr_t desc_dma_addr;
+ u32 buf_size; /* size for hnae_desc->addr, preset by AE */
+ u16 desc_num; /* total number of desc */
+ int next_to_use; /* idx of next spare desc */
+
+ /* idx of the latest sent desc; the ring is empty when equal to
+ * next_to_use
+ */
+ int next_to_clean;
+ u32 flag; /* ring attribute */
+
+ int pending_buf;
+ union {
+ /* for Tx ring */
+ struct {
+ u32 fd_qb_tx_sample;
+ int last_to_use; /* last idx used by xmit */
+ u32 tx_copybreak;
+ struct hns3_tx_spare *tx_spare;
+ };
+
+ /* for Rx ring */
+ struct {
+ u32 pull_len; /* memcpy len for current rx packet */
+ u32 rx_copybreak;
+ u32 frag_num;
+ /* first buffer address for current packet */
+ unsigned char *va;
+ struct sk_buff *skb;
+ struct sk_buff *tail_skb;
+ };
+ };
+} ____cacheline_internodealigned_in_smp;
+
+enum hns3_flow_level_range {
+ HNS3_FLOW_LOW = 0,
+ HNS3_FLOW_MID = 1,
+ HNS3_FLOW_HIGH = 2,
+ HNS3_FLOW_ULTRA = 3,
+};
+
+#define HNS3_INT_GL_50K 0x0014
+#define HNS3_INT_GL_20K 0x0032
+#define HNS3_INT_GL_18K 0x0036
+#define HNS3_INT_GL_8K 0x007C
+
+#define HNS3_INT_GL_1US BIT(31)
+
+#define HNS3_INT_RL_MAX 0x00EC
+#define HNS3_INT_RL_ENABLE_MASK 0x40
+
+#define HNS3_INT_QL_DEFAULT_CFG 0x20
+
+struct hns3_enet_coalesce {
+ u16 int_gl;
+ u16 int_ql;
+ u16 int_ql_max;
+ u8 adapt_enable : 1;
+ u8 ql_enable : 1;
+ u8 unit_1us : 1;
+ enum hns3_flow_level_range flow_level;
+};
+
+struct hns3_enet_ring_group {
+ /* array of pointers to rings */
+ struct hns3_enet_ring *ring;
+ u64 total_bytes; /* total bytes processed this group */
+ u64 total_packets; /* total packets processed this group */
+ u16 count;
+ struct hns3_enet_coalesce coal;
+ struct dim dim;
+};
+
+struct hns3_enet_tqp_vector {
+ struct hnae3_handle *handle;
+ u8 __iomem *mask_addr;
+ int vector_irq;
+ int irq_init_flag;
+
+ u16 idx; /* index in the TQP vector array per handle. */
+
+ struct napi_struct napi;
+
+ struct hns3_enet_ring_group rx_group;
+ struct hns3_enet_ring_group tx_group;
+
+ cpumask_t affinity_mask;
+ u16 num_tqps; /* total number of tqps in TQP vector */
+ struct irq_affinity_notify affinity_notify;
+
+ char name[HNAE3_INT_NAME_LEN];
+
+ u64 event_cnt;
+} ____cacheline_internodealigned_in_smp;
+
+struct hns3_nic_priv {
+ struct hnae3_handle *ae_handle;
+ struct net_device *netdev;
+ struct device *dev;
+
+ /* the cb for the nic to manage the ring buffer; the first half of
+ * the array is for tx rings and the second half is for rx rings
+ */
+ struct hns3_enet_ring *ring;
+ struct hns3_enet_tqp_vector *tqp_vector;
+ u16 vector_num;
+ u8 max_non_tso_bd_num;
+
+ u64 tx_timeout_count;
+
+ unsigned long state;
+
+ enum dim_cq_period_mode tx_cqe_mode;
+ enum dim_cq_period_mode rx_cqe_mode;
+ struct hns3_enet_coalesce tx_coal;
+ struct hns3_enet_coalesce rx_coal;
+ u32 tx_copybreak;
+ u32 rx_copybreak;
+};
+
+union l3_hdr_info {
+ struct iphdr *v4;
+ struct ipv6hdr *v6;
+ unsigned char *hdr;
+};
+
+union l4_hdr_info {
+ struct tcphdr *tcp;
+ struct udphdr *udp;
+ struct gre_base_hdr *gre;
+ unsigned char *hdr;
+};
+
+struct hns3_hw_error_info {
+ enum hnae3_hw_error_type type;
+ const char *msg;
+};
+
+struct hns3_reset_type_map {
+ enum ethtool_reset_flags rst_flags;
+ enum hnae3_reset_type rst_type;
+};
+
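+/* Number of free descriptors in the ring; one slot is reserved so that a
+ * full ring can be distinguished from an empty one.
+ */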
+static inline int ring_space(struct hns3_enet_ring *ring)
+{
+ /* This smp_load_acquire() pairs with smp_store_release() in
+ * hns3_nic_reclaim_one_desc called by hns3_clean_tx_ring.
+ */
+ int begin = smp_load_acquire(&ring->next_to_clean);
+ int end = READ_ONCE(ring->next_to_use);
+
+ return ((end >= begin) ? (ring->desc_num - end + begin) :
+ (begin - end)) - 1;
+}
+
+static inline u32 hns3_tqp_read_reg(struct hns3_enet_ring *ring, u32 reg)
+{
+ return readl_relaxed(ring->tqp->io_base + reg);
+}
+
+static inline u32 hns3_read_reg(void __iomem *base, u32 reg)
+{
+ return readl(base + reg);
+}
+
+static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value)
+{
+ u8 __iomem *reg_addr = READ_ONCE(base);
+
+ writel(value, reg_addr + reg);
+}
+
+#define hns3_read_dev(a, reg) \
+ hns3_read_reg((a)->io_base, reg)
+
+static inline bool hns3_nic_resetting(struct net_device *netdev)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+
+ return test_bit(HNS3_NIC_STATE_RESETTING, &priv->state);
+}
+
+#define hns3_write_dev(a, reg, value) \
+ hns3_write_reg((a)->io_base, reg, value)
+
+#define ring_to_dev(ring) ((ring)->dev)
+
+#define ring_to_netdev(ring) ((ring)->tqp_vector->napi.dev)
+
+#define ring_to_dma_dir(ring) (HNAE3_IS_TX_RING(ring) ? \
+ DMA_TO_DEVICE : DMA_FROM_DEVICE)
+
+#define hns3_buf_size(_ring) ((_ring)->buf_size)
+
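+/* Increment a per-ring statistic field under u64_stats_sync protection */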
+#define hns3_ring_stats_update(ring, cnt) do { \
+ typeof(ring) (tmp) = (ring); \
+ u64_stats_update_begin(&(tmp)->syncp); \
+ ((tmp)->stats.cnt)++; \
+ u64_stats_update_end(&(tmp)->syncp); \
+} while (0)
+
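+/* On systems with pages smaller than 8K, an Rx buffer larger than half a
+ * page is backed by an order-1 page; otherwise order-0 pages are used.
+ */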
+static inline unsigned int hns3_page_order(struct hns3_enet_ring *ring)
+{
+#if (PAGE_SIZE < 8192)
+ if (ring->buf_size > (PAGE_SIZE / 2))
+ return 1;
+#endif
+ return 0;
+}
+
+#define hns3_page_size(_ring) (PAGE_SIZE << hns3_page_order(_ring))
+
+/* iterator for handling rings in ring group */
+#define hns3_for_each_ring(pos, head) \
+ for (pos = (head).ring; (pos); pos = (pos)->next)
+
+#define hns3_get_handle(ndev) \
+ (((struct hns3_nic_priv *)netdev_priv(ndev))->ae_handle)
+
+#define hns3_get_ae_dev(handle) \
+ (pci_get_drvdata((handle)->pdev))
+
+#define hns3_get_ops(handle) \
+ ((handle)->ae_algo->ops)
+
+#define hns3_gl_usec_to_reg(int_gl) ((int_gl) >> 1)
+#define hns3_gl_round_down(int_gl) round_down(int_gl, 2)
+
+#define hns3_rl_usec_to_reg(int_rl) ((int_rl) >> 2)
+#define hns3_rl_round_down(int_rl) round_down(int_rl, 4)
+
+void hns3_ethtool_set_ops(struct net_device *netdev);
+int hns3_set_channels(struct net_device *netdev,
+ struct ethtool_channels *ch);
+
+void hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget);
+int hns3_init_all_ring(struct hns3_nic_priv *priv);
+int hns3_nic_reset_all_ring(struct hnae3_handle *h);
+void hns3_fini_ring(struct hns3_enet_ring *ring);
+netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev);
+bool hns3_is_phys_func(struct pci_dev *pdev);
+int hns3_clean_rx_ring(
+ struct hns3_enet_ring *ring, int budget,
+ void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *));
+
+void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector,
+ u32 gl_value);
+void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
+ u32 gl_value);
+void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
+ u32 rl_value);
+void hns3_set_vector_coalesce_rx_ql(struct hns3_enet_tqp_vector *tqp_vector,
+ u32 ql_value);
+void hns3_set_vector_coalesce_tx_ql(struct hns3_enet_tqp_vector *tqp_vector,
+ u32 ql_value);
+
+void hns3_request_update_promisc_mode(struct hnae3_handle *handle);
+int hns3_reset_notify(struct hnae3_handle *handle,
+ enum hnae3_reset_notify_type type);
+
+#ifdef CONFIG_HNS3_DCB
+void hns3_dcbnl_setup(struct hnae3_handle *handle);
+#else
+static inline void hns3_dcbnl_setup(struct hnae3_handle *handle) {}
+#endif
+
+int hns3_dbg_init(struct hnae3_handle *handle);
+void hns3_dbg_uninit(struct hnae3_handle *handle);
+void hns3_dbg_register_debugfs(const char *debugfs_dir_name);
+void hns3_dbg_unregister_debugfs(void);
+void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size);
+u16 hns3_get_max_available_channels(struct hnae3_handle *h);
+void hns3_cq_period_mode_init(struct hns3_nic_priv *priv,
+ enum dim_cq_period_mode tx_mode,
+ enum dim_cq_period_mode rx_mode);
+
+void hns3_external_lb_prepare(struct net_device *ndev, bool if_running);
+void hns3_external_lb_restore(struct net_device *ndev, bool if_running);
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
new file mode 100644
index 0000000000..682239f330
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -0,0 +1,2180 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
+
+#include <linux/etherdevice.h>
+#include <linux/string.h>
+#include <linux/phy.h>
+#include <linux/sfp.h>
+
+#include "hns3_enet.h"
+#include "hns3_ethtool.h"
+
+/* tqp related stats */
+#define HNS3_TQP_STAT(_string, _member) { \
+ .stats_string = _string, \
+ .stats_offset = offsetof(struct hns3_enet_ring, stats) +\
+ offsetof(struct ring_stats, _member), \
+}
+
+static const struct hns3_stats hns3_txq_stats[] = {
+ /* Tx per-queue statistics */
+ HNS3_TQP_STAT("dropped", sw_err_cnt),
+ HNS3_TQP_STAT("seg_pkt_cnt", seg_pkt_cnt),
+ HNS3_TQP_STAT("packets", tx_pkts),
+ HNS3_TQP_STAT("bytes", tx_bytes),
+ HNS3_TQP_STAT("more", tx_more),
+ HNS3_TQP_STAT("push", tx_push),
+ HNS3_TQP_STAT("mem_doorbell", tx_mem_doorbell),
+ HNS3_TQP_STAT("wake", restart_queue),
+ HNS3_TQP_STAT("busy", tx_busy),
+ HNS3_TQP_STAT("copy", tx_copy),
+ HNS3_TQP_STAT("vlan_err", tx_vlan_err),
+ HNS3_TQP_STAT("l4_proto_err", tx_l4_proto_err),
+ HNS3_TQP_STAT("l2l3l4_err", tx_l2l3l4_err),
+ HNS3_TQP_STAT("tso_err", tx_tso_err),
+ HNS3_TQP_STAT("over_max_recursion", over_max_recursion),
+ HNS3_TQP_STAT("hw_limitation", hw_limitation),
+ HNS3_TQP_STAT("bounce", tx_bounce),
+ HNS3_TQP_STAT("spare_full", tx_spare_full),
+ HNS3_TQP_STAT("copy_bits_err", copy_bits_err),
+ HNS3_TQP_STAT("sgl", tx_sgl),
+ HNS3_TQP_STAT("skb2sgl_err", skb2sgl_err),
+ HNS3_TQP_STAT("map_sg_err", map_sg_err),
+};
+
+#define HNS3_TXQ_STATS_COUNT ARRAY_SIZE(hns3_txq_stats)
+
+static const struct hns3_stats hns3_rxq_stats[] = {
+ /* Rx per-queue statistics */
+ HNS3_TQP_STAT("dropped", sw_err_cnt),
+ HNS3_TQP_STAT("seg_pkt_cnt", seg_pkt_cnt),
+ HNS3_TQP_STAT("packets", rx_pkts),
+ HNS3_TQP_STAT("bytes", rx_bytes),
+ HNS3_TQP_STAT("errors", rx_err_cnt),
+ HNS3_TQP_STAT("reuse_pg_cnt", reuse_pg_cnt),
+ HNS3_TQP_STAT("err_pkt_len", err_pkt_len),
+ HNS3_TQP_STAT("err_bd_num", err_bd_num),
+ HNS3_TQP_STAT("l2_err", l2_err),
+ HNS3_TQP_STAT("l3l4_csum_err", l3l4_csum_err),
+ HNS3_TQP_STAT("csum_complete", csum_complete),
+ HNS3_TQP_STAT("multicast", rx_multicast),
+ HNS3_TQP_STAT("non_reuse_pg", non_reuse_pg),
+ HNS3_TQP_STAT("frag_alloc_err", frag_alloc_err),
+ HNS3_TQP_STAT("frag_alloc", frag_alloc),
+};
+
+#define HNS3_PRIV_FLAGS_LEN ARRAY_SIZE(hns3_priv_flags)
+
+#define HNS3_RXQ_STATS_COUNT ARRAY_SIZE(hns3_rxq_stats)
+
+#define HNS3_TQP_STATS_COUNT (HNS3_TXQ_STATS_COUNT + HNS3_RXQ_STATS_COUNT)
+
+#define HNS3_NIC_LB_TEST_PKT_NUM 1
+#define HNS3_NIC_LB_TEST_RING_ID 0
+#define HNS3_NIC_LB_TEST_PACKET_SIZE 128
+#define HNS3_NIC_LB_SETUP_USEC 10000
+
+/* Nic loopback test err */
+#define HNS3_NIC_LB_TEST_NO_MEM_ERR 1
+#define HNS3_NIC_LB_TEST_TX_CNT_ERR 2
+#define HNS3_NIC_LB_TEST_RX_CNT_ERR 3
+
+static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en)
+{
+ struct hnae3_handle *h = hns3_get_handle(ndev);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+ int ret;
+
+ if (!h->ae_algo->ops->set_loopback ||
+ !h->ae_algo->ops->set_promisc_mode)
+ return -EOPNOTSUPP;
+
+ switch (loop) {
+ case HNAE3_LOOP_SERIAL_SERDES:
+ case HNAE3_LOOP_PARALLEL_SERDES:
+ case HNAE3_LOOP_APP:
+ case HNAE3_LOOP_PHY:
+ case HNAE3_LOOP_EXTERNAL:
+ ret = h->ae_algo->ops->set_loopback(h, loop, en);
+ break;
+ default:
+ ret = -ENOTSUPP;
+ break;
+ }
+
+ if (ret || ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
+ return ret;
+
+ if (en)
+ h->ae_algo->ops->set_promisc_mode(h, true, true);
+ else
+ /* recover promisc mode before loopback test */
+ hns3_request_update_promisc_mode(h);
+
+ return ret;
+}
+
+static int hns3_lp_up(struct net_device *ndev, enum hnae3_loop loop_mode)
+{
+ struct hnae3_handle *h = hns3_get_handle(ndev);
+ int ret;
+
+ ret = hns3_nic_reset_all_ring(h);
+ if (ret)
+ return ret;
+
+ ret = hns3_lp_setup(ndev, loop_mode, true);
+ usleep_range(HNS3_NIC_LB_SETUP_USEC, HNS3_NIC_LB_SETUP_USEC * 2);
+
+ return ret;
+}
+
+static int hns3_lp_down(struct net_device *ndev, enum hnae3_loop loop_mode)
+{
+ int ret;
+
+ ret = hns3_lp_setup(ndev, loop_mode, false);
+ if (ret) {
+ netdev_err(ndev, "lb_setup return error: %d\n", ret);
+ return ret;
+ }
+
+ usleep_range(HNS3_NIC_LB_SETUP_USEC, HNS3_NIC_LB_SETUP_USEC * 2);
+
+ return 0;
+}
+
+static void hns3_lp_setup_skb(struct sk_buff *skb)
+{
+#define HNS3_NIC_LB_DST_MAC_ADDR 0x1f
+
+ struct net_device *ndev = skb->dev;
+ struct hnae3_handle *handle;
+ struct hnae3_ae_dev *ae_dev;
+ unsigned char *packet;
+ struct ethhdr *ethh;
+ unsigned int i;
+
+ skb_reserve(skb, NET_IP_ALIGN);
+ ethh = skb_put(skb, sizeof(struct ethhdr));
+ packet = skb_put(skb, HNS3_NIC_LB_TEST_PACKET_SIZE);
+
+ memcpy(ethh->h_dest, ndev->dev_addr, ETH_ALEN);
+
+ /* The dst mac addr of the loopback packet is the same as the host's
+ * mac addr, so the SSU component may loop the packet back to the
+ * host before it reaches the mac or serdes, which would defeat
+ * the purpose of the mac or serdes selftest.
+ */
+ handle = hns3_get_handle(ndev);
+ ae_dev = pci_get_drvdata(handle->pdev);
+ if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
+ ethh->h_dest[5] += HNS3_NIC_LB_DST_MAC_ADDR;
+ eth_zero_addr(ethh->h_source);
+ ethh->h_proto = htons(ETH_P_ARP);
+ skb_reset_mac_header(skb);
+
+ for (i = 0; i < HNS3_NIC_LB_TEST_PACKET_SIZE; i++)
+ packet[i] = (unsigned char)(i & 0xff);
+}
+
+static void hns3_lb_check_skb_data(struct hns3_enet_ring *ring,
+ struct sk_buff *skb)
+{
+ struct hns3_enet_tqp_vector *tqp_vector = ring->tqp_vector;
+ unsigned char *packet = skb->data;
+ u32 len = skb_headlen(skb);
+ u32 i;
+
+ len = min_t(u32, len, HNS3_NIC_LB_TEST_PACKET_SIZE);
+
+ for (i = 0; i < len; i++)
+ if (packet[i] != (unsigned char)(i & 0xff))
+ break;
+
+ /* The packet is correctly received */
+ if (i == HNS3_NIC_LB_TEST_PACKET_SIZE)
+ tqp_vector->rx_group.total_packets++;
+ else
+ print_hex_dump(KERN_ERR, "selftest:", DUMP_PREFIX_OFFSET, 16, 1,
+ skb->data, len, true);
+
+ dev_kfree_skb_any(skb);
+}
+
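+/* Poll every Rx ring and return the number of loopback test packets whose
+ * payload matched the expected pattern.
+ */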
+static u32 hns3_lb_check_rx_ring(struct hns3_nic_priv *priv, u32 budget)
+{
+ struct hnae3_handle *h = priv->ae_handle;
+ struct hnae3_knic_private_info *kinfo;
+ u32 i, rcv_good_pkt_total = 0;
+
+ kinfo = &h->kinfo;
+ for (i = kinfo->num_tqps; i < kinfo->num_tqps * 2; i++) {
+ struct hns3_enet_ring *ring = &priv->ring[i];
+ struct hns3_enet_ring_group *rx_group;
+ u64 pre_rx_pkt;
+
+ rx_group = &ring->tqp_vector->rx_group;
+ pre_rx_pkt = rx_group->total_packets;
+
+ preempt_disable();
+ hns3_clean_rx_ring(ring, budget, hns3_lb_check_skb_data);
+ preempt_enable();
+
+ rcv_good_pkt_total += (rx_group->total_packets - pre_rx_pkt);
+ rx_group->total_packets = pre_rx_pkt;
+ }
+ return rcv_good_pkt_total;
+}
+
+static void hns3_lb_clear_tx_ring(struct hns3_nic_priv *priv, u32 start_ringid,
+ u32 end_ringid)
+{
+ u32 i;
+
+ for (i = start_ringid; i <= end_ringid; i++) {
+ struct hns3_enet_ring *ring = &priv->ring[i];
+
+ hns3_clean_tx_ring(ring, 0);
+ }
+}
+
+/**
+ * hns3_lp_run_test - run loopback test
+ * @ndev: net device
+ * @mode: loopback type
+ *
+ * Return: %0 for success or a NIC loopback test error code on failure
+ */
+static int hns3_lp_run_test(struct net_device *ndev, enum hnae3_loop mode)
+{
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ struct sk_buff *skb;
+ u32 i, good_cnt;
+ int ret_val = 0;
+
+ skb = alloc_skb(HNS3_NIC_LB_TEST_PACKET_SIZE + ETH_HLEN + NET_IP_ALIGN,
+ GFP_KERNEL);
+ if (!skb)
+ return HNS3_NIC_LB_TEST_NO_MEM_ERR;
+
+ skb->dev = ndev;
+ hns3_lp_setup_skb(skb);
+ skb->queue_mapping = HNS3_NIC_LB_TEST_RING_ID;
+
+ good_cnt = 0;
+ for (i = 0; i < HNS3_NIC_LB_TEST_PKT_NUM; i++) {
+ netdev_tx_t tx_ret;
+
+ skb_get(skb);
+ tx_ret = hns3_nic_net_xmit(skb, ndev);
+ if (tx_ret == NETDEV_TX_OK) {
+ good_cnt++;
+ } else {
+ kfree_skb(skb);
+ netdev_err(ndev, "hns3_lb_run_test xmit failed: %d\n",
+ tx_ret);
+ }
+ }
+ if (good_cnt != HNS3_NIC_LB_TEST_PKT_NUM) {
+ ret_val = HNS3_NIC_LB_TEST_TX_CNT_ERR;
+ netdev_err(ndev, "mode %d sent fail, cnt=0x%x, budget=0x%x\n",
+ mode, good_cnt, HNS3_NIC_LB_TEST_PKT_NUM);
+ goto out;
+ }
+
+ /* Allow 200 milliseconds for packets to go from Tx to Rx */
+ msleep(200);
+
+ good_cnt = hns3_lb_check_rx_ring(priv, HNS3_NIC_LB_TEST_PKT_NUM);
+ if (good_cnt != HNS3_NIC_LB_TEST_PKT_NUM) {
+ ret_val = HNS3_NIC_LB_TEST_RX_CNT_ERR;
+ netdev_err(ndev, "mode %d recv fail, cnt=0x%x, budget=0x%x\n",
+ mode, good_cnt, HNS3_NIC_LB_TEST_PKT_NUM);
+ }
+
+out:
+ hns3_lb_clear_tx_ring(priv, HNS3_NIC_LB_TEST_RING_ID,
+ HNS3_NIC_LB_TEST_RING_ID);
+
+ kfree_skb(skb);
+ return ret_val;
+}
+
+static void hns3_set_selftest_param(struct hnae3_handle *h, int (*st_param)[2])
+{
+ st_param[HNAE3_LOOP_EXTERNAL][0] = HNAE3_LOOP_EXTERNAL;
+ st_param[HNAE3_LOOP_EXTERNAL][1] =
+ h->flags & HNAE3_SUPPORT_EXTERNAL_LOOPBACK;
+
+ st_param[HNAE3_LOOP_APP][0] = HNAE3_LOOP_APP;
+ st_param[HNAE3_LOOP_APP][1] =
+ h->flags & HNAE3_SUPPORT_APP_LOOPBACK;
+
+ st_param[HNAE3_LOOP_SERIAL_SERDES][0] = HNAE3_LOOP_SERIAL_SERDES;
+ st_param[HNAE3_LOOP_SERIAL_SERDES][1] =
+ h->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
+
+ st_param[HNAE3_LOOP_PARALLEL_SERDES][0] =
+ HNAE3_LOOP_PARALLEL_SERDES;
+ st_param[HNAE3_LOOP_PARALLEL_SERDES][1] =
+ h->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
+
+ st_param[HNAE3_LOOP_PHY][0] = HNAE3_LOOP_PHY;
+ st_param[HNAE3_LOOP_PHY][1] =
+ h->flags & HNAE3_SUPPORT_PHY_LOOPBACK;
+}
+
+static void hns3_selftest_prepare(struct net_device *ndev, bool if_running)
+{
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ struct hnae3_handle *h = priv->ae_handle;
+
+ if (if_running)
+ ndev->netdev_ops->ndo_stop(ndev);
+
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
+ /* Disable the vlan filter because selftest does not support it */
+ if (h->ae_algo->ops->enable_vlan_filter &&
+ ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
+ h->ae_algo->ops->enable_vlan_filter(h, false);
+#endif
+
+ /* Tell the firmware to stop mac autoneg before the loopback test
+ * starts, otherwise the loopback test may fail while the port is
+ * still negotiating.
+ */
+ if (h->ae_algo->ops->halt_autoneg)
+ h->ae_algo->ops->halt_autoneg(h, true);
+
+ set_bit(HNS3_NIC_STATE_TESTING, &priv->state);
+}
+
+static void hns3_selftest_restore(struct net_device *ndev, bool if_running)
+{
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ struct hnae3_handle *h = priv->ae_handle;
+
+ clear_bit(HNS3_NIC_STATE_TESTING, &priv->state);
+
+ if (h->ae_algo->ops->halt_autoneg)
+ h->ae_algo->ops->halt_autoneg(h, false);
+
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
+ if (h->ae_algo->ops->enable_vlan_filter &&
+ ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
+ h->ae_algo->ops->enable_vlan_filter(h, true);
+#endif
+
+ if (if_running)
+ ndev->netdev_ops->ndo_open(ndev);
+}
+
+static void hns3_do_selftest(struct net_device *ndev, int (*st_param)[2],
+ struct ethtool_test *eth_test, u64 *data)
+{
+ int test_index = HNAE3_LOOP_APP;
+ u32 i;
+
+ for (i = HNAE3_LOOP_APP; i < HNAE3_LOOP_NONE; i++) {
+ enum hnae3_loop loop_type = (enum hnae3_loop)st_param[i][0];
+
+ if (!st_param[i][1])
+ continue;
+
+ data[test_index] = hns3_lp_up(ndev, loop_type);
+ if (!data[test_index])
+ data[test_index] = hns3_lp_run_test(ndev, loop_type);
+
+ hns3_lp_down(ndev, loop_type);
+
+ if (data[test_index])
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ test_index++;
+ }
+}
+
+static void hns3_do_external_lb(struct net_device *ndev,
+ struct ethtool_test *eth_test, u64 *data)
+{
+ data[HNAE3_LOOP_EXTERNAL] = hns3_lp_up(ndev, HNAE3_LOOP_EXTERNAL);
+ if (!data[HNAE3_LOOP_EXTERNAL])
+ data[HNAE3_LOOP_EXTERNAL] = hns3_lp_run_test(ndev, HNAE3_LOOP_EXTERNAL);
+ hns3_lp_down(ndev, HNAE3_LOOP_EXTERNAL);
+
+ if (data[HNAE3_LOOP_EXTERNAL])
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ eth_test->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
+}
+
+/**
+ * hns3_self_test - self test
+ * @ndev: net device
+ * @eth_test: test cmd
+ * @data: test result
+ */
+static void hns3_self_test(struct net_device *ndev,
+ struct ethtool_test *eth_test, u64 *data)
+{
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ struct hnae3_handle *h = priv->ae_handle;
+ int st_param[HNAE3_LOOP_NONE][2];
+ bool if_running = netif_running(ndev);
+
+ if (hns3_nic_resetting(ndev)) {
+ netdev_err(ndev, "dev resetting!");
+ return;
+ }
+
+ if (!(eth_test->flags & ETH_TEST_FL_OFFLINE))
+ return;
+
+ if (netif_msg_ifdown(h))
+ netdev_info(ndev, "self test start\n");
+
+ hns3_set_selftest_param(h, st_param);
+
+ /* The external loopback test requires that the link is up and the
+ * duplex is full; do the external test first to reduce the total
+ * test time.
+ */
+ if (eth_test->flags & ETH_TEST_FL_EXTERNAL_LB) {
+ hns3_external_lb_prepare(ndev, if_running);
+ hns3_do_external_lb(ndev, eth_test, data);
+ hns3_external_lb_restore(ndev, if_running);
+ }
+
+ hns3_selftest_prepare(ndev, if_running);
+ hns3_do_selftest(ndev, st_param, eth_test, data);
+ hns3_selftest_restore(ndev, if_running);
+
+ if (netif_msg_ifdown(h))
+ netdev_info(ndev, "self test end\n");
+}
+
+static void hns3_update_limit_promisc_mode(struct net_device *netdev,
+ bool enable)
+{
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
+
+ if (enable)
+ set_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags);
+ else
+ clear_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags);
+
+ hns3_request_update_promisc_mode(handle);
+}
+
+static const struct hns3_pflag_desc hns3_priv_flags[HNAE3_PFLAG_MAX] = {
+ { "limit_promisc", hns3_update_limit_promisc_mode }
+};
+
+static int hns3_get_sset_count(struct net_device *netdev, int stringset)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ const struct hnae3_ae_ops *ops = h->ae_algo->ops;
+
+ if (!ops->get_sset_count)
+ return -EOPNOTSUPP;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ return ((HNS3_TQP_STATS_COUNT * h->kinfo.num_tqps) +
+ ops->get_sset_count(h, stringset));
+
+ case ETH_SS_TEST:
+ return ops->get_sset_count(h, stringset);
+
+ case ETH_SS_PRIV_FLAGS:
+ return HNAE3_PFLAG_MAX;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void *hns3_update_strings(u8 *data, const struct hns3_stats *stats,
+ u32 stat_count, u32 num_tqps, const char *prefix)
+{
+#define MAX_PREFIX_SIZE (6 + 4)
+ u32 size_left;
+ u32 i, j;
+ u32 n1;
+
+ for (i = 0; i < num_tqps; i++) {
+ for (j = 0; j < stat_count; j++) {
+ data[ETH_GSTRING_LEN - 1] = '\0';
+
+ /* first, prepend the prefix string */
+ n1 = scnprintf(data, MAX_PREFIX_SIZE, "%s%u_",
+ prefix, i);
+ size_left = (ETH_GSTRING_LEN - 1) - n1;
+
+ /* now, concatenate the stats string to it */
+ strncat(data, stats[j].stats_string, size_left);
+ data += ETH_GSTRING_LEN;
+ }
+ }
+
+ return data;
+}
+
+static u8 *hns3_get_strings_tqps(struct hnae3_handle *handle, u8 *data)
+{
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ const char tx_prefix[] = "txq";
+ const char rx_prefix[] = "rxq";
+
+ /* get strings for Tx */
+ data = hns3_update_strings(data, hns3_txq_stats, HNS3_TXQ_STATS_COUNT,
+ kinfo->num_tqps, tx_prefix);
+
+ /* get strings for Rx */
+ data = hns3_update_strings(data, hns3_rxq_stats, HNS3_RXQ_STATS_COUNT,
+ kinfo->num_tqps, rx_prefix);
+
+ return data;
+}
+
+static void hns3_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ const struct hnae3_ae_ops *ops = h->ae_algo->ops;
+ char *buff = (char *)data;
+ int i;
+
+ if (!ops->get_strings)
+ return;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ buff = hns3_get_strings_tqps(h, buff);
+ ops->get_strings(h, stringset, (u8 *)buff);
+ break;
+ case ETH_SS_TEST:
+ ops->get_strings(h, stringset, data);
+ break;
+ case ETH_SS_PRIV_FLAGS:
+ for (i = 0; i < HNS3_PRIV_FLAGS_LEN; i++) {
+ snprintf(buff, ETH_GSTRING_LEN, "%s",
+ hns3_priv_flags[i].name);
+ buff += ETH_GSTRING_LEN;
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static u64 *hns3_get_stats_tqps(struct hnae3_handle *handle, u64 *data)
+{
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ struct hns3_nic_priv *nic_priv = handle->priv;
+ struct hns3_enet_ring *ring;
+ u8 *stat;
+ int i, j;
+
+ /* get stats for Tx */
+ for (i = 0; i < kinfo->num_tqps; i++) {
+ ring = &nic_priv->ring[i];
+ for (j = 0; j < HNS3_TXQ_STATS_COUNT; j++) {
+ stat = (u8 *)ring + hns3_txq_stats[j].stats_offset;
+ *data++ = *(u64 *)stat;
+ }
+ }
+
+ /* get stats for Rx */
+ for (i = 0; i < kinfo->num_tqps; i++) {
+ ring = &nic_priv->ring[i + kinfo->num_tqps];
+ for (j = 0; j < HNS3_RXQ_STATS_COUNT; j++) {
+ stat = (u8 *)ring + hns3_rxq_stats[j].stats_offset;
+ *data++ = *(u64 *)stat;
+ }
+ }
+
+ return data;
+}
+
+/* hns3_get_stats - get detailed statistics.
+ * @netdev: net device
+ * @stats: statistics info.
+ * @data: statistics data.
+ */
+static void hns3_get_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ u64 *p = data;
+
+ if (hns3_nic_resetting(netdev)) {
+ netdev_err(netdev, "dev resetting, could not get stats\n");
+ return;
+ }
+
+ if (!h->ae_algo->ops->get_stats || !h->ae_algo->ops->update_stats) {
+ netdev_err(netdev, "could not get any statistics\n");
+ return;
+ }
+
+ h->ae_algo->ops->update_stats(h);
+
+ /* get per-queue stats */
+ p = hns3_get_stats_tqps(h, p);
+
+ /* get MAC & other misc hardware stats */
+ h->ae_algo->ops->get_stats(h, p);
+}
+
+static void hns3_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = priv->ae_handle;
+ u32 fw_version;
+
+ if (!h->ae_algo->ops->get_fw_version) {
+ netdev_err(netdev, "could not get fw version!\n");
+ return;
+ }
+
+ strscpy(drvinfo->driver, dev_driver_string(&h->pdev->dev),
+ sizeof(drvinfo->driver));
+
+ strscpy(drvinfo->bus_info, pci_name(h->pdev),
+ sizeof(drvinfo->bus_info));
+
+ fw_version = priv->ae_handle->ae_algo->ops->get_fw_version(h);
+
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%lu.%lu.%lu.%lu",
+ hnae3_get_field(fw_version, HNAE3_FW_VERSION_BYTE3_MASK,
+ HNAE3_FW_VERSION_BYTE3_SHIFT),
+ hnae3_get_field(fw_version, HNAE3_FW_VERSION_BYTE2_MASK,
+ HNAE3_FW_VERSION_BYTE2_SHIFT),
+ hnae3_get_field(fw_version, HNAE3_FW_VERSION_BYTE1_MASK,
+ HNAE3_FW_VERSION_BYTE1_SHIFT),
+ hnae3_get_field(fw_version, HNAE3_FW_VERSION_BYTE0_MASK,
+ HNAE3_FW_VERSION_BYTE0_SHIFT));
+}
+
+static u32 hns3_get_link(struct net_device *netdev)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (h->ae_algo->ops->get_status)
+ return h->ae_algo->ops->get_status(h);
+ else
+ return 0;
+}
+
+static void hns3_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *param,
+ struct kernel_ethtool_ringparam *kernel_param,
+ struct netlink_ext_ack *extack)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = priv->ae_handle;
+ int rx_queue_index = h->kinfo.num_tqps;
+
+ if (hns3_nic_resetting(netdev) || !priv->ring) {
+ netdev_err(netdev, "failed to get ringparam value, due to dev resetting or uninited\n");
+ return;
+ }
+
+ param->tx_max_pending = HNS3_RING_MAX_PENDING;
+ param->rx_max_pending = HNS3_RING_MAX_PENDING;
+
+ param->tx_pending = priv->ring[0].desc_num;
+ param->rx_pending = priv->ring[rx_queue_index].desc_num;
+ kernel_param->rx_buf_len = priv->ring[rx_queue_index].buf_size;
+ kernel_param->tx_push = test_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE,
+ &priv->state);
+}
+
+static void hns3_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *param)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+
+ if (!test_bit(HNAE3_DEV_SUPPORT_PAUSE_B, ae_dev->caps))
+ return;
+
+ if (h->ae_algo->ops->get_pauseparam)
+ h->ae_algo->ops->get_pauseparam(h, &param->autoneg,
+ &param->rx_pause, &param->tx_pause);
+}
+
+static int hns3_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *param)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+
+ if (!test_bit(HNAE3_DEV_SUPPORT_PAUSE_B, ae_dev->caps))
+ return -EOPNOTSUPP;
+
+ netif_dbg(h, drv, netdev,
+ "set pauseparam: autoneg=%u, rx:%u, tx:%u\n",
+ param->autoneg, param->rx_pause, param->tx_pause);
+
+ if (h->ae_algo->ops->set_pauseparam)
+ return h->ae_algo->ops->set_pauseparam(h, param->autoneg,
+ param->rx_pause,
+ param->tx_pause);
+ return -EOPNOTSUPP;
+}
+
+static void hns3_get_ksettings(struct hnae3_handle *h,
+ struct ethtool_link_ksettings *cmd)
+{
+ const struct hnae3_ae_ops *ops = h->ae_algo->ops;
+
+ /* 1.auto_neg & speed & duplex from cmd */
+ if (ops->get_ksettings_an_result)
+ ops->get_ksettings_an_result(h,
+ &cmd->base.autoneg,
+ &cmd->base.speed,
+ &cmd->base.duplex,
+ &cmd->lanes);
+
+ /* 2.get link mode */
+ if (ops->get_link_mode)
+ ops->get_link_mode(h,
+ cmd->link_modes.supported,
+ cmd->link_modes.advertising);
+
+ /* 3. mdix_ctrl & mdix are read from the phy reg */
+ if (ops->get_mdix_mode)
+ ops->get_mdix_mode(h, &cmd->base.eth_tp_mdix_ctrl,
+ &cmd->base.eth_tp_mdix);
+}
+
+static int hns3_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+ const struct hnae3_ae_ops *ops;
+ u8 module_type;
+ u8 media_type;
+ u8 link_stat;
+
+ ops = h->ae_algo->ops;
+ if (ops->get_media_type)
+ ops->get_media_type(h, &media_type, &module_type);
+ else
+ return -EOPNOTSUPP;
+
+ switch (media_type) {
+ case HNAE3_MEDIA_TYPE_NONE:
+ cmd->base.port = PORT_NONE;
+ hns3_get_ksettings(h, cmd);
+ break;
+ case HNAE3_MEDIA_TYPE_FIBER:
+ if (module_type == HNAE3_MODULE_TYPE_UNKNOWN)
+ cmd->base.port = PORT_OTHER;
+ else if (module_type == HNAE3_MODULE_TYPE_CR)
+ cmd->base.port = PORT_DA;
+ else
+ cmd->base.port = PORT_FIBRE;
+
+ hns3_get_ksettings(h, cmd);
+ break;
+ case HNAE3_MEDIA_TYPE_BACKPLANE:
+ cmd->base.port = PORT_NONE;
+ hns3_get_ksettings(h, cmd);
+ break;
+ case HNAE3_MEDIA_TYPE_COPPER:
+ cmd->base.port = PORT_TP;
+ if (test_bit(HNAE3_DEV_SUPPORT_PHY_IMP_B, ae_dev->caps) &&
+ ops->get_phy_link_ksettings)
+ ops->get_phy_link_ksettings(h, cmd);
+ else if (!netdev->phydev)
+ hns3_get_ksettings(h, cmd);
+ else
+ phy_ethtool_ksettings_get(netdev->phydev, cmd);
+ break;
+ default:
+ netdev_warn(netdev, "Unknown media type");
+ return 0;
+ }
+
+ /* mdio_support */
+ cmd->base.mdio_support = ETH_MDIO_SUPPORTS_C22;
+
+ link_stat = hns3_get_link(netdev);
+ if (!link_stat) {
+ cmd->base.speed = SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
+ }
+
+ return 0;
+}
+
+static int hns3_check_ksettings_param(const struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ u8 module_type = HNAE3_MODULE_TYPE_UNKNOWN;
+ u8 media_type = HNAE3_MEDIA_TYPE_UNKNOWN;
+ u32 lane_num;
+ u8 autoneg;
+ u32 speed;
+ u8 duplex;
+ int ret;
+
+ /* hw doesn't support using a specified speed and duplex to negotiate,
+ * so it is unnecessary to check them when autoneg is on.
+ */
+ if (cmd->base.autoneg)
+ return 0;
+
+ if (ops->get_ksettings_an_result) {
+ ops->get_ksettings_an_result(handle, &autoneg, &speed, &duplex, &lane_num);
+ if (cmd->base.autoneg == autoneg && cmd->base.speed == speed &&
+ cmd->base.duplex == duplex && cmd->lanes == lane_num)
+ return 0;
+ }
+
+ if (ops->get_media_type)
+ ops->get_media_type(handle, &media_type, &module_type);
+
+ if (cmd->base.duplex == DUPLEX_HALF &&
+ media_type != HNAE3_MEDIA_TYPE_COPPER) {
+ netdev_err(netdev,
+ "only copper port supports half duplex!");
+ return -EINVAL;
+ }
+
+ if (ops->check_port_speed) {
+ ret = ops->check_port_speed(handle, cmd->base.speed);
+ if (ret) {
+ netdev_err(netdev, "unsupported speed\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int hns3_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ int ret;
+
+ /* The chip doesn't support this mode. */
+ if (cmd->base.speed == SPEED_1000 && cmd->base.duplex == DUPLEX_HALF)
+ return -EINVAL;
+
+ if (cmd->lanes && !hnae3_ae_dev_lane_num_supported(ae_dev))
+ return -EOPNOTSUPP;
+
+ netif_dbg(handle, drv, netdev,
+ "set link(%s): autoneg=%u, speed=%u, duplex=%u, lanes=%u\n",
+ netdev->phydev ? "phy" : "mac",
+ cmd->base.autoneg, cmd->base.speed, cmd->base.duplex,
+ cmd->lanes);
+
+ /* Only support ksettings_set for netdev with phy attached for now */
+ if (netdev->phydev) {
+ if (cmd->base.speed == SPEED_1000 &&
+ cmd->base.autoneg == AUTONEG_DISABLE)
+ return -EINVAL;
+
+ return phy_ethtool_ksettings_set(netdev->phydev, cmd);
+ } else if (test_bit(HNAE3_DEV_SUPPORT_PHY_IMP_B, ae_dev->caps) &&
+ ops->set_phy_link_ksettings) {
+ return ops->set_phy_link_ksettings(handle, cmd);
+ }
+
+ if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
+ return -EOPNOTSUPP;
+
+ ret = hns3_check_ksettings_param(netdev, cmd);
+ if (ret)
+ return ret;
+
+ if (ops->set_autoneg) {
+ ret = ops->set_autoneg(handle, cmd->base.autoneg);
+ if (ret)
+ return ret;
+ }
+
+ /* hw doesn't support using a specified speed and duplex to negotiate,
+ * so ignore them when autoneg is on.
+ */
+ if (cmd->base.autoneg) {
+ netdev_info(netdev,
+ "autoneg is on, ignore the speed and duplex\n");
+ return 0;
+ }
+
+ if (ops->cfg_mac_speed_dup_h)
+ ret = ops->cfg_mac_speed_dup_h(handle, cmd->base.speed,
+ cmd->base.duplex, (u8)(cmd->lanes));
+
+ return ret;
+}
+
+static u32 hns3_get_rss_key_size(struct net_device *netdev)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (!h->ae_algo->ops->get_rss_key_size)
+ return 0;
+
+ return h->ae_algo->ops->get_rss_key_size(h);
+}
+
+static u32 hns3_get_rss_indir_size(struct net_device *netdev)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+
+ return ae_dev->dev_specs.rss_ind_tbl_size;
+}
+
+static int hns3_get_rss(struct net_device *netdev, u32 *indir, u8 *key,
+ u8 *hfunc)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (!h->ae_algo->ops->get_rss)
+ return -EOPNOTSUPP;
+
+ return h->ae_algo->ops->get_rss(h, indir, key, hfunc);
+}
+
+static int hns3_set_rss(struct net_device *netdev, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+
+ if (!h->ae_algo->ops->set_rss)
+ return -EOPNOTSUPP;
+
+ if ((ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2 &&
+ hfunc != ETH_RSS_HASH_TOP) || (hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ hfunc != ETH_RSS_HASH_TOP && hfunc != ETH_RSS_HASH_XOR)) {
+ netdev_err(netdev, "hash func not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (!indir) {
+ netdev_err(netdev,
+ "set rss failed for indir is empty\n");
+ return -EOPNOTSUPP;
+ }
+
+ return h->ae_algo->ops->set_rss(h, indir, key, hfunc);
+}
+
+static int hns3_get_rxnfc(struct net_device *netdev,
+ struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = h->kinfo.num_tqps;
+ return 0;
+ case ETHTOOL_GRXFH:
+ if (h->ae_algo->ops->get_rss_tuple)
+ return h->ae_algo->ops->get_rss_tuple(h, cmd);
+ return -EOPNOTSUPP;
+ case ETHTOOL_GRXCLSRLCNT:
+ if (h->ae_algo->ops->get_fd_rule_cnt)
+ return h->ae_algo->ops->get_fd_rule_cnt(h, cmd);
+ return -EOPNOTSUPP;
+ case ETHTOOL_GRXCLSRULE:
+ if (h->ae_algo->ops->get_fd_rule_info)
+ return h->ae_algo->ops->get_fd_rule_info(h, cmd);
+ return -EOPNOTSUPP;
+ case ETHTOOL_GRXCLSRLALL:
+ if (h->ae_algo->ops->get_fd_all_rules)
+ return h->ae_algo->ops->get_fd_all_rules(h, cmd,
+ rule_locs);
+ return -EOPNOTSUPP;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
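+/* Map ethtool reset flags to hnae3 reset types for the PF and VF cases */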
+static const struct hns3_reset_type_map hns3_reset_type[] = {
+ {ETH_RESET_MGMT, HNAE3_IMP_RESET},
+ {ETH_RESET_ALL, HNAE3_GLOBAL_RESET},
+ {ETH_RESET_DEDICATED, HNAE3_FUNC_RESET},
+};
+
+static const struct hns3_reset_type_map hns3vf_reset_type[] = {
+ {ETH_RESET_DEDICATED, HNAE3_VF_FUNC_RESET},
+};
+
+static int hns3_set_reset(struct net_device *netdev, u32 *flags)
+{
+ enum hnae3_reset_type rst_type = HNAE3_NONE_RESET;
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+ const struct hnae3_ae_ops *ops = h->ae_algo->ops;
+ const struct hns3_reset_type_map *rst_type_map;
+ enum ethtool_reset_flags rst_flags;
+ u32 i, size;
+
+ if (ops->ae_dev_resetting && ops->ae_dev_resetting(h))
+ return -EBUSY;
+
+ if (!ops->set_default_reset_request || !ops->reset_event)
+ return -EOPNOTSUPP;
+
+ if (h->flags & HNAE3_SUPPORT_VF) {
+ rst_type_map = hns3vf_reset_type;
+ size = ARRAY_SIZE(hns3vf_reset_type);
+ } else {
+ rst_type_map = hns3_reset_type;
+ size = ARRAY_SIZE(hns3_reset_type);
+ }
+
+ for (i = 0; i < size; i++) {
+ if (rst_type_map[i].rst_flags == *flags) {
+ rst_type = rst_type_map[i].rst_type;
+ rst_flags = rst_type_map[i].rst_flags;
+ break;
+ }
+ }
+
+ if (rst_type == HNAE3_NONE_RESET ||
+ (rst_type == HNAE3_IMP_RESET &&
+ ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2))
+ return -EOPNOTSUPP;
+
+ netdev_info(netdev, "Setting reset type %d\n", rst_type);
+
+ ops->set_default_reset_request(ae_dev, rst_type);
+
+ ops->reset_event(h->pdev, h);
+
+ *flags &= ~rst_flags;
+
+ return 0;
+}
+
+static void hns3_change_all_ring_bd_num(struct hns3_nic_priv *priv,
+ u32 tx_desc_num, u32 rx_desc_num)
+{
+ struct hnae3_handle *h = priv->ae_handle;
+ int i;
+
+ h->kinfo.num_tx_desc = tx_desc_num;
+ h->kinfo.num_rx_desc = rx_desc_num;
+
+ for (i = 0; i < h->kinfo.num_tqps; i++) {
+ priv->ring[i].desc_num = tx_desc_num;
+ priv->ring[i + h->kinfo.num_tqps].desc_num = rx_desc_num;
+ }
+}
+
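+/* Snapshot the current ring array (with skb pointers cleared) before the
+ * ring parameters are changed.
+ */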
+static struct hns3_enet_ring *hns3_backup_ringparam(struct hns3_nic_priv *priv)
+{
+ struct hnae3_handle *handle = priv->ae_handle;
+ struct hns3_enet_ring *tmp_rings;
+ int i;
+
+ tmp_rings = kcalloc(handle->kinfo.num_tqps * 2,
+ sizeof(struct hns3_enet_ring), GFP_KERNEL);
+ if (!tmp_rings)
+ return NULL;
+
+ for (i = 0; i < handle->kinfo.num_tqps * 2; i++) {
+ memcpy(&tmp_rings[i], &priv->ring[i],
+ sizeof(struct hns3_enet_ring));
+ tmp_rings[i].skb = NULL;
+ }
+
+ return tmp_rings;
+}
+
+static int hns3_check_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *param,
+ struct kernel_ethtool_ringparam *kernel_param)
+{
+#define RX_BUF_LEN_2K 2048
+#define RX_BUF_LEN_4K 4096
+
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+
+ if (hns3_nic_resetting(ndev) || !priv->ring) {
+ netdev_err(ndev, "failed to set ringparam value, due to dev resetting or uninited\n");
+ return -EBUSY;
+ }
+
+ if (param->rx_mini_pending || param->rx_jumbo_pending)
+ return -EINVAL;
+
+ if (kernel_param->rx_buf_len != RX_BUF_LEN_2K &&
+ kernel_param->rx_buf_len != RX_BUF_LEN_4K) {
+ netdev_err(ndev, "Rx buf len only support 2048 and 4096\n");
+ return -EINVAL;
+ }
+
+ if (param->tx_pending > HNS3_RING_MAX_PENDING ||
+ param->tx_pending < HNS3_RING_MIN_PENDING ||
+ param->rx_pending > HNS3_RING_MAX_PENDING ||
+ param->rx_pending < HNS3_RING_MIN_PENDING) {
+ netdev_err(ndev, "Queue depth out of range [%d-%d]\n",
+ HNS3_RING_MIN_PENDING, HNS3_RING_MAX_PENDING);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static bool
+hns3_is_ringparam_changed(struct net_device *ndev,
+ struct ethtool_ringparam *param,
+ struct kernel_ethtool_ringparam *kernel_param,
+ struct hns3_ring_param *old_ringparam,
+ struct hns3_ring_param *new_ringparam)
+{
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ struct hnae3_handle *h = priv->ae_handle;
+ u16 queue_num = h->kinfo.num_tqps;
+
+ new_ringparam->tx_desc_num = ALIGN(param->tx_pending,
+ HNS3_RING_BD_MULTIPLE);
+ new_ringparam->rx_desc_num = ALIGN(param->rx_pending,
+ HNS3_RING_BD_MULTIPLE);
+ old_ringparam->tx_desc_num = priv->ring[0].desc_num;
+ old_ringparam->rx_desc_num = priv->ring[queue_num].desc_num;
+ old_ringparam->rx_buf_len = priv->ring[queue_num].buf_size;
+ new_ringparam->rx_buf_len = kernel_param->rx_buf_len;
+
+ if (old_ringparam->tx_desc_num == new_ringparam->tx_desc_num &&
+ old_ringparam->rx_desc_num == new_ringparam->rx_desc_num &&
+ old_ringparam->rx_buf_len == new_ringparam->rx_buf_len) {
+ netdev_info(ndev, "descriptor number and rx buffer length not changed\n");
+ return false;
+ }
+
+ return true;
+}
+
+static int hns3_change_rx_buf_len(struct net_device *ndev, u32 rx_buf_len)
+{
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ struct hnae3_handle *h = priv->ae_handle;
+ int i;
+
+ h->kinfo.rx_buf_len = rx_buf_len;
+
+ for (i = 0; i < h->kinfo.num_tqps; i++) {
+ h->kinfo.tqp[i]->buf_size = rx_buf_len;
+ priv->ring[i + h->kinfo.num_tqps].buf_size = rx_buf_len;
+ }
+
+ return 0;
+}
+
+static int hns3_set_tx_push(struct net_device *netdev, u32 tx_push)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+ u32 old_state = test_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state);
+
+ if (!test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, ae_dev->caps) && tx_push)
+ return -EOPNOTSUPP;
+
+ if (tx_push == old_state)
+ return 0;
+
+ netdev_dbg(netdev, "Changing tx push from %s to %s\n",
+ old_state ? "on" : "off", tx_push ? "on" : "off");
+
+ if (tx_push)
+ set_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state);
+ else
+ clear_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state);
+
+ return 0;
+}
+
+static int hns3_set_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *param,
+ struct kernel_ethtool_ringparam *kernel_param,
+ struct netlink_ext_ack *extack)
+{
+ struct hns3_ring_param old_ringparam, new_ringparam;
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ struct hnae3_handle *h = priv->ae_handle;
+ struct hns3_enet_ring *tmp_rings;
+ bool if_running = netif_running(ndev);
+ int ret, i;
+
+ ret = hns3_check_ringparam(ndev, param, kernel_param);
+ if (ret)
+ return ret;
+
+ ret = hns3_set_tx_push(ndev, kernel_param->tx_push);
+ if (ret)
+ return ret;
+
+ if (!hns3_is_ringparam_changed(ndev, param, kernel_param,
+ &old_ringparam, &new_ringparam))
+ return 0;
+
+ tmp_rings = hns3_backup_ringparam(priv);
+ if (!tmp_rings) {
+ netdev_err(ndev, "backup ring param failed by allocating memory fail\n");
+ return -ENOMEM;
+ }
+
+ netdev_info(ndev,
+ "Changing Tx/Rx ring depth from %u/%u to %u/%u, Changing rx buffer len from %u to %u\n",
+ old_ringparam.tx_desc_num, old_ringparam.rx_desc_num,
+ new_ringparam.tx_desc_num, new_ringparam.rx_desc_num,
+ old_ringparam.rx_buf_len, new_ringparam.rx_buf_len);
+
+ if (if_running)
+ ndev->netdev_ops->ndo_stop(ndev);
+
+ hns3_change_all_ring_bd_num(priv, new_ringparam.tx_desc_num,
+ new_ringparam.rx_desc_num);
+ hns3_change_rx_buf_len(ndev, new_ringparam.rx_buf_len);
+ ret = hns3_init_all_ring(priv);
+ if (ret) {
+ netdev_err(ndev, "set ringparam fail, revert to old value(%d)\n",
+ ret);
+
+ hns3_change_rx_buf_len(ndev, old_ringparam.rx_buf_len);
+ hns3_change_all_ring_bd_num(priv, old_ringparam.tx_desc_num,
+ old_ringparam.rx_desc_num);
+ for (i = 0; i < h->kinfo.num_tqps * 2; i++)
+ memcpy(&priv->ring[i], &tmp_rings[i],
+ sizeof(struct hns3_enet_ring));
+ } else {
+ for (i = 0; i < h->kinfo.num_tqps * 2; i++)
+ hns3_fini_ring(&tmp_rings[i]);
+ }
+
+ kfree(tmp_rings);
+
+ if (if_running)
+ ret = ndev->netdev_ops->ndo_open(ndev);
+
+ return ret;
+}
+
+static int hns3_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXFH:
+ if (h->ae_algo->ops->set_rss_tuple)
+ return h->ae_algo->ops->set_rss_tuple(h, cmd);
+ return -EOPNOTSUPP;
+ case ETHTOOL_SRXCLSRLINS:
+ if (h->ae_algo->ops->add_fd_entry)
+ return h->ae_algo->ops->add_fd_entry(h, cmd);
+ return -EOPNOTSUPP;
+ case ETHTOOL_SRXCLSRLDEL:
+ if (h->ae_algo->ops->del_fd_entry)
+ return h->ae_algo->ops->del_fd_entry(h, cmd);
+ return -EOPNOTSUPP;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int hns3_nway_reset(struct net_device *netdev)
+{
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ struct phy_device *phy = netdev->phydev;
+ int autoneg;
+
+ if (!netif_running(netdev))
+ return 0;
+
+ if (hns3_nic_resetting(netdev)) {
+ netdev_err(netdev, "dev resetting!");
+ return -EBUSY;
+ }
+
+ if (!ops->get_autoneg || !ops->restart_autoneg)
+ return -EOPNOTSUPP;
+
+ autoneg = ops->get_autoneg(handle);
+ if (autoneg != AUTONEG_ENABLE) {
+ netdev_err(netdev,
+ "Autoneg is off, don't support to restart it\n");
+ return -EINVAL;
+ }
+
+ netif_dbg(handle, drv, netdev,
+ "nway reset (using %s)\n", phy ? "phy" : "mac");
+
+ if (phy)
+ return genphy_restart_aneg(phy);
+
+ return ops->restart_autoneg(handle);
+}
+
+static void hns3_get_channels(struct net_device *netdev,
+ struct ethtool_channels *ch)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (h->ae_algo->ops->get_channels)
+ h->ae_algo->ops->get_channels(h, ch);
+}
+
+static int hns3_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *cmd,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hns3_enet_coalesce *tx_coal = &priv->tx_coal;
+ struct hns3_enet_coalesce *rx_coal = &priv->rx_coal;
+ struct hnae3_handle *h = priv->ae_handle;
+
+ if (hns3_nic_resetting(netdev))
+ return -EBUSY;
+
+ cmd->use_adaptive_tx_coalesce = tx_coal->adapt_enable;
+ cmd->use_adaptive_rx_coalesce = rx_coal->adapt_enable;
+
+ cmd->tx_coalesce_usecs = tx_coal->int_gl;
+ cmd->rx_coalesce_usecs = rx_coal->int_gl;
+
+ cmd->tx_coalesce_usecs_high = h->kinfo.int_rl_setting;
+ cmd->rx_coalesce_usecs_high = h->kinfo.int_rl_setting;
+
+ cmd->tx_max_coalesced_frames = tx_coal->int_ql;
+ cmd->rx_max_coalesced_frames = rx_coal->int_ql;
+
+ kernel_coal->use_cqe_mode_tx = (priv->tx_cqe_mode ==
+ DIM_CQ_PERIOD_MODE_START_FROM_CQE);
+ kernel_coal->use_cqe_mode_rx = (priv->rx_cqe_mode ==
+ DIM_CQ_PERIOD_MODE_START_FROM_CQE);
+
+ return 0;
+}
+
+static int hns3_check_gl_coalesce_para(struct net_device *netdev,
+ struct ethtool_coalesce *cmd)
+{
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
+ u32 rx_gl, tx_gl;
+
+ if (cmd->rx_coalesce_usecs > ae_dev->dev_specs.max_int_gl) {
+ netdev_err(netdev,
+ "invalid rx-usecs value, rx-usecs range is 0-%u\n",
+ ae_dev->dev_specs.max_int_gl);
+ return -EINVAL;
+ }
+
+ if (cmd->tx_coalesce_usecs > ae_dev->dev_specs.max_int_gl) {
+ netdev_err(netdev,
+ "invalid tx-usecs value, tx-usecs range is 0-%u\n",
+ ae_dev->dev_specs.max_int_gl);
+ return -EINVAL;
+ }
+
+ /* For device version V3 and above, GL uses a 1us unit,
+ * so rounding down is not needed.
+ */
+ if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
+ return 0;
+
+ rx_gl = hns3_gl_round_down(cmd->rx_coalesce_usecs);
+ if (rx_gl != cmd->rx_coalesce_usecs) {
+ netdev_info(netdev,
+ "rx_usecs(%u) rounded down to %u, because it must be multiple of 2.\n",
+ cmd->rx_coalesce_usecs, rx_gl);
+ }
+
+ tx_gl = hns3_gl_round_down(cmd->tx_coalesce_usecs);
+ if (tx_gl != cmd->tx_coalesce_usecs) {
+ netdev_info(netdev,
+ "tx_usecs(%u) rounded down to %u, because it must be multiple of 2.\n",
+ cmd->tx_coalesce_usecs, tx_gl);
+ }
+
+ return 0;
+}
+
+static int hns3_check_rl_coalesce_para(struct net_device *netdev,
+ struct ethtool_coalesce *cmd)
+{
+ u32 rl;
+
+ if (cmd->tx_coalesce_usecs_high != cmd->rx_coalesce_usecs_high) {
+ netdev_err(netdev,
+ "tx_usecs_high must be same as rx_usecs_high.\n");
+ return -EINVAL;
+ }
+
+ if (cmd->rx_coalesce_usecs_high > HNS3_INT_RL_MAX) {
+ netdev_err(netdev,
+ "Invalid usecs_high value, usecs_high range is 0-%d\n",
+ HNS3_INT_RL_MAX);
+ return -EINVAL;
+ }
+
+ rl = hns3_rl_round_down(cmd->rx_coalesce_usecs_high);
+ if (rl != cmd->rx_coalesce_usecs_high) {
+ netdev_info(netdev,
+ "usecs_high(%u) rounded down to %u, because it must be multiple of 4.\n",
+ cmd->rx_coalesce_usecs_high, rl);
+ }
+
+ return 0;
+}
+
+static int hns3_check_ql_coalesce_param(struct net_device *netdev,
+ struct ethtool_coalesce *cmd)
+{
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
+
+ if ((cmd->tx_max_coalesced_frames || cmd->rx_max_coalesced_frames) &&
+ !ae_dev->dev_specs.int_ql_max) {
+ netdev_err(netdev, "coalesced frames is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (cmd->tx_max_coalesced_frames > ae_dev->dev_specs.int_ql_max ||
+ cmd->rx_max_coalesced_frames > ae_dev->dev_specs.int_ql_max) {
+ netdev_err(netdev,
+ "invalid coalesced_frames value, range is 0-%u\n",
+ ae_dev->dev_specs.int_ql_max);
+ return -ERANGE;
+ }
+
+ return 0;
+}
+
+static int
+hns3_check_cqe_coalesce_param(struct net_device *netdev,
+ struct kernel_ethtool_coalesce *kernel_coal)
+{
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
+
+ if ((kernel_coal->use_cqe_mode_tx || kernel_coal->use_cqe_mode_rx) &&
+ !hnae3_ae_dev_cq_supported(ae_dev)) {
+ netdev_err(netdev, "coalesced cqe mode is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int
+hns3_check_coalesce_para(struct net_device *netdev,
+ struct ethtool_coalesce *cmd,
+ struct kernel_ethtool_coalesce *kernel_coal)
+{
+ int ret;
+
+ ret = hns3_check_cqe_coalesce_param(netdev, kernel_coal);
+ if (ret)
+ return ret;
+
+ ret = hns3_check_gl_coalesce_para(netdev, cmd);
+ if (ret) {
+ netdev_err(netdev,
+ "Check gl coalesce param fail. ret = %d\n", ret);
+ return ret;
+ }
+
+ ret = hns3_check_rl_coalesce_para(netdev, cmd);
+ if (ret) {
+ netdev_err(netdev,
+ "Check rl coalesce param fail. ret = %d\n", ret);
+ return ret;
+ }
+
+ return hns3_check_ql_coalesce_param(netdev, cmd);
+}
+
+static void hns3_set_coalesce_per_queue(struct net_device *netdev,
+ struct ethtool_coalesce *cmd,
+ u32 queue)
+{
+ struct hns3_enet_tqp_vector *tx_vector, *rx_vector;
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = priv->ae_handle;
+ int queue_num = h->kinfo.num_tqps;
+
+ tx_vector = priv->ring[queue].tqp_vector;
+ rx_vector = priv->ring[queue_num + queue].tqp_vector;
+
+ tx_vector->tx_group.coal.adapt_enable =
+ cmd->use_adaptive_tx_coalesce;
+ rx_vector->rx_group.coal.adapt_enable =
+ cmd->use_adaptive_rx_coalesce;
+
+ tx_vector->tx_group.coal.int_gl = cmd->tx_coalesce_usecs;
+ rx_vector->rx_group.coal.int_gl = cmd->rx_coalesce_usecs;
+
+ tx_vector->tx_group.coal.int_ql = cmd->tx_max_coalesced_frames;
+ rx_vector->rx_group.coal.int_ql = cmd->rx_max_coalesced_frames;
+
+ hns3_set_vector_coalesce_tx_gl(tx_vector,
+ tx_vector->tx_group.coal.int_gl);
+ hns3_set_vector_coalesce_rx_gl(rx_vector,
+ rx_vector->rx_group.coal.int_gl);
+
+ hns3_set_vector_coalesce_rl(tx_vector, h->kinfo.int_rl_setting);
+ hns3_set_vector_coalesce_rl(rx_vector, h->kinfo.int_rl_setting);
+
+ if (tx_vector->tx_group.coal.ql_enable)
+ hns3_set_vector_coalesce_tx_ql(tx_vector,
+ tx_vector->tx_group.coal.int_ql);
+ if (rx_vector->rx_group.coal.ql_enable)
+ hns3_set_vector_coalesce_rx_ql(rx_vector,
+ rx_vector->rx_group.coal.int_ql);
+}
+
+static int hns3_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *cmd,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hns3_enet_coalesce *tx_coal = &priv->tx_coal;
+ struct hns3_enet_coalesce *rx_coal = &priv->rx_coal;
+ u16 queue_num = h->kinfo.num_tqps;
+ enum dim_cq_period_mode tx_mode;
+ enum dim_cq_period_mode rx_mode;
+ int ret;
+ int i;
+
+ if (hns3_nic_resetting(netdev))
+ return -EBUSY;
+
+ ret = hns3_check_coalesce_para(netdev, cmd, kernel_coal);
+ if (ret)
+ return ret;
+
+ h->kinfo.int_rl_setting =
+ hns3_rl_round_down(cmd->rx_coalesce_usecs_high);
+
+ tx_coal->adapt_enable = cmd->use_adaptive_tx_coalesce;
+ rx_coal->adapt_enable = cmd->use_adaptive_rx_coalesce;
+
+ tx_coal->int_gl = cmd->tx_coalesce_usecs;
+ rx_coal->int_gl = cmd->rx_coalesce_usecs;
+
+ tx_coal->int_ql = cmd->tx_max_coalesced_frames;
+ rx_coal->int_ql = cmd->rx_max_coalesced_frames;
+
+ for (i = 0; i < queue_num; i++)
+ hns3_set_coalesce_per_queue(netdev, cmd, i);
+
+ tx_mode = kernel_coal->use_cqe_mode_tx ?
+ DIM_CQ_PERIOD_MODE_START_FROM_CQE :
+ DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+ rx_mode = kernel_coal->use_cqe_mode_rx ?
+ DIM_CQ_PERIOD_MODE_START_FROM_CQE :
+ DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+ hns3_cq_period_mode_init(priv, tx_mode, rx_mode);
+
+ return 0;
+}
+
+static int hns3_get_regs_len(struct net_device *netdev)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (!h->ae_algo->ops->get_regs_len)
+ return -EOPNOTSUPP;
+
+ return h->ae_algo->ops->get_regs_len(h);
+}
+
+static void hns3_get_regs(struct net_device *netdev,
+ struct ethtool_regs *cmd, void *data)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (!h->ae_algo->ops->get_regs)
+ return;
+
+ h->ae_algo->ops->get_regs(h, &cmd->version, data);
+}
+
+static int hns3_set_phys_id(struct net_device *netdev,
+ enum ethtool_phys_id_state state)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (!h->ae_algo->ops->set_led_id)
+ return -EOPNOTSUPP;
+
+ return h->ae_algo->ops->set_led_id(h, state);
+}
+
+static u32 hns3_get_msglevel(struct net_device *netdev)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ return h->msg_enable;
+}
+
+static void hns3_set_msglevel(struct net_device *netdev, u32 msg_level)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ h->msg_enable = msg_level;
+}
+
+static void hns3_get_fec_stats(struct net_device *netdev,
+ struct ethtool_fec_stats *fec_stats)
+{
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+
+ if (!hnae3_ae_dev_fec_stats_supported(ae_dev) || !ops->get_fec_stats)
+ return;
+
+ ops->get_fec_stats(handle, fec_stats);
+}
+
+/* Translate local fec value into ethtool value. */
+static unsigned int loc_to_eth_fec(u8 loc_fec)
+{
+ u32 eth_fec = 0;
+
+ if (loc_fec & BIT(HNAE3_FEC_AUTO))
+ eth_fec |= ETHTOOL_FEC_AUTO;
+ if (loc_fec & BIT(HNAE3_FEC_RS))
+ eth_fec |= ETHTOOL_FEC_RS;
+ if (loc_fec & BIT(HNAE3_FEC_LLRS))
+ eth_fec |= ETHTOOL_FEC_LLRS;
+ if (loc_fec & BIT(HNAE3_FEC_BASER))
+ eth_fec |= ETHTOOL_FEC_BASER;
+ if (loc_fec & BIT(HNAE3_FEC_NONE))
+ eth_fec |= ETHTOOL_FEC_OFF;
+
+ return eth_fec;
+}
+
+/* Translate ethtool fec value into local value. */
+static unsigned int eth_to_loc_fec(unsigned int eth_fec)
+{
+ u32 loc_fec = 0;
+
+ if (eth_fec & ETHTOOL_FEC_OFF)
+ loc_fec |= BIT(HNAE3_FEC_NONE);
+ if (eth_fec & ETHTOOL_FEC_AUTO)
+ loc_fec |= BIT(HNAE3_FEC_AUTO);
+ if (eth_fec & ETHTOOL_FEC_RS)
+ loc_fec |= BIT(HNAE3_FEC_RS);
+ if (eth_fec & ETHTOOL_FEC_LLRS)
+ loc_fec |= BIT(HNAE3_FEC_LLRS);
+ if (eth_fec & ETHTOOL_FEC_BASER)
+ loc_fec |= BIT(HNAE3_FEC_BASER);
+
+ return loc_fec;
+}
+
+static int hns3_get_fecparam(struct net_device *netdev,
+ struct ethtool_fecparam *fec)
+{
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ u8 fec_ability;
+ u8 fec_mode;
+
+ if (!test_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps))
+ return -EOPNOTSUPP;
+
+ if (!ops->get_fec)
+ return -EOPNOTSUPP;
+
+ ops->get_fec(handle, &fec_ability, &fec_mode);
+
+ fec->fec = loc_to_eth_fec(fec_ability);
+ fec->active_fec = loc_to_eth_fec(fec_mode);
+ if (!fec->active_fec)
+ fec->active_fec = ETHTOOL_FEC_OFF;
+
+ return 0;
+}
+
+static int hns3_set_fecparam(struct net_device *netdev,
+ struct ethtool_fecparam *fec)
+{
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ u32 fec_mode;
+
+ if (!test_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps))
+ return -EOPNOTSUPP;
+
+ if (!ops->set_fec)
+ return -EOPNOTSUPP;
+ fec_mode = eth_to_loc_fec(fec->fec);
+
+ netif_dbg(handle, drv, netdev, "set fecparam: mode=%u\n", fec_mode);
+
+ return ops->set_fec(handle, fec_mode);
+}
+
+static int hns3_get_module_info(struct net_device *netdev,
+ struct ethtool_modinfo *modinfo)
+{
+#define HNS3_SFF_8636_V1_3 0x03
+
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ struct hns3_sfp_type sfp_type;
+ int ret;
+
+ if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2 ||
+ !ops->get_module_eeprom)
+ return -EOPNOTSUPP;
+
+ memset(&sfp_type, 0, sizeof(sfp_type));
+ ret = ops->get_module_eeprom(handle, 0, sizeof(sfp_type) / sizeof(u8),
+ (u8 *)&sfp_type);
+ if (ret)
+ return ret;
+
+ switch (sfp_type.type) {
+ case SFF8024_ID_SFP:
+ modinfo->type = ETH_MODULE_SFF_8472;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+ break;
+ case SFF8024_ID_QSFP_8438:
+ modinfo->type = ETH_MODULE_SFF_8436;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN;
+ break;
+ case SFF8024_ID_QSFP_8436_8636:
+ if (sfp_type.ext_type < HNS3_SFF_8636_V1_3) {
+ modinfo->type = ETH_MODULE_SFF_8436;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN;
+ } else {
+ modinfo->type = ETH_MODULE_SFF_8636;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN;
+ }
+ break;
+ case SFF8024_ID_QSFP28_8636:
+ modinfo->type = ETH_MODULE_SFF_8636;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN;
+ break;
+ default:
+ netdev_err(netdev, "Optical module unknown: %#x\n",
+ sfp_type.type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int hns3_get_module_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *ee, u8 *data)
+{
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+
+ if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2 ||
+ !ops->get_module_eeprom)
+ return -EOPNOTSUPP;
+
+ if (!ee->len)
+ return -EINVAL;
+
+ memset(data, 0, ee->len);
+
+ return ops->get_module_eeprom(handle, ee->offset, ee->len, data);
+}
+
+static u32 hns3_get_priv_flags(struct net_device *netdev)
+{
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
+
+ return handle->priv_flags;
+}
+
+static int hns3_check_priv_flags(struct hnae3_handle *h, u32 changed)
+{
+ u32 i;
+
+ for (i = 0; i < HNAE3_PFLAG_MAX; i++)
+ if ((changed & BIT(i)) && !test_bit(i, &h->supported_pflags)) {
+ netdev_err(h->netdev, "%s is unsupported\n",
+ hns3_priv_flags[i].name);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int hns3_set_priv_flags(struct net_device *netdev, u32 pflags)
+{
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
+ u32 changed = pflags ^ handle->priv_flags;
+ int ret;
+ u32 i;
+
+ ret = hns3_check_priv_flags(handle, changed);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < HNAE3_PFLAG_MAX; i++) {
+ if (changed & BIT(i)) {
+ bool enable = !(handle->priv_flags & BIT(i));
+
+ if (enable)
+ handle->priv_flags |= BIT(i);
+ else
+ handle->priv_flags &= ~BIT(i);
+ hns3_priv_flags[i].handler(netdev, enable);
+ }
+ }
+
+ return 0;
+}
+
+static int hns3_get_tunable(struct net_device *netdev,
+ const struct ethtool_tunable *tuna,
+ void *data)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = priv->ae_handle;
+ int ret = 0;
+
+ switch (tuna->id) {
+ case ETHTOOL_TX_COPYBREAK:
+ /* all the tx rings have the same tx_copybreak */
+ *(u32 *)data = priv->tx_copybreak;
+ break;
+ case ETHTOOL_RX_COPYBREAK:
+ *(u32 *)data = priv->rx_copybreak;
+ break;
+ case ETHTOOL_TX_COPYBREAK_BUF_SIZE:
+ *(u32 *)data = h->kinfo.tx_spare_buf_size;
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+
+static int hns3_set_tx_spare_buf_size(struct net_device *netdev,
+ u32 data)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = priv->ae_handle;
+ int ret;
+
+ h->kinfo.tx_spare_buf_size = data;
+
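+ /* The new TX spare buffer size only takes effect after the client
+ * rings are torn down and re-initialised.
+ */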
+ ret = hns3_reset_notify(h, HNAE3_DOWN_CLIENT);
+ if (ret)
+ return ret;
+
+ ret = hns3_reset_notify(h, HNAE3_UNINIT_CLIENT);
+ if (ret)
+ return ret;
+
+ ret = hns3_reset_notify(h, HNAE3_INIT_CLIENT);
+ if (ret)
+ return ret;
+
+ ret = hns3_reset_notify(h, HNAE3_UP_CLIENT);
+ if (ret)
+ hns3_reset_notify(h, HNAE3_UNINIT_CLIENT);
+
+ return ret;
+}
+
+static int hns3_set_tunable(struct net_device *netdev,
+ const struct ethtool_tunable *tuna,
+ const void *data)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ u32 old_tx_spare_buf_size, new_tx_spare_buf_size;
+ struct hnae3_handle *h = priv->ae_handle;
+ int i, ret = 0;
+
+ if (hns3_nic_resetting(netdev) || !priv->ring) {
+ netdev_err(netdev, "failed to set tunable value, dev resetting!");
+ return -EBUSY;
+ }
+
+ switch (tuna->id) {
+ case ETHTOOL_TX_COPYBREAK:
+ priv->tx_copybreak = *(u32 *)data;
+
+ for (i = 0; i < h->kinfo.num_tqps; i++)
+ priv->ring[i].tx_copybreak = priv->tx_copybreak;
+
+ break;
+ case ETHTOOL_RX_COPYBREAK:
+ priv->rx_copybreak = *(u32 *)data;
+
+ for (i = h->kinfo.num_tqps; i < h->kinfo.num_tqps * 2; i++)
+ priv->ring[i].rx_copybreak = priv->rx_copybreak;
+
+ break;
+ case ETHTOOL_TX_COPYBREAK_BUF_SIZE:
+ old_tx_spare_buf_size = h->kinfo.tx_spare_buf_size;
+ new_tx_spare_buf_size = *(u32 *)data;
+ netdev_info(netdev, "request to set tx spare buf size from %u to %u\n",
+ old_tx_spare_buf_size, new_tx_spare_buf_size);
+ ret = hns3_set_tx_spare_buf_size(netdev, new_tx_spare_buf_size);
+ if (ret ||
+ (!priv->ring->tx_spare && new_tx_spare_buf_size != 0)) {
+ int ret1;
+
+ netdev_warn(netdev, "change tx spare buf size fail, revert to old value\n");
+ ret1 = hns3_set_tx_spare_buf_size(netdev,
+ old_tx_spare_buf_size);
+ if (ret1) {
+ netdev_err(netdev, "revert to old tx spare buf size fail\n");
+ return ret1;
+ }
+
+ return ret;
+ }
+
+ if (!priv->ring->tx_spare)
+ netdev_info(netdev, "the active tx spare buf size is 0, disable tx spare buffer\n");
+ else
+ netdev_info(netdev, "the active tx spare buf size is %u, due to page order\n",
+ priv->ring->tx_spare->len);
+
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+
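+/* Coalesce and ring parameters the ethtool core is allowed to pass
+ * through to this driver.
+ */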
+#define HNS3_ETHTOOL_COALESCE (ETHTOOL_COALESCE_USECS | \
+ ETHTOOL_COALESCE_USE_ADAPTIVE | \
+ ETHTOOL_COALESCE_RX_USECS_HIGH | \
+ ETHTOOL_COALESCE_TX_USECS_HIGH | \
+ ETHTOOL_COALESCE_MAX_FRAMES | \
+ ETHTOOL_COALESCE_USE_CQE)
+
+#define HNS3_ETHTOOL_RING (ETHTOOL_RING_USE_RX_BUF_LEN | \
+ ETHTOOL_RING_USE_TX_PUSH)
+
+static int hns3_get_ts_info(struct net_device *netdev,
+ struct ethtool_ts_info *info)
+{
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
+
+ if (handle->ae_algo->ops->get_ts_info)
+ return handle->ae_algo->ops->get_ts_info(handle, info);
+
+ return ethtool_op_get_ts_info(netdev, info);
+}
+
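+/* Map the status codes reported by firmware through
+ * get_link_diagnosis_info() to the generic ethtool link extended states.
+ */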
+static const struct hns3_ethtool_link_ext_state_mapping
+hns3_link_ext_state_map[] = {
+ {1, ETHTOOL_LINK_EXT_STATE_AUTONEG,
+ ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_HCD},
+ {2, ETHTOOL_LINK_EXT_STATE_AUTONEG,
+ ETHTOOL_LINK_EXT_SUBSTATE_AN_ACK_NOT_RECEIVED},
+
+ {256, ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE,
+ ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_LINK_INHIBIT_TIMEOUT},
+ {257, ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE,
+ ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_LINK_PARTNER_DID_NOT_SET_RECEIVER_READY},
+ {512, ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE,
+ ETHTOOL_LINK_EXT_SUBSTATE_LT_REMOTE_FAULT},
+
+ {513, ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH,
+ ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_ACQUIRE_BLOCK_LOCK},
+ {514, ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH,
+ ETHTOOL_LINK_EXT_SUBSTATE_LLM_FC_FEC_IS_NOT_LOCKED},
+ {515, ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH,
+ ETHTOOL_LINK_EXT_SUBSTATE_LLM_RS_FEC_IS_NOT_LOCKED},
+
+ {768, ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY,
+ ETHTOOL_LINK_EXT_SUBSTATE_BSI_LARGE_NUMBER_OF_PHYSICAL_ERRORS},
+ {769, ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY,
+ ETHTOOL_LINK_EXT_SUBSTATE_BSI_SERDES_REFERENCE_CLOCK_LOST},
+ {770, ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY,
+ ETHTOOL_LINK_EXT_SUBSTATE_BSI_SERDES_ALOS},
+
+ {1024, ETHTOOL_LINK_EXT_STATE_NO_CABLE, 0},
+ {1025, ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE,
+ ETHTOOL_LINK_EXT_SUBSTATE_CI_UNSUPPORTED_CABLE},
+
+ {1026, ETHTOOL_LINK_EXT_STATE_EEPROM_ISSUE, 0},
+};
+
+static int hns3_get_link_ext_state(struct net_device *netdev,
+ struct ethtool_link_ext_state_info *info)
+{
+ const struct hns3_ethtool_link_ext_state_mapping *map;
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ u32 status_code, i;
+ int ret;
+
+ if (netif_carrier_ok(netdev))
+ return -ENODATA;
+
+ if (!h->ae_algo->ops->get_link_diagnosis_info)
+ return -EOPNOTSUPP;
+
+ ret = h->ae_algo->ops->get_link_diagnosis_info(h, &status_code);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(hns3_link_ext_state_map); i++) {
+ map = &hns3_link_ext_state_map[i];
+ if (map->status_code == status_code) {
+ info->link_ext_state = map->link_ext_state;
+ info->__link_ext_substate = map->link_ext_substate;
+ return 0;
+ }
+ }
+
+ return -ENODATA;
+}
+
+static void hns3_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
+ const struct hnae3_ae_ops *ops = hns3_get_ops(handle);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(handle);
+
+ if (!hnae3_ae_dev_wol_supported(ae_dev))
+ return;
+
+ ops->get_wol(handle, wol);
+}
+
+static int hns3_set_wol(struct net_device *netdev,
+ struct ethtool_wolinfo *wol)
+{
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
+ const struct hnae3_ae_ops *ops = hns3_get_ops(handle);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(handle);
+
+ if (!hnae3_ae_dev_wol_supported(ae_dev))
+ return -EOPNOTSUPP;
+
+ return ops->set_wol(handle, wol);
+}
+
+static const struct ethtool_ops hns3vf_ethtool_ops = {
+ .supported_coalesce_params = HNS3_ETHTOOL_COALESCE,
+ .supported_ring_params = HNS3_ETHTOOL_RING,
+ .get_drvinfo = hns3_get_drvinfo,
+ .get_ringparam = hns3_get_ringparam,
+ .set_ringparam = hns3_set_ringparam,
+ .get_strings = hns3_get_strings,
+ .get_ethtool_stats = hns3_get_stats,
+ .get_sset_count = hns3_get_sset_count,
+ .get_rxnfc = hns3_get_rxnfc,
+ .set_rxnfc = hns3_set_rxnfc,
+ .get_rxfh_key_size = hns3_get_rss_key_size,
+ .get_rxfh_indir_size = hns3_get_rss_indir_size,
+ .get_rxfh = hns3_get_rss,
+ .set_rxfh = hns3_set_rss,
+ .get_link_ksettings = hns3_get_link_ksettings,
+ .get_channels = hns3_get_channels,
+ .set_channels = hns3_set_channels,
+ .get_coalesce = hns3_get_coalesce,
+ .set_coalesce = hns3_set_coalesce,
+ .get_regs_len = hns3_get_regs_len,
+ .get_regs = hns3_get_regs,
+ .get_link = hns3_get_link,
+ .get_msglevel = hns3_get_msglevel,
+ .set_msglevel = hns3_set_msglevel,
+ .get_priv_flags = hns3_get_priv_flags,
+ .set_priv_flags = hns3_set_priv_flags,
+ .get_tunable = hns3_get_tunable,
+ .set_tunable = hns3_set_tunable,
+ .reset = hns3_set_reset,
+};
+
+static const struct ethtool_ops hns3_ethtool_ops = {
+ .supported_coalesce_params = HNS3_ETHTOOL_COALESCE,
+ .supported_ring_params = HNS3_ETHTOOL_RING,
+ .cap_link_lanes_supported = true,
+ .self_test = hns3_self_test,
+ .get_drvinfo = hns3_get_drvinfo,
+ .get_link = hns3_get_link,
+ .get_ringparam = hns3_get_ringparam,
+ .set_ringparam = hns3_set_ringparam,
+ .get_pauseparam = hns3_get_pauseparam,
+ .set_pauseparam = hns3_set_pauseparam,
+ .get_strings = hns3_get_strings,
+ .get_ethtool_stats = hns3_get_stats,
+ .get_sset_count = hns3_get_sset_count,
+ .get_rxnfc = hns3_get_rxnfc,
+ .set_rxnfc = hns3_set_rxnfc,
+ .get_rxfh_key_size = hns3_get_rss_key_size,
+ .get_rxfh_indir_size = hns3_get_rss_indir_size,
+ .get_rxfh = hns3_get_rss,
+ .set_rxfh = hns3_set_rss,
+ .get_link_ksettings = hns3_get_link_ksettings,
+ .set_link_ksettings = hns3_set_link_ksettings,
+ .nway_reset = hns3_nway_reset,
+ .get_channels = hns3_get_channels,
+ .set_channels = hns3_set_channels,
+ .get_coalesce = hns3_get_coalesce,
+ .set_coalesce = hns3_set_coalesce,
+ .get_regs_len = hns3_get_regs_len,
+ .get_regs = hns3_get_regs,
+ .set_phys_id = hns3_set_phys_id,
+ .get_msglevel = hns3_get_msglevel,
+ .set_msglevel = hns3_set_msglevel,
+ .get_fecparam = hns3_get_fecparam,
+ .set_fecparam = hns3_set_fecparam,
+ .get_fec_stats = hns3_get_fec_stats,
+ .get_module_info = hns3_get_module_info,
+ .get_module_eeprom = hns3_get_module_eeprom,
+ .get_priv_flags = hns3_get_priv_flags,
+ .set_priv_flags = hns3_set_priv_flags,
+ .get_ts_info = hns3_get_ts_info,
+ .get_tunable = hns3_get_tunable,
+ .set_tunable = hns3_set_tunable,
+ .reset = hns3_set_reset,
+ .get_link_ext_state = hns3_get_link_ext_state,
+ .get_wol = hns3_get_wol,
+ .set_wol = hns3_set_wol,
+};
+
+void hns3_ethtool_set_ops(struct net_device *netdev)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (h->flags & HNAE3_SUPPORT_VF)
+ netdev->ethtool_ops = &hns3vf_ethtool_ops;
+ else
+ netdev->ethtool_ops = &hns3_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.h b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.h
new file mode 100644
index 0000000000..da207d1d9a
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+// Copyright (c) 2021 Hisilicon Limited.
+
+#ifndef __HNS3_ETHTOOL_H
+#define __HNS3_ETHTOOL_H
+
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+
+struct hns3_stats {
+ char stats_string[ETH_GSTRING_LEN];
+ int stats_offset;
+};
+
+struct hns3_sfp_type {
+ u8 type;
+ u8 ext_type;
+};
+
+struct hns3_pflag_desc {
+ char name[ETH_GSTRING_LEN];
+ void (*handler)(struct net_device *netdev, bool enable);
+};
+
+struct hns3_ethtool_link_ext_state_mapping {
+ u32 status_code;
+ enum ethtool_link_ext_state link_ext_state;
+ u8 link_ext_substate;
+};
+
+struct hns3_ring_param {
+ u32 tx_desc_num;
+ u32 rx_desc_num;
+ u32 rx_buf_len;
+};
+
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_trace.h b/drivers/net/ethernet/hisilicon/hns3/hns3_trace.h
new file mode 100644
index 0000000000..b8a1ecb4b8
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_trace.h
@@ -0,0 +1,138 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2018-2019 Hisilicon Limited. */
+
+/* This must be outside ifdef _HNS3_TRACE_H_ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hns3
+
+#if !defined(_HNS3_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _HNS3_TRACE_H_
+
+#include <linux/tracepoint.h>
+
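+/* Number of u32 words in one hardware descriptor; the tracepoints below
+ * dump each descriptor verbatim as this many words.
+ */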
+#define DESC_NR (sizeof(struct hns3_desc) / sizeof(u32))
+
+DECLARE_EVENT_CLASS(hns3_skb_template,
+ TP_PROTO(struct sk_buff *skb),
+ TP_ARGS(skb),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, headlen)
+ __field(unsigned int, len)
+ __field(__u8, nr_frags)
+ __field(__u8, ip_summed)
+ __field(unsigned int, hdr_len)
+ __field(unsigned short, gso_size)
+ __field(unsigned short, gso_segs)
+ __field(unsigned int, gso_type)
+ __field(bool, fraglist)
+ __array(__u32, size, MAX_SKB_FRAGS)
+ ),
+
+ TP_fast_assign(
+ __entry->headlen = skb_headlen(skb);
+ __entry->len = skb->len;
+ __entry->nr_frags = skb_shinfo(skb)->nr_frags;
+ __entry->gso_size = skb_shinfo(skb)->gso_size;
+ __entry->gso_segs = skb_shinfo(skb)->gso_segs;
+ __entry->gso_type = skb_shinfo(skb)->gso_type;
+ __entry->hdr_len = skb->encapsulation ?
+ skb_inner_tcp_all_headers(skb) : skb_tcp_all_headers(skb);
+ __entry->ip_summed = skb->ip_summed;
+ __entry->fraglist = skb_has_frag_list(skb);
+ hns3_shinfo_pack(skb_shinfo(skb), __entry->size);
+ ),
+
+ TP_printk(
+ "len: %u, %u, %u, cs: %u, gso: %u, %u, %x, frag(%d %u): %s",
+ __entry->headlen, __entry->len, __entry->hdr_len,
+ __entry->ip_summed, __entry->gso_size, __entry->gso_segs,
+ __entry->gso_type, __entry->fraglist, __entry->nr_frags,
+ __print_array(__entry->size, MAX_SKB_FRAGS, sizeof(__u32))
+ )
+);
+
+DEFINE_EVENT(hns3_skb_template, hns3_over_max_bd,
+ TP_PROTO(struct sk_buff *skb),
+ TP_ARGS(skb));
+
+DEFINE_EVENT(hns3_skb_template, hns3_gro,
+ TP_PROTO(struct sk_buff *skb),
+ TP_ARGS(skb));
+
+DEFINE_EVENT(hns3_skb_template, hns3_tso,
+ TP_PROTO(struct sk_buff *skb),
+ TP_ARGS(skb));
+
+TRACE_EVENT(hns3_tx_desc,
+ TP_PROTO(struct hns3_enet_ring *ring, int cur_ntu),
+ TP_ARGS(ring, cur_ntu),
+
+ TP_STRUCT__entry(
+ __field(int, index)
+ __field(int, ntu)
+ __field(int, ntc)
+ __field(dma_addr_t, desc_dma)
+ __array(u32, desc, DESC_NR)
+ __string(devname, ring->tqp->handle->kinfo.netdev->name)
+ ),
+
+ TP_fast_assign(
+ __entry->index = ring->tqp->tqp_index;
+ __entry->ntu = ring->next_to_use;
+ __entry->ntc = ring->next_to_clean;
+ __entry->desc_dma = ring->desc_dma_addr;
+ memcpy(__entry->desc, &ring->desc[cur_ntu],
+ sizeof(struct hns3_desc));
+ __assign_str(devname, ring->tqp->handle->kinfo.netdev->name);
+ ),
+
+ TP_printk(
+ "%s-%d-%d/%d desc(%pad): %s",
+ __get_str(devname), __entry->index, __entry->ntu,
+ __entry->ntc, &__entry->desc_dma,
+ __print_array(__entry->desc, DESC_NR, sizeof(u32))
+ )
+);
+
+TRACE_EVENT(hns3_rx_desc,
+ TP_PROTO(struct hns3_enet_ring *ring),
+ TP_ARGS(ring),
+
+ TP_STRUCT__entry(
+ __field(int, index)
+ __field(int, ntu)
+ __field(int, ntc)
+ __field(dma_addr_t, desc_dma)
+ __field(dma_addr_t, buf_dma)
+ __array(u32, desc, DESC_NR)
+ __string(devname, ring->tqp->handle->kinfo.netdev->name)
+ ),
+
+ TP_fast_assign(
+ __entry->index = ring->tqp->tqp_index;
+ __entry->ntu = ring->next_to_use;
+ __entry->ntc = ring->next_to_clean;
+ __entry->desc_dma = ring->desc_dma_addr;
+ __entry->buf_dma = ring->desc_cb[ring->next_to_clean].dma;
+ memcpy(__entry->desc, &ring->desc[ring->next_to_clean],
+ sizeof(struct hns3_desc));
+ __assign_str(devname, ring->tqp->handle->kinfo.netdev->name);
+ ),
+
+ TP_printk(
+ "%s-%d-%d/%d desc(%pad) buf(%pad): %s",
+ __get_str(devname), __entry->index, __entry->ntu,
+ __entry->ntc, &__entry->desc_dma, &__entry->buf_dma,
+ __print_array(__entry->desc, DESC_NR, sizeof(u32))
+ )
+);
+
+#endif /* _HNS3_TRACE_H_ */
+
+/* This must be outside ifdef _HNS3_TRACE_H_ */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE hns3_trace
+#include <trace/define_trace.h>
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
new file mode 100644
index 0000000000..4d15eb73b9
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -0,0 +1,891 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+// Copyright (c) 2016-2017 Hisilicon Limited.
+
+#ifndef __HCLGE_CMD_H
+#define __HCLGE_CMD_H
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/etherdevice.h>
+#include "hnae3.h"
+#include "hclge_comm_cmd.h"
+
+struct hclge_dev;
+
+#define HCLGE_CMDQ_RX_INVLD_B 0
+#define HCLGE_CMDQ_RX_OUTVLD_B 1
+
+struct hclge_misc_vector {
+ u8 __iomem *addr;
+ int vector_irq;
+ char name[HNAE3_INT_NAME_LEN];
+};
+
+#define hclge_cmd_setup_basic_desc(desc, opcode, is_read) \
+ hclge_comm_cmd_setup_basic_desc(desc, opcode, is_read)
+
+#define HCLGE_TQP_REG_OFFSET 0x80000
+#define HCLGE_TQP_REG_SIZE 0x200
+
+#define HCLGE_TQP_MAX_SIZE_DEV_V2 1024
+#define HCLGE_TQP_EXT_REG_OFFSET 0x100
+
+#define HCLGE_RCB_INIT_QUERY_TIMEOUT 10
+#define HCLGE_RCB_INIT_FLAG_EN_B 0
+#define HCLGE_RCB_INIT_FLAG_FINI_B 8
+struct hclge_config_rcb_init_cmd {
+ __le16 rcb_init_flag;
+ u8 rsv[22];
+};
+
+struct hclge_tqp_map_cmd {
+ __le16 tqp_id; /* Absolute tqp id in this pf */
+ u8 tqp_vf; /* VF id */
+#define HCLGE_TQP_MAP_TYPE_PF 0
+#define HCLGE_TQP_MAP_TYPE_VF 1
+#define HCLGE_TQP_MAP_TYPE_B 0
+#define HCLGE_TQP_MAP_EN_B 1
+ u8 tqp_flag; /* Indicate it's pf or vf tqp */
+ __le16 tqp_vid; /* Virtual id in this pf/vf */
+ u8 rsv[18];
+};
+
+#define HCLGE_VECTOR_ELEMENTS_PER_CMD 10
+
+enum hclge_int_type {
+ HCLGE_INT_TX,
+ HCLGE_INT_RX,
+ HCLGE_INT_EVENT,
+};
+
+struct hclge_ctrl_vector_chain_cmd {
+#define HCLGE_VECTOR_ID_L_S 0
+#define HCLGE_VECTOR_ID_L_M GENMASK(7, 0)
+ u8 int_vector_id_l;
+ u8 int_cause_num;
+#define HCLGE_INT_TYPE_S 0
+#define HCLGE_INT_TYPE_M GENMASK(1, 0)
+#define HCLGE_TQP_ID_S 2
+#define HCLGE_TQP_ID_M GENMASK(12, 2)
+#define HCLGE_INT_GL_IDX_S 13
+#define HCLGE_INT_GL_IDX_M GENMASK(14, 13)
+ __le16 tqp_type_and_id[HCLGE_VECTOR_ELEMENTS_PER_CMD];
+ u8 vfid;
+#define HCLGE_VECTOR_ID_H_S 8
+#define HCLGE_VECTOR_ID_H_M GENMASK(15, 8)
+ u8 int_vector_id_h;
+};
+
+#define HCLGE_MAX_TC_NUM 8
+#define HCLGE_TC0_PRI_BUF_EN_B 15 /* Bit 15 indicates enable or not */
+#define HCLGE_BUF_UNIT_S 7 /* Buf size is in units of 128 bytes */
+struct hclge_tx_buff_alloc_cmd {
+ __le16 tx_pkt_buff[HCLGE_MAX_TC_NUM];
+ u8 tx_buff_rsv[8];
+};
+
+struct hclge_rx_priv_buff_cmd {
+ __le16 buf_num[HCLGE_MAX_TC_NUM];
+ __le16 shared_buf;
+ u8 rsv[6];
+};
+
+#define HCLGE_RX_PRIV_EN_B 15
+#define HCLGE_TC_NUM_ONE_DESC 4
+struct hclge_priv_wl {
+ __le16 high;
+ __le16 low;
+};
+
+struct hclge_rx_priv_wl_buf {
+ struct hclge_priv_wl tc_wl[HCLGE_TC_NUM_ONE_DESC];
+};
+
+struct hclge_rx_com_thrd {
+ struct hclge_priv_wl com_thrd[HCLGE_TC_NUM_ONE_DESC];
+};
+
+struct hclge_rx_com_wl {
+ struct hclge_priv_wl com_wl;
+};
+
+struct hclge_waterline {
+ u32 low;
+ u32 high;
+};
+
+struct hclge_tc_thrd {
+ u32 low;
+ u32 high;
+};
+
+struct hclge_priv_buf {
+ struct hclge_waterline wl; /* Waterline for low and high */
+ u32 buf_size; /* TC private buffer size */
+ u32 tx_buf_size;
+ u32 enable; /* Enable TC private buffer or not */
+};
+
+struct hclge_shared_buf {
+ struct hclge_waterline self;
+ struct hclge_tc_thrd tc_thrd[HCLGE_MAX_TC_NUM];
+ u32 buf_size;
+};
+
+struct hclge_pkt_buf_alloc {
+ struct hclge_priv_buf priv_buf[HCLGE_MAX_TC_NUM];
+ struct hclge_shared_buf s_buf;
+};
+
+#define HCLGE_RX_COM_WL_EN_B 15
+struct hclge_rx_com_wl_buf_cmd {
+ __le16 high_wl;
+ __le16 low_wl;
+ u8 rsv[20];
+};
+
+#define HCLGE_RX_PKT_EN_B 15
+struct hclge_rx_pkt_buf_cmd {
+ __le16 high_pkt;
+ __le16 low_pkt;
+ u8 rsv[20];
+};
+
+#define HCLGE_PF_STATE_DONE_B 0
+#define HCLGE_PF_STATE_MAIN_B 1
+#define HCLGE_PF_STATE_BOND_B 2
+#define HCLGE_PF_STATE_MAC_N_B 6
+#define HCLGE_PF_MAC_NUM_MASK 0x3
+#define HCLGE_PF_STATE_MAIN BIT(HCLGE_PF_STATE_MAIN_B)
+#define HCLGE_PF_STATE_DONE BIT(HCLGE_PF_STATE_DONE_B)
+#define HCLGE_VF_RST_STATUS_CMD 4
+
+struct hclge_func_status_cmd {
+ __le32 vf_rst_state[HCLGE_VF_RST_STATUS_CMD];
+ u8 pf_state;
+ u8 mac_id;
+ u8 rsv1;
+ u8 pf_cnt_in_mac;
+ u8 pf_num;
+ u8 vf_num;
+ u8 rsv[2];
+};
+
+struct hclge_pf_res_cmd {
+ __le16 tqp_num;
+ __le16 buf_size;
+ __le16 msixcap_localid_ba_nic;
+ __le16 msixcap_localid_number_nic;
+ __le16 pf_intr_vector_number_roce;
+ __le16 pf_own_fun_number;
+ __le16 tx_buf_size;
+ __le16 dv_buf_size;
+ __le16 ext_tqp_num;
+ u8 rsv[6];
+};
+
+#define HCLGE_CFG_OFFSET_S 0
+#define HCLGE_CFG_OFFSET_M GENMASK(19, 0)
+#define HCLGE_CFG_RD_LEN_S 24
+#define HCLGE_CFG_RD_LEN_M GENMASK(27, 24)
+#define HCLGE_CFG_RD_LEN_BYTES 16
+#define HCLGE_CFG_RD_LEN_UNIT 4
+
+#define HCLGE_CFG_TC_NUM_S 8
+#define HCLGE_CFG_TC_NUM_M GENMASK(15, 8)
+#define HCLGE_CFG_TQP_DESC_N_S 16
+#define HCLGE_CFG_TQP_DESC_N_M GENMASK(31, 16)
+#define HCLGE_CFG_PHY_ADDR_S 0
+#define HCLGE_CFG_PHY_ADDR_M GENMASK(7, 0)
+#define HCLGE_CFG_MEDIA_TP_S 8
+#define HCLGE_CFG_MEDIA_TP_M GENMASK(15, 8)
+#define HCLGE_CFG_RX_BUF_LEN_S 16
+#define HCLGE_CFG_RX_BUF_LEN_M GENMASK(31, 16)
+#define HCLGE_CFG_MAC_ADDR_H_S 0
+#define HCLGE_CFG_MAC_ADDR_H_M GENMASK(15, 0)
+#define HCLGE_CFG_DEFAULT_SPEED_S 16
+#define HCLGE_CFG_DEFAULT_SPEED_M GENMASK(23, 16)
+#define HCLGE_CFG_RSS_SIZE_S 24
+#define HCLGE_CFG_RSS_SIZE_M GENMASK(31, 24)
+#define HCLGE_CFG_SPEED_ABILITY_S 0
+#define HCLGE_CFG_SPEED_ABILITY_M GENMASK(7, 0)
+#define HCLGE_CFG_SPEED_ABILITY_EXT_S 10
+#define HCLGE_CFG_SPEED_ABILITY_EXT_M GENMASK(15, 10)
+#define HCLGE_CFG_VLAN_FLTR_CAP_S 8
+#define HCLGE_CFG_VLAN_FLTR_CAP_M GENMASK(9, 8)
+#define HCLGE_CFG_UMV_TBL_SPACE_S 16
+#define HCLGE_CFG_UMV_TBL_SPACE_M GENMASK(31, 16)
+#define HCLGE_CFG_PF_RSS_SIZE_S 0
+#define HCLGE_CFG_PF_RSS_SIZE_M GENMASK(3, 0)
+#define HCLGE_CFG_TX_SPARE_BUF_SIZE_S 4
+#define HCLGE_CFG_TX_SPARE_BUF_SIZE_M GENMASK(15, 4)
+
+#define HCLGE_CFG_CMD_CNT 4
+
+struct hclge_cfg_param_cmd {
+ __le32 offset;
+ __le32 rsv;
+ __le32 param[HCLGE_CFG_CMD_CNT];
+};
+
+#define HCLGE_MAC_MODE 0x0
+#define HCLGE_DESC_NUM 0x40
+
+#define HCLGE_ALLOC_VALID_B 0
+struct hclge_vf_num_cmd {
+ u8 alloc_valid;
+ u8 rsv[23];
+};
+
+#define HCLGE_RSS_DEFAULT_OUTPORT_B 4
+
+#define HCLGE_RSS_CFG_TBL_SIZE_H 4
+#define HCLGE_RSS_CFG_TBL_BW_L 8U
+
+#define HCLGE_RSS_TC_OFFSET_S 0
+#define HCLGE_RSS_TC_OFFSET_M GENMASK(10, 0)
+#define HCLGE_RSS_TC_SIZE_MSB_B 11
+#define HCLGE_RSS_TC_SIZE_S 12
+#define HCLGE_RSS_TC_SIZE_M GENMASK(14, 12)
+#define HCLGE_RSS_TC_SIZE_MSB_OFFSET 3
+#define HCLGE_RSS_TC_VALID_B 15
+
+#define HCLGE_LINK_STATUS_UP_B 0
+#define HCLGE_LINK_STATUS_UP_M BIT(HCLGE_LINK_STATUS_UP_B)
+struct hclge_link_status_cmd {
+ u8 status;
+ u8 rsv[23];
+};
+
+/* for DEVICE_VERSION_V1/2, reference to promisc cmd byte8 */
+#define HCLGE_PROMISC_EN_UC 1
+#define HCLGE_PROMISC_EN_MC 2
+#define HCLGE_PROMISC_EN_BC 3
+#define HCLGE_PROMISC_TX_EN 4
+#define HCLGE_PROMISC_RX_EN 5
+
+/* for DEVICE_VERSION_V3, reference to promisc cmd byte10 */
+#define HCLGE_PROMISC_UC_RX_EN 2
+#define HCLGE_PROMISC_MC_RX_EN 3
+#define HCLGE_PROMISC_BC_RX_EN 4
+#define HCLGE_PROMISC_UC_TX_EN 5
+#define HCLGE_PROMISC_MC_TX_EN 6
+#define HCLGE_PROMISC_BC_TX_EN 7
+
+struct hclge_promisc_cfg_cmd {
+ u8 promisc;
+ u8 vf_id;
+ u8 extend_promisc;
+ u8 rsv0[21];
+};
+
+enum hclge_promisc_type {
+ HCLGE_UNICAST = 1,
+ HCLGE_MULTICAST = 2,
+ HCLGE_BROADCAST = 3,
+};
+
+#define HCLGE_MAC_TX_EN_B 6
+#define HCLGE_MAC_RX_EN_B 7
+#define HCLGE_MAC_PAD_TX_B 11
+#define HCLGE_MAC_PAD_RX_B 12
+#define HCLGE_MAC_1588_TX_B 13
+#define HCLGE_MAC_1588_RX_B 14
+#define HCLGE_MAC_APP_LP_B 15
+#define HCLGE_MAC_LINE_LP_B 16
+#define HCLGE_MAC_FCS_TX_B 17
+#define HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B 18
+#define HCLGE_MAC_RX_FCS_STRIP_B 19
+#define HCLGE_MAC_RX_FCS_B 20
+#define HCLGE_MAC_TX_UNDER_MIN_ERR_B 21
+#define HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B 22
+
+struct hclge_config_mac_mode_cmd {
+ __le32 txrx_pad_fcs_loop_en;
+ u8 rsv[20];
+};
+
+struct hclge_pf_rst_sync_cmd {
+#define HCLGE_PF_RST_ALL_VF_RDY_B 0
+ u8 all_vf_ready;
+ u8 rsv[23];
+};
+
+#define HCLGE_CFG_SPEED_S 0
+#define HCLGE_CFG_SPEED_M GENMASK(5, 0)
+
+#define HCLGE_CFG_DUPLEX_B 7
+#define HCLGE_CFG_DUPLEX_M BIT(HCLGE_CFG_DUPLEX_B)
+
+struct hclge_config_mac_speed_dup_cmd {
+ u8 speed_dup;
+
+#define HCLGE_CFG_MAC_SPEED_CHANGE_EN_B 0
+ u8 mac_change_fec_en;
+ u8 rsv[4];
+ u8 lane_num;
+ u8 rsv1[17];
+};
+
+#define HCLGE_TQP_ENABLE_B 0
+
+#define HCLGE_MAC_CFG_AN_EN_B 0
+#define HCLGE_MAC_CFG_AN_INT_EN_B 1
+#define HCLGE_MAC_CFG_AN_INT_MSK_B 2
+#define HCLGE_MAC_CFG_AN_INT_CLR_B 3
+#define HCLGE_MAC_CFG_AN_RST_B 4
+
+#define HCLGE_MAC_CFG_AN_EN BIT(HCLGE_MAC_CFG_AN_EN_B)
+
+struct hclge_config_auto_neg_cmd {
+ __le32 cfg_an_cmd_flag;
+ u8 rsv[20];
+};
+
+struct hclge_sfp_info_cmd {
+ __le32 speed;
+ u8 query_type; /* 0: sfp speed, 1: active speed */
+ u8 active_fec;
+ u8 autoneg; /* autoneg state */
+ u8 autoneg_ability; /* whether autoneg is supported */
+ __le32 speed_ability; /* speed ability for current media */
+ __le32 module_type;
+ u8 fec_ability;
+ u8 lane_num;
+ u8 rsv[6];
+};
+
+#define HCLGE_MAC_CFG_FEC_AUTO_EN_B 0
+#define HCLGE_MAC_CFG_FEC_MODE_S 1
+#define HCLGE_MAC_CFG_FEC_MODE_M GENMASK(3, 1)
+#define HCLGE_MAC_CFG_FEC_SET_DEF_B 0
+#define HCLGE_MAC_CFG_FEC_CLR_DEF_B 1
+
+#define HCLGE_MAC_FEC_OFF 0
+#define HCLGE_MAC_FEC_BASER 1
+#define HCLGE_MAC_FEC_RS 2
+#define HCLGE_MAC_FEC_LLRS 3
+struct hclge_config_fec_cmd {
+ u8 fec_mode;
+ u8 default_config;
+ u8 rsv[22];
+};
+
+#define HCLGE_FEC_STATS_CMD_NUM 4
+
+struct hclge_query_fec_stats_cmd {
+ /* fec rs mode total stats */
+ __le32 rs_fec_corr_blocks;
+ __le32 rs_fec_uncorr_blocks;
+ __le32 rs_fec_error_blocks;
+ /* fec base-r mode per lanes stats */
+ u8 base_r_lane_num;
+ u8 rsv[3];
+ __le32 base_r_fec_corr_blocks;
+ __le32 base_r_fec_uncorr_blocks;
+};
+
+#define HCLGE_MAC_UPLINK_PORT 0x100
+
+struct hclge_config_max_frm_size_cmd {
+ __le16 max_frm_size;
+ u8 min_frm_size;
+ u8 rsv[21];
+};
+
+enum hclge_mac_vlan_tbl_opcode {
+ HCLGE_MAC_VLAN_ADD, /* Add new or modify mac_vlan */
+ HCLGE_MAC_VLAN_UPDATE, /* Modify other fields of this table */
+ HCLGE_MAC_VLAN_REMOVE, /* Remove an entry through mac_vlan key */
+ HCLGE_MAC_VLAN_LKUP, /* Look up an entry through mac_vlan key */
+};
+
+enum hclge_mac_vlan_add_resp_code {
+ HCLGE_ADD_UC_OVERFLOW = 2, /* ADD failed for UC overflow */
+ HCLGE_ADD_MC_OVERFLOW, /* ADD failed for MC overflow */
+};
+
+#define HCLGE_MAC_VLAN_BIT0_EN_B 0
+#define HCLGE_MAC_VLAN_BIT1_EN_B 1
+#define HCLGE_MAC_EPORT_SW_EN_B 12
+#define HCLGE_MAC_EPORT_TYPE_B 11
+#define HCLGE_MAC_EPORT_VFID_S 3
+#define HCLGE_MAC_EPORT_VFID_M GENMASK(10, 3)
+#define HCLGE_MAC_EPORT_PFID_S 0
+#define HCLGE_MAC_EPORT_PFID_M GENMASK(2, 0)
+struct hclge_mac_vlan_tbl_entry_cmd {
+ u8 flags;
+ u8 resp_code;
+ __le16 vlan_tag;
+ __le32 mac_addr_hi32;
+ __le16 mac_addr_lo16;
+ __le16 rsv1;
+ u8 entry_type;
+ u8 mc_mac_en;
+ __le16 egress_port;
+ __le16 egress_queue;
+ u8 rsv2[6];
+};
+
+#define HCLGE_UMV_SPC_ALC_B 0
+struct hclge_umv_spc_alc_cmd {
+ u8 allocate;
+ u8 rsv1[3];
+ __le32 space_size;
+ u8 rsv2[16];
+};
+
+#define HCLGE_MAC_MGR_MASK_VLAN_B BIT(0)
+#define HCLGE_MAC_MGR_MASK_MAC_B BIT(1)
+#define HCLGE_MAC_MGR_MASK_ETHERTYPE_B BIT(2)
+
+struct hclge_mac_mgr_tbl_entry_cmd {
+ u8 flags;
+ u8 resp_code;
+ __le16 vlan_tag;
+ u8 mac_addr[ETH_ALEN];
+ __le16 rsv1;
+ __le16 ethter_type;
+ __le16 egress_port;
+ __le16 egress_queue;
+ u8 sw_port_id_aware;
+ u8 rsv2;
+ u8 i_port_bitmap;
+ u8 i_port_direction;
+ u8 rsv3[2];
+};
+
+struct hclge_vlan_filter_ctrl_cmd {
+ u8 vlan_type;
+ u8 vlan_fe;
+ u8 rsv1[2];
+ u8 vf_id;
+ u8 rsv2[19];
+};
+
+#define HCLGE_VLAN_ID_OFFSET_STEP 160
+#define HCLGE_VLAN_BYTE_SIZE 8
+#define HCLGE_VLAN_OFFSET_BITMAP \
+ (HCLGE_VLAN_ID_OFFSET_STEP / HCLGE_VLAN_BYTE_SIZE)
+
+struct hclge_vlan_filter_pf_cfg_cmd {
+ u8 vlan_offset;
+ u8 vlan_cfg;
+ u8 rsv[2];
+ u8 vlan_offset_bitmap[HCLGE_VLAN_OFFSET_BITMAP];
+};
+
+#define HCLGE_MAX_VF_BYTES 16
+
+struct hclge_vlan_filter_vf_cfg_cmd {
+ __le16 vlan_id;
+ u8 resp_code;
+ u8 rsv;
+ u8 vlan_cfg;
+ u8 rsv1[3];
+ u8 vf_bitmap[HCLGE_MAX_VF_BYTES];
+};
+
+#define HCLGE_INGRESS_BYPASS_B 0
+struct hclge_port_vlan_filter_bypass_cmd {
+ u8 bypass_state;
+ u8 rsv1[3];
+ u8 vf_id;
+ u8 rsv2[19];
+};
+
+#define HCLGE_SWITCH_ANTI_SPOOF_B 0U
+#define HCLGE_SWITCH_ALW_LPBK_B 1U
+#define HCLGE_SWITCH_ALW_LCL_LPBK_B 2U
+#define HCLGE_SWITCH_ALW_DST_OVRD_B 3U
+#define HCLGE_SWITCH_NO_MASK 0x0
+#define HCLGE_SWITCH_ANTI_SPOOF_MASK 0xFE
+#define HCLGE_SWITCH_ALW_LPBK_MASK 0xFD
+#define HCLGE_SWITCH_ALW_LCL_LPBK_MASK 0xFB
+#define HCLGE_SWITCH_LW_DST_OVRD_MASK 0xF7
+
+struct hclge_mac_vlan_switch_cmd {
+ u8 roce_sel;
+ u8 rsv1[3];
+ __le32 func_id;
+ u8 switch_param;
+ u8 rsv2[3];
+ u8 param_mask;
+ u8 rsv3[11];
+};
+
+enum hclge_mac_vlan_cfg_sel {
+ HCLGE_MAC_VLAN_NIC_SEL = 0,
+ HCLGE_MAC_VLAN_ROCE_SEL,
+};
+
+#define HCLGE_ACCEPT_TAG1_B 0
+#define HCLGE_ACCEPT_UNTAG1_B 1
+#define HCLGE_PORT_INS_TAG1_EN_B 2
+#define HCLGE_PORT_INS_TAG2_EN_B 3
+#define HCLGE_CFG_NIC_ROCE_SEL_B 4
+#define HCLGE_ACCEPT_TAG2_B 5
+#define HCLGE_ACCEPT_UNTAG2_B 6
+#define HCLGE_TAG_SHIFT_MODE_EN_B 7
+#define HCLGE_VF_NUM_PER_BYTE 8
+
+struct hclge_vport_vtag_tx_cfg_cmd {
+ u8 vport_vlan_cfg;
+ u8 vf_offset;
+ u8 rsv1[2];
+ __le16 def_vlan_tag1;
+ __le16 def_vlan_tag2;
+ u8 vf_bitmap[HCLGE_VF_NUM_PER_BYTE];
+ u8 rsv2[8];
+};
+
+#define HCLGE_REM_TAG1_EN_B 0
+#define HCLGE_REM_TAG2_EN_B 1
+#define HCLGE_SHOW_TAG1_EN_B 2
+#define HCLGE_SHOW_TAG2_EN_B 3
+#define HCLGE_DISCARD_TAG1_EN_B 5
+#define HCLGE_DISCARD_TAG2_EN_B 6
+struct hclge_vport_vtag_rx_cfg_cmd {
+ u8 vport_vlan_cfg;
+ u8 vf_offset;
+ u8 rsv1[6];
+ u8 vf_bitmap[HCLGE_VF_NUM_PER_BYTE];
+ u8 rsv2[8];
+};
+
+struct hclge_tx_vlan_type_cfg_cmd {
+ __le16 ot_vlan_type;
+ __le16 in_vlan_type;
+ u8 rsv[20];
+};
+
+struct hclge_rx_vlan_type_cfg_cmd {
+ __le16 ot_fst_vlan_type;
+ __le16 ot_sec_vlan_type;
+ __le16 in_fst_vlan_type;
+ __le16 in_sec_vlan_type;
+ u8 rsv[16];
+};
+
+struct hclge_cfg_com_tqp_queue_cmd {
+ __le16 tqp_id;
+ __le16 stream_id;
+ u8 enable;
+ u8 rsv[19];
+};
+
+struct hclge_cfg_tx_queue_pointer_cmd {
+ __le16 tqp_id;
+ __le16 tx_tail;
+ __le16 tx_head;
+ __le16 fbd_num;
+ __le16 ring_offset;
+ u8 rsv[14];
+};
+
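+/* Packed so the structure layout matches the firmware command descriptor
+ * exactly, without compiler-inserted padding.
+ */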
+#pragma pack(1)
+struct hclge_mac_ethertype_idx_rd_cmd {
+ u8 flags;
+ u8 resp_code;
+ __le16 vlan_tag;
+ u8 mac_addr[ETH_ALEN];
+ __le16 index;
+ __le16 ethter_type;
+ __le16 egress_port;
+ __le16 egress_queue;
+ __le16 rev0;
+ u8 i_port_bitmap;
+ u8 i_port_direction;
+ u8 rev1[2];
+};
+
+#pragma pack()
+
+#define HCLGE_TSO_MSS_MIN_S 0
+#define HCLGE_TSO_MSS_MIN_M GENMASK(13, 0)
+
+#define HCLGE_TSO_MSS_MAX_S 16
+#define HCLGE_TSO_MSS_MAX_M GENMASK(29, 16)
+
+struct hclge_cfg_tso_status_cmd {
+ __le16 tso_mss_min;
+ __le16 tso_mss_max;
+ u8 rsv[20];
+};
+
+#define HCLGE_GRO_EN_B 0
+struct hclge_cfg_gro_status_cmd {
+ u8 gro_en;
+ u8 rsv[23];
+};
+
+#define HCLGE_TSO_MSS_MIN 256
+#define HCLGE_TSO_MSS_MAX 9668
+
+#define HCLGE_TQP_RESET_B 0
+struct hclge_reset_tqp_queue_cmd {
+ __le16 tqp_id;
+ u8 reset_req;
+ u8 ready_to_reset;
+ u8 rsv[20];
+};
+
+#define HCLGE_CFG_RESET_MAC_B 3
+#define HCLGE_CFG_RESET_FUNC_B 7
+#define HCLGE_CFG_RESET_RCB_B 1
+struct hclge_reset_cmd {
+ u8 mac_func_reset;
+ u8 fun_reset_vfid;
+ u8 fun_reset_rcb;
+ u8 rsv;
+ __le16 fun_reset_rcb_vqid_start;
+ __le16 fun_reset_rcb_vqid_num;
+ u8 fun_reset_rcb_return_status;
+ u8 rsv1[15];
+};
+
+#define HCLGE_PF_RESET_DONE_BIT BIT(0)
+
+struct hclge_pf_rst_done_cmd {
+ u8 pf_rst_done;
+ u8 rsv[23];
+};
+
+#define HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B BIT(0)
+#define HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B BIT(2)
+#define HCLGE_CMD_GE_PHY_INNER_LOOP_B BIT(3)
+#define HCLGE_CMD_COMMON_LB_DONE_B BIT(0)
+#define HCLGE_CMD_COMMON_LB_SUCCESS_B BIT(1)
+struct hclge_common_lb_cmd {
+ u8 mask;
+ u8 enable;
+ u8 result;
+ u8 rsv[21];
+};
+
+#define HCLGE_DEFAULT_TX_BUF 0x4000 /* 16k bytes */
+#define HCLGE_TOTAL_PKT_BUF 0x108000 /* 1.03125M bytes */
+#define HCLGE_DEFAULT_DV 0xA000 /* 40k byte */
+#define HCLGE_DEFAULT_NON_DCB_DV 0x7800 /* 30K byte */
+#define HCLGE_NON_DCB_ADDITIONAL_BUF 0x1400 /* 5120 byte */
+
+#define HCLGE_LED_LOCATE_STATE_S 0
+#define HCLGE_LED_LOCATE_STATE_M GENMASK(1, 0)
+
+struct hclge_set_led_state_cmd {
+ u8 rsv1[3];
+ u8 locate_led_config;
+ u8 rsv2[20];
+};
+
+struct hclge_get_fd_mode_cmd {
+ u8 mode;
+ u8 enable;
+ u8 rsv[22];
+};
+
+struct hclge_get_fd_allocation_cmd {
+ __le32 stage1_entry_num;
+ __le32 stage2_entry_num;
+ __le16 stage1_counter_num;
+ __le16 stage2_counter_num;
+ u8 rsv[12];
+};
+
+struct hclge_set_fd_key_config_cmd {
+ u8 stage;
+ u8 key_select;
+ u8 inner_sipv6_word_en;
+ u8 inner_dipv6_word_en;
+ u8 outer_sipv6_word_en;
+ u8 outer_dipv6_word_en;
+ u8 rsv1[2];
+ __le32 tuple_mask;
+ __le32 meta_data_mask;
+ u8 rsv2[8];
+};
+
+#define HCLGE_FD_EPORT_SW_EN_B 0
+struct hclge_fd_tcam_config_1_cmd {
+ u8 stage;
+ u8 xy_sel;
+ u8 port_info;
+ u8 rsv1[1];
+ __le32 index;
+ u8 entry_vld;
+ u8 rsv2[7];
+ u8 tcam_data[8];
+};
+
+struct hclge_fd_tcam_config_2_cmd {
+ u8 tcam_data[24];
+};
+
+struct hclge_fd_tcam_config_3_cmd {
+ u8 tcam_data[20];
+ u8 rsv[4];
+};
+
+#define HCLGE_FD_AD_DROP_B 0
+#define HCLGE_FD_AD_DIRECT_QID_B 1
+#define HCLGE_FD_AD_QID_S 2
+#define HCLGE_FD_AD_QID_M GENMASK(11, 2)
+#define HCLGE_FD_AD_USE_COUNTER_B 12
+#define HCLGE_FD_AD_COUNTER_NUM_S 13
+#define HCLGE_FD_AD_COUNTER_NUM_M GENMASK(20, 13)
+#define HCLGE_FD_AD_NXT_STEP_B 20
+#define HCLGE_FD_AD_NXT_KEY_S 21
+#define HCLGE_FD_AD_NXT_KEY_M GENMASK(25, 21)
+#define HCLGE_FD_AD_WR_RULE_ID_B 0
+#define HCLGE_FD_AD_RULE_ID_S 1
+#define HCLGE_FD_AD_RULE_ID_M GENMASK(12, 1)
+#define HCLGE_FD_AD_TC_OVRD_B 16
+#define HCLGE_FD_AD_TC_SIZE_S 17
+#define HCLGE_FD_AD_TC_SIZE_M GENMASK(20, 17)
+
+struct hclge_fd_ad_config_cmd {
+ u8 stage;
+ u8 rsv1[3];
+ __le32 index;
+ __le64 ad_data;
+ u8 rsv2[8];
+};
+
+struct hclge_fd_ad_cnt_read_cmd {
+ u8 rsv0[4];
+ __le16 index;
+ u8 rsv1[2];
+ __le64 cnt;
+ u8 rsv2[8];
+};
+
+#define HCLGE_FD_USER_DEF_OFT_S 0
+#define HCLGE_FD_USER_DEF_OFT_M GENMASK(14, 0)
+#define HCLGE_FD_USER_DEF_EN_B 15
+struct hclge_fd_user_def_cfg_cmd {
+ __le16 ol2_cfg;
+ __le16 l2_cfg;
+ __le16 ol3_cfg;
+ __le16 l3_cfg;
+ __le16 ol4_cfg;
+ __le16 l4_cfg;
+ u8 rsv[12];
+};
+
+struct hclge_get_imp_bd_cmd {
+ __le32 bd_num;
+ u8 rsv[20];
+};
+
+struct hclge_query_ppu_pf_other_int_dfx_cmd {
+ __le16 over_8bd_no_fe_qid;
+ __le16 over_8bd_no_fe_vf_id;
+ __le16 tso_mss_cmp_min_err_qid;
+ __le16 tso_mss_cmp_min_err_vf_id;
+ __le16 tso_mss_cmp_max_err_qid;
+ __le16 tso_mss_cmp_max_err_vf_id;
+ __le16 tx_rd_fbd_poison_qid;
+ __le16 tx_rd_fbd_poison_vf_id;
+ __le16 rx_rd_fbd_poison_qid;
+ __le16 rx_rd_fbd_poison_vf_id;
+ u8 rsv[4];
+};
+
+#define HCLGE_SFP_INFO_CMD_NUM 6
+#define HCLGE_SFP_INFO_BD0_LEN 20
+#define HCLGE_SFP_INFO_BDX_LEN 24
+#define HCLGE_SFP_INFO_MAX_LEN \
+ (HCLGE_SFP_INFO_BD0_LEN + \
+ (HCLGE_SFP_INFO_CMD_NUM - 1) * HCLGE_SFP_INFO_BDX_LEN)
+
+struct hclge_sfp_info_bd0_cmd {
+ __le16 offset;
+ __le16 read_len;
+ u8 data[HCLGE_SFP_INFO_BD0_LEN];
+};
+
+#define HCLGE_QUERY_DEV_SPECS_BD_NUM 4
+
+struct hclge_dev_specs_0_cmd {
+ __le32 rsv0;
+ __le32 mac_entry_num;
+ __le32 mng_entry_num;
+ __le16 rss_ind_tbl_size;
+ __le16 rss_key_size;
+ __le16 int_ql_max;
+ u8 max_non_tso_bd_num;
+ u8 rsv1;
+ __le32 max_tm_rate;
+};
+
+#define HCLGE_DEF_MAX_INT_GL 0x1FE0U
+
+struct hclge_dev_specs_1_cmd {
+ __le16 max_frm_size;
+ __le16 max_qset_num;
+ __le16 max_int_gl;
+ u8 rsv0[2];
+ __le16 umv_size;
+ __le16 mc_mac_size;
+ u8 rsv1[6];
+ u8 tnl_num;
+ u8 rsv2[5];
+};
+
+/* mac speed type defined in firmware command */
+enum HCLGE_FIRMWARE_MAC_SPEED {
+ HCLGE_FW_MAC_SPEED_1G,
+ HCLGE_FW_MAC_SPEED_10G,
+ HCLGE_FW_MAC_SPEED_25G,
+ HCLGE_FW_MAC_SPEED_40G,
+ HCLGE_FW_MAC_SPEED_50G,
+ HCLGE_FW_MAC_SPEED_100G,
+ HCLGE_FW_MAC_SPEED_10M,
+ HCLGE_FW_MAC_SPEED_100M,
+ HCLGE_FW_MAC_SPEED_200G,
+};
+
+#define HCLGE_PHY_LINK_SETTING_BD_NUM 2
+
+struct hclge_phy_link_ksetting_0_cmd {
+ __le32 speed;
+ u8 duplex;
+ u8 autoneg;
+ u8 eth_tp_mdix;
+ u8 eth_tp_mdix_ctrl;
+ u8 port;
+ u8 transceiver;
+ u8 phy_address;
+ u8 rsv;
+ __le32 supported;
+ __le32 advertising;
+ __le32 lp_advertising;
+};
+
+struct hclge_phy_link_ksetting_1_cmd {
+ u8 master_slave_cfg;
+ u8 master_slave_state;
+ u8 rsv[22];
+};
+
+struct hclge_phy_reg_cmd {
+ __le16 reg_addr;
+ u8 rsv0[2];
+ __le16 reg_val;
+ u8 rsv1[18];
+};
+
+struct hclge_wol_cfg_cmd {
+ __le32 wake_on_lan_mode;
+ u8 sopass[SOPASS_MAX];
+ u8 sopass_size;
+ u8 rsv[13];
+};
+
+struct hclge_query_wol_supported_cmd {
+ __le32 supported_wake_mode;
+ u8 rsv[20];
+};
+
+struct hclge_hw;
+int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num);
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
new file mode 100644
index 0000000000..b98301e205
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
@@ -0,0 +1,680 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
+
+#include "hclge_main.h"
+#include "hclge_dcb.h"
+#include "hclge_tm.h"
+#include "hnae3.h"
+
+#define BW_PERCENT 100
+
+static int hclge_ieee_ets_to_tm_info(struct hclge_dev *hdev,
+ struct ieee_ets *ets)
+{
+ u8 i;
+
+ for (i = 0; i < HNAE3_MAX_TC; i++) {
+ switch (ets->tc_tsa[i]) {
+ case IEEE_8021QAZ_TSA_STRICT:
+ hdev->tm_info.tc_info[i].tc_sch_mode =
+ HCLGE_SCH_MODE_SP;
+ hdev->tm_info.pg_info[0].tc_dwrr[i] = 0;
+ break;
+ case IEEE_8021QAZ_TSA_ETS:
+ hdev->tm_info.tc_info[i].tc_sch_mode =
+ HCLGE_SCH_MODE_DWRR;
+ hdev->tm_info.pg_info[0].tc_dwrr[i] =
+ ets->tc_tx_bw[i];
+ break;
+ default:
+			/* Hardware only supports the SP (strict priority)
+			 * and ETS (enhanced transmission selection)
+			 * algorithms; if dcbnl hands us any other value,
+			 * return an error.
+			 */
+ return -EINVAL;
+ }
+ }
+
+ hclge_tm_prio_tc_info_update(hdev, ets->prio_tc);
+
+ return 0;
+}
+
+static void hclge_tm_info_to_ieee_ets(struct hclge_dev *hdev,
+ struct ieee_ets *ets)
+{
+ u32 i;
+
+ memset(ets, 0, sizeof(*ets));
+ ets->willing = 1;
+ ets->ets_cap = hdev->tc_max;
+
+ for (i = 0; i < HNAE3_MAX_TC; i++) {
+ ets->prio_tc[i] = hdev->tm_info.prio_tc[i];
+ if (i < hdev->tm_info.num_tc)
+ ets->tc_tx_bw[i] = hdev->tm_info.pg_info[0].tc_dwrr[i];
+ else
+ ets->tc_tx_bw[i] = 0;
+
+ if (hdev->tm_info.tc_info[i].tc_sch_mode ==
+ HCLGE_SCH_MODE_SP)
+ ets->tc_tsa[i] = IEEE_8021QAZ_TSA_STRICT;
+ else
+ ets->tc_tsa[i] = IEEE_8021QAZ_TSA_ETS;
+ }
+}
+
+/* IEEE std */
+static int hclge_ieee_getets(struct hnae3_handle *h, struct ieee_ets *ets)
+{
+ struct hclge_vport *vport = hclge_get_vport(h);
+ struct hclge_dev *hdev = vport->back;
+
+ hclge_tm_info_to_ieee_ets(hdev, ets);
+
+ return 0;
+}
+
+static int hclge_dcb_common_validate(struct hclge_dev *hdev, u8 num_tc,
+ u8 *prio_tc)
+{
+ int i;
+
+ if (num_tc > hdev->tc_max) {
+ dev_err(&hdev->pdev->dev,
+ "tc num checking failed, %u > tc_max(%u)\n",
+ num_tc, hdev->tc_max);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
+ if (prio_tc[i] >= num_tc) {
+ dev_err(&hdev->pdev->dev,
+ "prio_tc[%d] checking failed, %u >= num_tc(%u)\n",
+ i, prio_tc[i], num_tc);
+ return -EINVAL;
+ }
+ }
+
+ if (num_tc > hdev->vport[0].alloc_tqps) {
+ dev_err(&hdev->pdev->dev,
+ "allocated tqp checking failed, %u > tqp(%u)\n",
+ num_tc, hdev->vport[0].alloc_tqps);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static u8 hclge_ets_tc_changed(struct hclge_dev *hdev, struct ieee_ets *ets,
+ bool *changed)
+{
+ u8 max_tc_id = 0;
+ u8 i;
+
+ for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
+ if (ets->prio_tc[i] != hdev->tm_info.prio_tc[i])
+ *changed = true;
+
+ if (ets->prio_tc[i] > max_tc_id)
+ max_tc_id = ets->prio_tc[i];
+ }
+
+	/* Return the max TC number: the highest TC id plus one. */
+ return max_tc_id + 1;
+}
+
+static int hclge_ets_sch_mode_validate(struct hclge_dev *hdev,
+ struct ieee_ets *ets, bool *changed,
+ u8 tc_num)
+{
+ bool has_ets_tc = false;
+ u32 total_ets_bw = 0;
+ u8 i;
+
+ for (i = 0; i < HNAE3_MAX_TC; i++) {
+ switch (ets->tc_tsa[i]) {
+ case IEEE_8021QAZ_TSA_STRICT:
+ if (hdev->tm_info.tc_info[i].tc_sch_mode !=
+ HCLGE_SCH_MODE_SP)
+ *changed = true;
+ break;
+ case IEEE_8021QAZ_TSA_ETS:
+ if (i >= tc_num) {
+ dev_err(&hdev->pdev->dev,
+ "tc%u is disabled, cannot set ets bw\n",
+ i);
+ return -EINVAL;
+ }
+
+			/* The hardware switches a TC to SP mode when its
+			 * bandwidth is 0, so the ETS bandwidth of an enabled
+			 * TC must be greater than 0.
+			 */
+ if (!ets->tc_tx_bw[i]) {
+ dev_err(&hdev->pdev->dev,
+ "tc%u ets bw cannot be 0\n", i);
+ return -EINVAL;
+ }
+
+ if (hdev->tm_info.tc_info[i].tc_sch_mode !=
+ HCLGE_SCH_MODE_DWRR)
+ *changed = true;
+
+ total_ets_bw += ets->tc_tx_bw[i];
+ has_ets_tc = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
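+	/* When any TC uses ETS, the DWRR weights of all ETS TCs must
+	 * sum to exactly 100 percent.
+	 */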
+ if (has_ets_tc && total_ets_bw != BW_PERCENT)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets,
+ u8 *tc, bool *changed)
+{
+ u8 tc_num;
+ int ret;
+
+ tc_num = hclge_ets_tc_changed(hdev, ets, changed);
+
+ ret = hclge_dcb_common_validate(hdev, tc_num, ets->prio_tc);
+ if (ret)
+ return ret;
+
+ ret = hclge_ets_sch_mode_validate(hdev, ets, changed, tc_num);
+ if (ret)
+ return ret;
+
+ *tc = tc_num;
+ if (*tc != hdev->tm_info.num_tc)
+ *changed = true;
+
+ return 0;
+}
+
+static int hclge_map_update(struct hclge_dev *hdev)
+{
+ int ret;
+
+ ret = hclge_tm_schd_setup_hw(hdev);
+ if (ret)
+ return ret;
+
+ ret = hclge_pause_setup_hw(hdev, false);
+ if (ret)
+ return ret;
+
+ ret = hclge_buffer_alloc(hdev);
+ if (ret)
+ return ret;
+
+ hclge_comm_rss_indir_init_cfg(hdev->ae_dev, &hdev->rss_cfg);
+
+ return hclge_rss_init_hw(hdev);
+}
+
+static int hclge_notify_down_uinit(struct hclge_dev *hdev)
+{
+ int ret;
+
+ ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
+ if (ret)
+ return ret;
+
+ ret = hclge_tm_flush_cfg(hdev, true);
+ if (ret)
+ return ret;
+
+ return hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
+}
+
+static int hclge_notify_init_up(struct hclge_dev *hdev)
+{
+ int ret;
+
+ ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
+ if (ret)
+ return ret;
+
+ ret = hclge_tm_flush_cfg(hdev, false);
+ if (ret)
+ return ret;
+
+ return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+}
+
+static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
+{
+ struct hclge_vport *vport = hclge_get_vport(h);
+ struct net_device *netdev = h->kinfo.netdev;
+ struct hclge_dev *hdev = vport->back;
+ bool map_changed = false;
+ u8 num_tc = 0;
+ int ret;
+
+ if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
+ h->kinfo.tc_info.mqprio_active)
+ return -EINVAL;
+
+ ret = hclge_ets_validate(hdev, ets, &num_tc, &map_changed);
+ if (ret)
+ return ret;
+
+ if (map_changed) {
+ netif_dbg(h, drv, netdev, "set ets\n");
+
+ ret = hclge_notify_down_uinit(hdev);
+ if (ret)
+ return ret;
+ }
+
+ hclge_tm_schd_info_update(hdev, num_tc);
+ h->kinfo.tc_info.dcb_ets_active = num_tc > 1;
+
+ ret = hclge_ieee_ets_to_tm_info(hdev, ets);
+ if (ret)
+ goto err_out;
+
+ if (map_changed) {
+ ret = hclge_map_update(hdev);
+ if (ret)
+ goto err_out;
+
+ return hclge_notify_init_up(hdev);
+ }
+
+ return hclge_tm_dwrr_cfg(hdev);
+
+err_out:
+ if (!map_changed)
+ return ret;
+
+ hclge_notify_init_up(hdev);
+
+ return ret;
+}
+
+static int hclge_ieee_getpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
+{
+ struct hclge_vport *vport = hclge_get_vport(h);
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+
+ memset(pfc, 0, sizeof(*pfc));
+ pfc->pfc_cap = hdev->pfc_max;
+ pfc->pfc_en = hdev->tm_info.pfc_en;
+
+ ret = hclge_mac_update_stats(hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to update MAC stats, ret = %d.\n", ret);
+ return ret;
+ }
+
+ hclge_pfc_tx_stats_get(hdev, pfc->requests);
+ hclge_pfc_rx_stats_get(hdev, pfc->indications);
+
+ return 0;
+}
+
+static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
+{
+ struct hclge_vport *vport = hclge_get_vport(h);
+ struct net_device *netdev = h->kinfo.netdev;
+ struct hclge_dev *hdev = vport->back;
+ u8 i, j, pfc_map, *prio_tc;
+ int last_bad_ret = 0;
+ int ret;
+
+ if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
+ return -EINVAL;
+
+ if (pfc->pfc_en == hdev->tm_info.pfc_en)
+ return 0;
+
+ prio_tc = hdev->tm_info.prio_tc;
+ pfc_map = 0;
+
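+	/* Build a per-TC PFC bitmap: enable PFC for a TC if any user
+	 * priority mapped to that TC has PFC enabled.
+	 */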
+ for (i = 0; i < hdev->tm_info.num_tc; i++) {
+ for (j = 0; j < HNAE3_MAX_USER_PRIO; j++) {
+ if ((prio_tc[j] == i) && (pfc->pfc_en & BIT(j))) {
+ pfc_map |= BIT(i);
+ break;
+ }
+ }
+ }
+
+ hdev->tm_info.hw_pfc_map = pfc_map;
+ hdev->tm_info.pfc_en = pfc->pfc_en;
+
+ netif_dbg(h, drv, netdev,
+ "set pfc: pfc_en=%x, pfc_map=%x, num_tc=%u\n",
+ pfc->pfc_en, pfc_map, hdev->tm_info.num_tc);
+
+ hclge_tm_pfc_info_update(hdev);
+
+ ret = hclge_pause_setup_hw(hdev, false);
+ if (ret)
+ return ret;
+
+ ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
+ if (ret)
+ return ret;
+
+ ret = hclge_tm_flush_cfg(hdev, true);
+ if (ret)
+ return ret;
+
+	/* Whether or not the following operations succeed, the TM flush
+	 * must be disabled and the client notified that the network is
+	 * up, so do not return immediately.
+	 */
+ ret = hclge_buffer_alloc(hdev);
+ if (ret)
+ last_bad_ret = ret;
+
+ ret = hclge_tm_flush_cfg(hdev, false);
+ if (ret)
+ last_bad_ret = ret;
+
+ ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+ if (ret)
+ last_bad_ret = ret;
+
+ return last_bad_ret;
+}
+
+static int hclge_ieee_setapp(struct hnae3_handle *h, struct dcb_app *app)
+{
+ struct hclge_vport *vport = hclge_get_vport(h);
+ struct net_device *netdev = h->kinfo.netdev;
+ struct hclge_dev *hdev = vport->back;
+ struct dcb_app old_app;
+ int ret;
+
+ if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP ||
+ app->protocol >= HNAE3_MAX_DSCP ||
+ app->priority >= HNAE3_MAX_USER_PRIO)
+ return -EINVAL;
+
+ dev_info(&hdev->pdev->dev, "setapp dscp=%u priority=%u\n",
+ app->protocol, app->priority);
+
+ if (app->priority == h->kinfo.dscp_prio[app->protocol])
+ return 0;
+
+ ret = dcb_ieee_setapp(netdev, app);
+ if (ret)
+ return ret;
+
+ old_app.selector = IEEE_8021QAZ_APP_SEL_DSCP;
+ old_app.protocol = app->protocol;
+ old_app.priority = h->kinfo.dscp_prio[app->protocol];
+
+ h->kinfo.dscp_prio[app->protocol] = app->priority;
+ ret = hclge_dscp_to_tc_map(hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to set dscp to tc map, ret = %d\n", ret);
+ h->kinfo.dscp_prio[app->protocol] = old_app.priority;
+ (void)dcb_ieee_delapp(netdev, app);
+ return ret;
+ }
+
+ vport->nic.kinfo.tc_map_mode = HNAE3_TC_MAP_MODE_DSCP;
+ if (old_app.priority == HNAE3_PRIO_ID_INVALID)
+ h->kinfo.dscp_app_cnt++;
+ else
+ ret = dcb_ieee_delapp(netdev, &old_app);
+
+ return ret;
+}
+
+static int hclge_ieee_delapp(struct hnae3_handle *h, struct dcb_app *app)
+{
+ struct hclge_vport *vport = hclge_get_vport(h);
+ struct net_device *netdev = h->kinfo.netdev;
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+
+ if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP ||
+ app->protocol >= HNAE3_MAX_DSCP ||
+ app->priority >= HNAE3_MAX_USER_PRIO ||
+ app->priority != h->kinfo.dscp_prio[app->protocol])
+ return -EINVAL;
+
+ dev_info(&hdev->pdev->dev, "delapp dscp=%u priority=%u\n",
+ app->protocol, app->priority);
+
+ ret = dcb_ieee_delapp(netdev, app);
+ if (ret)
+ return ret;
+
+ h->kinfo.dscp_prio[app->protocol] = HNAE3_PRIO_ID_INVALID;
+ ret = hclge_dscp_to_tc_map(hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to del dscp to tc map, ret = %d\n", ret);
+ h->kinfo.dscp_prio[app->protocol] = app->priority;
+ (void)dcb_ieee_setapp(netdev, app);
+ return ret;
+ }
+
+ if (h->kinfo.dscp_app_cnt)
+ h->kinfo.dscp_app_cnt--;
+
+ if (!h->kinfo.dscp_app_cnt) {
+ vport->nic.kinfo.tc_map_mode = HNAE3_TC_MAP_MODE_PRIO;
+ ret = hclge_up_to_tc_map(hdev);
+ }
+
+ return ret;
+}
+
+/* DCBX configuration */
+static u8 hclge_getdcbx(struct hnae3_handle *h)
+{
+ struct hclge_vport *vport = hclge_get_vport(h);
+ struct hclge_dev *hdev = vport->back;
+
+ if (h->kinfo.tc_info.mqprio_active)
+ return 0;
+
+ return hdev->dcbx_cap;
+}
+
+static u8 hclge_setdcbx(struct hnae3_handle *h, u8 mode)
+{
+ struct hclge_vport *vport = hclge_get_vport(h);
+ struct net_device *netdev = h->kinfo.netdev;
+ struct hclge_dev *hdev = vport->back;
+
+ netif_dbg(h, drv, netdev, "set dcbx: mode=%u\n", mode);
+
+ /* No support for LLD_MANAGED modes or CEE */
+ if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
+ (mode & DCB_CAP_DCBX_VER_CEE) ||
+ !(mode & DCB_CAP_DCBX_HOST))
+ return 1;
+
+ hdev->dcbx_cap = mode;
+
+ return 0;
+}
+
+static int hclge_mqprio_qopt_check(struct hclge_dev *hdev,
+ struct tc_mqprio_qopt_offload *mqprio_qopt)
+{
+ u16 queue_sum = 0;
+ int ret;
+ int i;
+
+ if (!mqprio_qopt->qopt.num_tc) {
+ mqprio_qopt->qopt.num_tc = 1;
+ return 0;
+ }
+
+ ret = hclge_dcb_common_validate(hdev, mqprio_qopt->qopt.num_tc,
+ mqprio_qopt->qopt.prio_tc_map);
+ if (ret)
+ return ret;
+
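+	/* Each TC must use a power-of-two queue count within the PF RSS
+	 * limit, and the per-TC queue ranges must be contiguous from 0.
+	 */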
+ for (i = 0; i < mqprio_qopt->qopt.num_tc; i++) {
+ if (!is_power_of_2(mqprio_qopt->qopt.count[i])) {
+ dev_err(&hdev->pdev->dev,
+ "qopt queue count must be power of 2\n");
+ return -EINVAL;
+ }
+
+ if (mqprio_qopt->qopt.count[i] > hdev->pf_rss_size_max) {
+ dev_err(&hdev->pdev->dev,
+ "qopt queue count should be no more than %u\n",
+ hdev->pf_rss_size_max);
+ return -EINVAL;
+ }
+
+ if (mqprio_qopt->qopt.offset[i] != queue_sum) {
+ dev_err(&hdev->pdev->dev,
+ "qopt queue offset must start from 0, and being continuous\n");
+ return -EINVAL;
+ }
+
+ if (mqprio_qopt->min_rate[i] || mqprio_qopt->max_rate[i]) {
+ dev_err(&hdev->pdev->dev,
+ "qopt tx_rate is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ queue_sum = mqprio_qopt->qopt.offset[i];
+ queue_sum += mqprio_qopt->qopt.count[i];
+ }
+ if (hdev->vport[0].alloc_tqps < queue_sum) {
+ dev_err(&hdev->pdev->dev,
+ "qopt queue count sum should be less than %u\n",
+ hdev->vport[0].alloc_tqps);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void hclge_sync_mqprio_qopt(struct hnae3_tc_info *tc_info,
+ struct tc_mqprio_qopt_offload *mqprio_qopt)
+{
+ memset(tc_info, 0, sizeof(*tc_info));
+ tc_info->num_tc = mqprio_qopt->qopt.num_tc;
+ memcpy(tc_info->prio_tc, mqprio_qopt->qopt.prio_tc_map,
+ sizeof_field(struct hnae3_tc_info, prio_tc));
+ memcpy(tc_info->tqp_count, mqprio_qopt->qopt.count,
+ sizeof_field(struct hnae3_tc_info, tqp_count));
+ memcpy(tc_info->tqp_offset, mqprio_qopt->qopt.offset,
+ sizeof_field(struct hnae3_tc_info, tqp_offset));
+}
+
+static int hclge_config_tc(struct hclge_dev *hdev,
+ struct hnae3_tc_info *tc_info)
+{
+ int i;
+
+ hclge_tm_schd_info_update(hdev, tc_info->num_tc);
+ for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
+ hdev->tm_info.prio_tc[i] = tc_info->prio_tc[i];
+
+ return hclge_map_update(hdev);
+}
+
+/* Set up TC for hardware offloaded mqprio in channel mode */
+static int hclge_setup_tc(struct hnae3_handle *h,
+ struct tc_mqprio_qopt_offload *mqprio_qopt)
+{
+ struct hclge_vport *vport = hclge_get_vport(h);
+ struct hnae3_knic_private_info *kinfo;
+ struct hclge_dev *hdev = vport->back;
+ struct hnae3_tc_info old_tc_info;
+ u8 tc = mqprio_qopt->qopt.num_tc;
+ int ret;
+
+	/* If the client has been unregistered, changing the mqprio
+	 * configuration is not allowed, since it may cause the ring
+	 * uninitialization to fail.
+	 */
+ if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
+ return -EBUSY;
+
+ kinfo = &vport->nic.kinfo;
+ if (kinfo->tc_info.dcb_ets_active)
+ return -EINVAL;
+
+ ret = hclge_mqprio_qopt_check(hdev, mqprio_qopt);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to check mqprio qopt params, ret = %d\n", ret);
+ return ret;
+ }
+
+ ret = hclge_notify_down_uinit(hdev);
+ if (ret)
+ return ret;
+
+ memcpy(&old_tc_info, &kinfo->tc_info, sizeof(old_tc_info));
+ hclge_sync_mqprio_qopt(&kinfo->tc_info, mqprio_qopt);
+ kinfo->tc_info.mqprio_active = tc > 0;
+
+ ret = hclge_config_tc(hdev, &kinfo->tc_info);
+ if (ret)
+ goto err_out;
+
+ return hclge_notify_init_up(hdev);
+
+err_out:
+ if (!tc) {
+ dev_warn(&hdev->pdev->dev,
+ "failed to destroy mqprio, will active after reset, ret = %d\n",
+ ret);
+ } else {
+ /* roll-back */
+ memcpy(&kinfo->tc_info, &old_tc_info, sizeof(old_tc_info));
+ if (hclge_config_tc(hdev, &kinfo->tc_info))
+ dev_err(&hdev->pdev->dev,
+ "failed to roll back tc configuration\n");
+ }
+ hclge_notify_init_up(hdev);
+
+ return ret;
+}
+
+static const struct hnae3_dcb_ops hns3_dcb_ops = {
+ .ieee_getets = hclge_ieee_getets,
+ .ieee_setets = hclge_ieee_setets,
+ .ieee_getpfc = hclge_ieee_getpfc,
+ .ieee_setpfc = hclge_ieee_setpfc,
+ .ieee_setapp = hclge_ieee_setapp,
+ .ieee_delapp = hclge_ieee_delapp,
+ .getdcbx = hclge_getdcbx,
+ .setdcbx = hclge_setdcbx,
+ .setup_tc = hclge_setup_tc,
+};
+
+void hclge_dcb_ops_set(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport = hdev->vport;
+ struct hnae3_knic_private_info *kinfo;
+
+	/* If hdev does not support DCB, or the vport is not a PF,
+	 * dcb_ops is not set.
+	 */
+ if (!hnae3_dev_dcb_supported(hdev) ||
+ vport->vport_id != 0)
+ return;
+
+ kinfo = &vport->nic.kinfo;
+ kinfo->dcb_ops = &hns3_dcb_ops;
+ hdev->dcbx_cap = DCB_CAP_DCBX_VER_IEEE | DCB_CAP_DCBX_HOST;
+}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.h
new file mode 100644
index 0000000000..b04702e656
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+// Copyright (c) 2016-2017 Hisilicon Limited.
+
+#ifndef __HCLGE_DCB_H__
+#define __HCLGE_DCB_H__
+
+#include "hclge_main.h"
+
+#ifdef CONFIG_HNS3_DCB
+void hclge_dcb_ops_set(struct hclge_dev *hdev);
+#else
+static inline void hclge_dcb_ops_set(struct hclge_dev *hdev) {}
+#endif
+
+#endif /* __HCLGE_DCB_H__ */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
new file mode 100644
index 0000000000..ff3f8f424a
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -0,0 +1,2589 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) 2018-2019 Hisilicon Limited. */
+
+#include <linux/device.h>
+#include <linux/sched/clock.h>
+
+#include "hclge_debugfs.h"
+#include "hclge_err.h"
+#include "hclge_main.h"
+#include "hclge_regs.h"
+#include "hclge_tm.h"
+#include "hnae3.h"
+
+static const char * const state_str[] = { "off", "on" };
+static const char * const hclge_mac_state_str[] = {
+ "TO_ADD", "TO_DEL", "ACTIVE"
+};
+
+static const char * const tc_map_mode_str[] = { "PRIO", "DSCP" };
+
+static const struct hclge_dbg_reg_type_info hclge_dbg_reg_info[] = {
+ { .cmd = HNAE3_DBG_CMD_REG_BIOS_COMMON,
+ .dfx_msg = &hclge_dbg_bios_common_reg[0],
+ .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_bios_common_reg),
+ .offset = HCLGE_DBG_DFX_BIOS_OFFSET,
+ .cmd = HCLGE_OPC_DFX_BIOS_COMMON_REG } },
+ { .cmd = HNAE3_DBG_CMD_REG_SSU,
+ .dfx_msg = &hclge_dbg_ssu_reg_0[0],
+ .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_0),
+ .offset = HCLGE_DBG_DFX_SSU_0_OFFSET,
+ .cmd = HCLGE_OPC_DFX_SSU_REG_0 } },
+ { .cmd = HNAE3_DBG_CMD_REG_SSU,
+ .dfx_msg = &hclge_dbg_ssu_reg_1[0],
+ .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_1),
+ .offset = HCLGE_DBG_DFX_SSU_1_OFFSET,
+ .cmd = HCLGE_OPC_DFX_SSU_REG_1 } },
+ { .cmd = HNAE3_DBG_CMD_REG_SSU,
+ .dfx_msg = &hclge_dbg_ssu_reg_2[0],
+ .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_2),
+ .offset = HCLGE_DBG_DFX_SSU_2_OFFSET,
+ .cmd = HCLGE_OPC_DFX_SSU_REG_2 } },
+ { .cmd = HNAE3_DBG_CMD_REG_IGU_EGU,
+ .dfx_msg = &hclge_dbg_igu_egu_reg[0],
+ .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_igu_egu_reg),
+ .offset = HCLGE_DBG_DFX_IGU_OFFSET,
+ .cmd = HCLGE_OPC_DFX_IGU_EGU_REG } },
+ { .cmd = HNAE3_DBG_CMD_REG_RPU,
+ .dfx_msg = &hclge_dbg_rpu_reg_0[0],
+ .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rpu_reg_0),
+ .offset = HCLGE_DBG_DFX_RPU_0_OFFSET,
+ .cmd = HCLGE_OPC_DFX_RPU_REG_0 } },
+ { .cmd = HNAE3_DBG_CMD_REG_RPU,
+ .dfx_msg = &hclge_dbg_rpu_reg_1[0],
+ .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rpu_reg_1),
+ .offset = HCLGE_DBG_DFX_RPU_1_OFFSET,
+ .cmd = HCLGE_OPC_DFX_RPU_REG_1 } },
+ { .cmd = HNAE3_DBG_CMD_REG_NCSI,
+ .dfx_msg = &hclge_dbg_ncsi_reg[0],
+ .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ncsi_reg),
+ .offset = HCLGE_DBG_DFX_NCSI_OFFSET,
+ .cmd = HCLGE_OPC_DFX_NCSI_REG } },
+ { .cmd = HNAE3_DBG_CMD_REG_RTC,
+ .dfx_msg = &hclge_dbg_rtc_reg[0],
+ .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rtc_reg),
+ .offset = HCLGE_DBG_DFX_RTC_OFFSET,
+ .cmd = HCLGE_OPC_DFX_RTC_REG } },
+ { .cmd = HNAE3_DBG_CMD_REG_PPP,
+ .dfx_msg = &hclge_dbg_ppp_reg[0],
+ .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ppp_reg),
+ .offset = HCLGE_DBG_DFX_PPP_OFFSET,
+ .cmd = HCLGE_OPC_DFX_PPP_REG } },
+ { .cmd = HNAE3_DBG_CMD_REG_RCB,
+ .dfx_msg = &hclge_dbg_rcb_reg[0],
+ .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rcb_reg),
+ .offset = HCLGE_DBG_DFX_RCB_OFFSET,
+ .cmd = HCLGE_OPC_DFX_RCB_REG } },
+ { .cmd = HNAE3_DBG_CMD_REG_TQP,
+ .dfx_msg = &hclge_dbg_tqp_reg[0],
+ .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_tqp_reg),
+ .offset = HCLGE_DBG_DFX_TQP_OFFSET,
+ .cmd = HCLGE_OPC_DFX_TQP_REG } },
+};
+
+/* Make sure len(name) + interval >= maxlen(item data) + 2.
+ * For example, with name = "pkt_num" (len 7) and item data of type u32
+ * printed with "%u" (max len 10), the interval must be at least 5.
+ */
+static void hclge_dbg_fill_content(char *content, u16 len,
+ const struct hclge_dbg_item *items,
+ const char **result, u16 size)
+{
+#define HCLGE_DBG_LINE_END_LEN 2
+ char *pos = content;
+ u16 item_len;
+ u16 i;
+
+ if (!len) {
+ return;
+ } else if (len <= HCLGE_DBG_LINE_END_LEN) {
+ *pos++ = '\0';
+ return;
+ }
+
+ memset(content, ' ', len);
+ len -= HCLGE_DBG_LINE_END_LEN;
+
+ for (i = 0; i < size; i++) {
+ item_len = strlen(items[i].name) + items[i].interval;
+ if (len < item_len)
+ break;
+
+ if (result) {
+ if (item_len < strlen(result[i]))
+ break;
+ memcpy(pos, result[i], strlen(result[i]));
+ } else {
+ memcpy(pos, items[i].name, strlen(items[i].name));
+ }
+ pos += item_len;
+ len -= item_len;
+ }
+ *pos++ = '\n';
+ *pos++ = '\0';
+}
+
+static char *hclge_dbg_get_func_id_str(char *buf, u8 id)
+{
+ if (id)
+ sprintf(buf, "vf%u", id - 1U);
+ else
+ sprintf(buf, "pf");
+
+ return buf;
+}
+
+static int hclge_dbg_get_dfx_bd_num(struct hclge_dev *hdev, int offset,
+ u32 *bd_num)
+{
+ struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
+ int entries_per_desc;
+ int index;
+ int ret;
+
+ ret = hclge_query_bd_num_cmd_send(hdev, desc);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get dfx bd_num, offset = %d, ret = %d\n",
+ offset, ret);
+ return ret;
+ }
+
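+	/* Each query descriptor holds several bd_num entries; locate the
+	 * entry for this register type by its offset.
+	 */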
+ entries_per_desc = ARRAY_SIZE(desc[0].data);
+ index = offset % entries_per_desc;
+
+ *bd_num = le32_to_cpu(desc[offset / entries_per_desc].data[index]);
+ if (!(*bd_num)) {
+ dev_err(&hdev->pdev->dev, "The value of dfx bd_num is 0!\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int hclge_dbg_cmd_send(struct hclge_dev *hdev,
+ struct hclge_desc *desc_src,
+ int index, int bd_num,
+ enum hclge_opcode_type cmd)
+{
+ struct hclge_desc *desc = desc_src;
+ int ret, i;
+
+ hclge_cmd_setup_basic_desc(desc, cmd, true);
+ desc->data[0] = cpu_to_le32(index);
+
+ for (i = 1; i < bd_num; i++) {
+ desc->flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ desc++;
+ hclge_cmd_setup_basic_desc(desc, cmd, true);
+ }
+
+ ret = hclge_cmd_send(&hdev->hw, desc_src, bd_num);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "cmd(0x%x) send fail, ret = %d\n", cmd, ret);
+ return ret;
+}
+
+static int
+hclge_dbg_dump_reg_tqp(struct hclge_dev *hdev,
+ const struct hclge_dbg_reg_type_info *reg_info,
+ char *buf, int len, int *pos)
+{
+ const struct hclge_dbg_dfx_message *dfx_message = reg_info->dfx_msg;
+ const struct hclge_dbg_reg_common_msg *reg_msg = &reg_info->reg_msg;
+ struct hclge_desc *desc_src;
+ u32 index, entry, i, cnt;
+ int bd_num, min_num, ret;
+ struct hclge_desc *desc;
+
+ ret = hclge_dbg_get_dfx_bd_num(hdev, reg_msg->offset, &bd_num);
+ if (ret)
+ return ret;
+
+ desc_src = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
+ if (!desc_src)
+ return -ENOMEM;
+
+ min_num = min_t(int, bd_num * HCLGE_DESC_DATA_LEN, reg_msg->msg_num);
+
+ for (i = 0, cnt = 0; i < min_num; i++, dfx_message++)
+ *pos += scnprintf(buf + *pos, len - *pos, "item%u = %s\n",
+ cnt++, dfx_message->message);
+
+ for (i = 0; i < cnt; i++)
+ *pos += scnprintf(buf + *pos, len - *pos, "item%u\t", i);
+
+ *pos += scnprintf(buf + *pos, len - *pos, "\n");
+
+ for (index = 0; index < hdev->vport[0].alloc_tqps; index++) {
+ dfx_message = reg_info->dfx_msg;
+ desc = desc_src;
+ ret = hclge_dbg_cmd_send(hdev, desc, index, bd_num,
+ reg_msg->cmd);
+ if (ret)
+ break;
+
+ for (i = 0; i < min_num; i++, dfx_message++) {
+ entry = i % HCLGE_DESC_DATA_LEN;
+ if (i > 0 && !entry)
+ desc++;
+
+ *pos += scnprintf(buf + *pos, len - *pos, "%#x\t",
+ le32_to_cpu(desc->data[entry]));
+ }
+ *pos += scnprintf(buf + *pos, len - *pos, "\n");
+ }
+
+ kfree(desc_src);
+ return ret;
+}
+
+static int
+hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
+ const struct hclge_dbg_reg_type_info *reg_info,
+ char *buf, int len, int *pos)
+{
+ const struct hclge_dbg_reg_common_msg *reg_msg = &reg_info->reg_msg;
+ const struct hclge_dbg_dfx_message *dfx_message = reg_info->dfx_msg;
+ struct hclge_desc *desc_src;
+ int bd_num, min_num, ret;
+ struct hclge_desc *desc;
+ u32 entry, i;
+
+ ret = hclge_dbg_get_dfx_bd_num(hdev, reg_msg->offset, &bd_num);
+ if (ret)
+ return ret;
+
+ desc_src = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
+ if (!desc_src)
+ return -ENOMEM;
+
+ desc = desc_src;
+
+ ret = hclge_dbg_cmd_send(hdev, desc, 0, bd_num, reg_msg->cmd);
+ if (ret) {
+ kfree(desc);
+ return ret;
+ }
+
+ min_num = min_t(int, bd_num * HCLGE_DESC_DATA_LEN, reg_msg->msg_num);
+
+ for (i = 0; i < min_num; i++, dfx_message++) {
+ entry = i % HCLGE_DESC_DATA_LEN;
+ if (i > 0 && !entry)
+ desc++;
+ if (!dfx_message->flag)
+ continue;
+
+ *pos += scnprintf(buf + *pos, len - *pos, "%s: %#x\n",
+ dfx_message->message,
+ le32_to_cpu(desc->data[entry]));
+ }
+
+ kfree(desc_src);
+ return 0;
+}
+
+static const struct hclge_dbg_status_dfx_info hclge_dbg_mac_en_status[] = {
+ {HCLGE_MAC_TX_EN_B, "mac_trans_en"},
+ {HCLGE_MAC_RX_EN_B, "mac_rcv_en"},
+ {HCLGE_MAC_PAD_TX_B, "pad_trans_en"},
+ {HCLGE_MAC_PAD_RX_B, "pad_rcv_en"},
+ {HCLGE_MAC_1588_TX_B, "1588_trans_en"},
+ {HCLGE_MAC_1588_RX_B, "1588_rcv_en"},
+ {HCLGE_MAC_APP_LP_B, "mac_app_loop_en"},
+ {HCLGE_MAC_LINE_LP_B, "mac_line_loop_en"},
+ {HCLGE_MAC_FCS_TX_B, "mac_fcs_tx_en"},
+ {HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, "mac_rx_oversize_truncate_en"},
+ {HCLGE_MAC_RX_FCS_STRIP_B, "mac_rx_fcs_strip_en"},
+ {HCLGE_MAC_RX_FCS_B, "mac_rx_fcs_en"},
+ {HCLGE_MAC_TX_UNDER_MIN_ERR_B, "mac_tx_under_min_err_en"},
+ {HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, "mac_tx_oversize_truncate_en"}
+};
+
+static int hclge_dbg_dump_mac_enable_status(struct hclge_dev *hdev, char *buf,
+ int len, int *pos)
+{
+ struct hclge_config_mac_mode_cmd *req;
+ struct hclge_desc desc;
+ u32 loop_en, i, offset;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to dump mac enable status, ret = %d\n", ret);
+ return ret;
+ }
+
+ req = (struct hclge_config_mac_mode_cmd *)desc.data;
+ loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
+
+ for (i = 0; i < ARRAY_SIZE(hclge_dbg_mac_en_status); i++) {
+ offset = hclge_dbg_mac_en_status[i].offset;
+ *pos += scnprintf(buf + *pos, len - *pos, "%s: %#x\n",
+ hclge_dbg_mac_en_status[i].message,
+ hnae3_get_bit(loop_en, offset));
+ }
+
+ return 0;
+}
+
+static int hclge_dbg_dump_mac_frame_size(struct hclge_dev *hdev, char *buf,
+ int len, int *pos)
+{
+ struct hclge_config_max_frm_size_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, true);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to dump mac frame size, ret = %d\n", ret);
+ return ret;
+ }
+
+ req = (struct hclge_config_max_frm_size_cmd *)desc.data;
+
+ *pos += scnprintf(buf + *pos, len - *pos, "max_frame_size: %u\n",
+ le16_to_cpu(req->max_frm_size));
+ *pos += scnprintf(buf + *pos, len - *pos, "min_frame_size: %u\n",
+ req->min_frm_size);
+
+ return 0;
+}
+
+static int hclge_dbg_dump_mac_speed_duplex(struct hclge_dev *hdev, char *buf,
+ int len, int *pos)
+{
+#define HCLGE_MAC_SPEED_SHIFT 0
+#define HCLGE_MAC_SPEED_MASK GENMASK(5, 0)
+#define HCLGE_MAC_DUPLEX_SHIFT 7
+
+ struct hclge_config_mac_speed_dup_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, true);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to dump mac speed duplex, ret = %d\n", ret);
+ return ret;
+ }
+
+ req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
+
+ *pos += scnprintf(buf + *pos, len - *pos, "speed: %#lx\n",
+ hnae3_get_field(req->speed_dup, HCLGE_MAC_SPEED_MASK,
+ HCLGE_MAC_SPEED_SHIFT));
+ *pos += scnprintf(buf + *pos, len - *pos, "duplex: %#x\n",
+ hnae3_get_bit(req->speed_dup,
+ HCLGE_MAC_DUPLEX_SHIFT));
+ return 0;
+}
+
+static int hclge_dbg_dump_mac(struct hclge_dev *hdev, char *buf, int len)
+{
+ int pos = 0;
+ int ret;
+
+ ret = hclge_dbg_dump_mac_enable_status(hdev, buf, len, &pos);
+ if (ret)
+ return ret;
+
+ ret = hclge_dbg_dump_mac_frame_size(hdev, buf, len, &pos);
+ if (ret)
+ return ret;
+
+ return hclge_dbg_dump_mac_speed_duplex(hdev, buf, len, &pos);
+}
+
+static int hclge_dbg_dump_dcb_qset(struct hclge_dev *hdev, char *buf, int len,
+ int *pos)
+{
+ struct hclge_dbg_bitmap_cmd req;
+ struct hclge_desc desc;
+ u16 qset_id, qset_num;
+ int ret;
+
+ ret = hclge_tm_get_qset_num(hdev, &qset_num);
+ if (ret)
+ return ret;
+
+ *pos += scnprintf(buf + *pos, len - *pos,
+ "qset_id roce_qset_mask nic_qset_mask qset_shaping_pass qset_bp_status\n");
+ for (qset_id = 0; qset_id < qset_num; qset_id++) {
+ ret = hclge_dbg_cmd_send(hdev, &desc, qset_id, 1,
+ HCLGE_OPC_QSET_DFX_STS);
+ if (ret)
+ return ret;
+
+ req.bitmap = (u8)le32_to_cpu(desc.data[1]);
+
+ *pos += scnprintf(buf + *pos, len - *pos,
+ "%04u %#x %#x %#x %#x\n",
+ qset_id, req.bit0, req.bit1, req.bit2,
+ req.bit3);
+ }
+
+ return 0;
+}
+
+static int hclge_dbg_dump_dcb_pri(struct hclge_dev *hdev, char *buf, int len,
+ int *pos)
+{
+ struct hclge_dbg_bitmap_cmd req;
+ struct hclge_desc desc;
+ u8 pri_id, pri_num;
+ int ret;
+
+ ret = hclge_tm_get_pri_num(hdev, &pri_num);
+ if (ret)
+ return ret;
+
+ *pos += scnprintf(buf + *pos, len - *pos,
+ "pri_id pri_mask pri_cshaping_pass pri_pshaping_pass\n");
+ for (pri_id = 0; pri_id < pri_num; pri_id++) {
+ ret = hclge_dbg_cmd_send(hdev, &desc, pri_id, 1,
+ HCLGE_OPC_PRI_DFX_STS);
+ if (ret)
+ return ret;
+
+ req.bitmap = (u8)le32_to_cpu(desc.data[1]);
+
+ *pos += scnprintf(buf + *pos, len - *pos,
+ "%03u %#x %#x %#x\n",
+ pri_id, req.bit0, req.bit1, req.bit2);
+ }
+
+ return 0;
+}
+
+static int hclge_dbg_dump_dcb_pg(struct hclge_dev *hdev, char *buf, int len,
+ int *pos)
+{
+ struct hclge_dbg_bitmap_cmd req;
+ struct hclge_desc desc;
+ u8 pg_id;
+ int ret;
+
+ *pos += scnprintf(buf + *pos, len - *pos,
+ "pg_id pg_mask pg_cshaping_pass pg_pshaping_pass\n");
+ for (pg_id = 0; pg_id < hdev->tm_info.num_pg; pg_id++) {
+ ret = hclge_dbg_cmd_send(hdev, &desc, pg_id, 1,
+ HCLGE_OPC_PG_DFX_STS);
+ if (ret)
+ return ret;
+
+ req.bitmap = (u8)le32_to_cpu(desc.data[1]);
+
+ *pos += scnprintf(buf + *pos, len - *pos,
+ "%03u %#x %#x %#x\n",
+ pg_id, req.bit0, req.bit1, req.bit2);
+ }
+
+ return 0;
+}
+
+static int hclge_dbg_dump_dcb_queue(struct hclge_dev *hdev, char *buf, int len,
+ int *pos)
+{
+ struct hclge_desc desc;
+ u16 nq_id;
+ int ret;
+
+ *pos += scnprintf(buf + *pos, len - *pos,
+ "nq_id sch_nic_queue_cnt sch_roce_queue_cnt\n");
+ for (nq_id = 0; nq_id < hdev->num_tqps; nq_id++) {
+ ret = hclge_dbg_cmd_send(hdev, &desc, nq_id, 1,
+ HCLGE_OPC_SCH_NQ_CNT);
+ if (ret)
+ return ret;
+
+ *pos += scnprintf(buf + *pos, len - *pos, "%04u %#x",
+ nq_id, le32_to_cpu(desc.data[1]));
+
+ ret = hclge_dbg_cmd_send(hdev, &desc, nq_id, 1,
+ HCLGE_OPC_SCH_RQ_CNT);
+ if (ret)
+ return ret;
+
+ *pos += scnprintf(buf + *pos, len - *pos,
+ " %#x\n",
+ le32_to_cpu(desc.data[1]));
+ }
+
+ return 0;
+}
+
+static int hclge_dbg_dump_dcb_port(struct hclge_dev *hdev, char *buf, int len,
+ int *pos)
+{
+ struct hclge_dbg_bitmap_cmd req;
+ struct hclge_desc desc;
+ u8 port_id = 0;
+ int ret;
+
+ ret = hclge_dbg_cmd_send(hdev, &desc, port_id, 1,
+ HCLGE_OPC_PORT_DFX_STS);
+ if (ret)
+ return ret;
+
+ req.bitmap = (u8)le32_to_cpu(desc.data[1]);
+
+ *pos += scnprintf(buf + *pos, len - *pos, "port_mask: %#x\n",
+ req.bit0);
+ *pos += scnprintf(buf + *pos, len - *pos, "port_shaping_pass: %#x\n",
+ req.bit1);
+
+ return 0;
+}
+
+static int hclge_dbg_dump_dcb_tm(struct hclge_dev *hdev, char *buf, int len,
+ int *pos)
+{
+ struct hclge_desc desc[2];
+ u8 port_id = 0;
+ int ret;
+
+ ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1,
+ HCLGE_OPC_TM_INTERNAL_CNT);
+ if (ret)
+ return ret;
+
+ *pos += scnprintf(buf + *pos, len - *pos, "SCH_NIC_NUM: %#x\n",
+ le32_to_cpu(desc[0].data[1]));
+ *pos += scnprintf(buf + *pos, len - *pos, "SCH_ROCE_NUM: %#x\n",
+ le32_to_cpu(desc[0].data[2]));
+
+ ret = hclge_dbg_cmd_send(hdev, desc, port_id, 2,
+ HCLGE_OPC_TM_INTERNAL_STS);
+ if (ret)
+ return ret;
+
+ *pos += scnprintf(buf + *pos, len - *pos, "pri_bp: %#x\n",
+ le32_to_cpu(desc[0].data[1]));
+ *pos += scnprintf(buf + *pos, len - *pos, "fifo_dfx_info: %#x\n",
+ le32_to_cpu(desc[0].data[2]));
+ *pos += scnprintf(buf + *pos, len - *pos,
+ "sch_roce_fifo_afull_gap: %#x\n",
+ le32_to_cpu(desc[0].data[3]));
+ *pos += scnprintf(buf + *pos, len - *pos,
+ "tx_private_waterline: %#x\n",
+ le32_to_cpu(desc[0].data[4]));
+ *pos += scnprintf(buf + *pos, len - *pos, "tm_bypass_en: %#x\n",
+ le32_to_cpu(desc[0].data[5]));
+ *pos += scnprintf(buf + *pos, len - *pos, "SSU_TM_BYPASS_EN: %#x\n",
+ le32_to_cpu(desc[1].data[0]));
+ *pos += scnprintf(buf + *pos, len - *pos, "SSU_RESERVE_CFG: %#x\n",
+ le32_to_cpu(desc[1].data[1]));
+
+ if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER)
+ return 0;
+
+ ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1,
+ HCLGE_OPC_TM_INTERNAL_STS_1);
+ if (ret)
+ return ret;
+
+ *pos += scnprintf(buf + *pos, len - *pos, "TC_MAP_SEL: %#x\n",
+ le32_to_cpu(desc[0].data[1]));
+ *pos += scnprintf(buf + *pos, len - *pos, "IGU_PFC_PRI_EN: %#x\n",
+ le32_to_cpu(desc[0].data[2]));
+ *pos += scnprintf(buf + *pos, len - *pos, "MAC_PFC_PRI_EN: %#x\n",
+ le32_to_cpu(desc[0].data[3]));
+ *pos += scnprintf(buf + *pos, len - *pos, "IGU_PRI_MAP_TC_CFG: %#x\n",
+ le32_to_cpu(desc[0].data[4]));
+ *pos += scnprintf(buf + *pos, len - *pos,
+ "IGU_TX_PRI_MAP_TC_CFG: %#x\n",
+ le32_to_cpu(desc[0].data[5]));
+
+ return 0;
+}
+
+static int hclge_dbg_dump_dcb(struct hclge_dev *hdev, char *buf, int len)
+{
+ int pos = 0;
+ int ret;
+
+ ret = hclge_dbg_dump_dcb_qset(hdev, buf, len, &pos);
+ if (ret)
+ return ret;
+
+ ret = hclge_dbg_dump_dcb_pri(hdev, buf, len, &pos);
+ if (ret)
+ return ret;
+
+ ret = hclge_dbg_dump_dcb_pg(hdev, buf, len, &pos);
+ if (ret)
+ return ret;
+
+ ret = hclge_dbg_dump_dcb_queue(hdev, buf, len, &pos);
+ if (ret)
+ return ret;
+
+ ret = hclge_dbg_dump_dcb_port(hdev, buf, len, &pos);
+ if (ret)
+ return ret;
+
+ return hclge_dbg_dump_dcb_tm(hdev, buf, len, &pos);
+}
+
+static int hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev,
+ enum hnae3_dbg_cmd cmd, char *buf, int len)
+{
+ const struct hclge_dbg_reg_type_info *reg_info;
+ int pos = 0, ret = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hclge_dbg_reg_info); i++) {
+ reg_info = &hclge_dbg_reg_info[i];
+ if (cmd == reg_info->cmd) {
+ if (cmd == HNAE3_DBG_CMD_REG_TQP)
+ return hclge_dbg_dump_reg_tqp(hdev, reg_info,
+ buf, len, &pos);
+
+ ret = hclge_dbg_dump_reg_common(hdev, reg_info, buf,
+ len, &pos);
+ if (ret)
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int hclge_dbg_dump_tc(struct hclge_dev *hdev, char *buf, int len)
+{
+ struct hclge_ets_tc_weight_cmd *ets_weight;
+ struct hclge_desc desc;
+ char *sch_mode_str;
+ int pos = 0;
+ int ret;
+ u8 i;
+
+ if (!hnae3_dev_dcb_supported(hdev)) {
+ dev_err(&hdev->pdev->dev,
+ "Only DCB-supported dev supports tc\n");
+ return -EOPNOTSUPP;
+ }
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "failed to get tc weight, ret = %d\n",
+ ret);
+ return ret;
+ }
+
+ ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data;
+
+ pos += scnprintf(buf + pos, len - pos, "enabled tc number: %u\n",
+ hdev->tm_info.num_tc);
+ pos += scnprintf(buf + pos, len - pos, "weight_offset: %u\n",
+ ets_weight->weight_offset);
+
+ pos += scnprintf(buf + pos, len - pos, "TC MODE WEIGHT\n");
+ for (i = 0; i < HNAE3_MAX_TC; i++) {
+ sch_mode_str = ets_weight->tc_weight[i] ? "dwrr" : "sp";
+ pos += scnprintf(buf + pos, len - pos, "%u %4s %3u\n",
+ i, sch_mode_str, ets_weight->tc_weight[i]);
+ }
+
+ return 0;
+}
+
+static const struct hclge_dbg_item tm_pg_items[] = {
+ { "ID", 2 },
+ { "PRI_MAP", 2 },
+ { "MODE", 2 },
+ { "DWRR", 2 },
+ { "C_IR_B", 2 },
+ { "C_IR_U", 2 },
+ { "C_IR_S", 2 },
+ { "C_BS_B", 2 },
+ { "C_BS_S", 2 },
+ { "C_FLAG", 2 },
+ { "C_RATE(Mbps)", 2 },
+ { "P_IR_B", 2 },
+ { "P_IR_U", 2 },
+ { "P_IR_S", 2 },
+ { "P_BS_B", 2 },
+ { "P_BS_S", 2 },
+ { "P_FLAG", 2 },
+ { "P_RATE(Mbps)", 0 }
+};
+
+static void hclge_dbg_fill_shaper_content(struct hclge_tm_shaper_para *para,
+ char **result, u8 *index)
+{
+ sprintf(result[(*index)++], "%3u", para->ir_b);
+ sprintf(result[(*index)++], "%3u", para->ir_u);
+ sprintf(result[(*index)++], "%3u", para->ir_s);
+ sprintf(result[(*index)++], "%3u", para->bs_b);
+ sprintf(result[(*index)++], "%3u", para->bs_s);
+ sprintf(result[(*index)++], "%3u", para->flag);
+ sprintf(result[(*index)++], "%6u", para->rate);
+}
+
+static int __hclge_dbg_dump_tm_pg(struct hclge_dev *hdev, char *data_str,
+ char *buf, int len)
+{
+ struct hclge_tm_shaper_para c_shaper_para, p_shaper_para;
+ char *result[ARRAY_SIZE(tm_pg_items)], *sch_mode_str;
+ u8 pg_id, sch_mode, weight, pri_bit_map, i, j;
+ char content[HCLGE_DBG_TM_INFO_LEN];
+ int pos = 0;
+ int ret;
+
+ for (i = 0; i < ARRAY_SIZE(tm_pg_items); i++) {
+ result[i] = data_str;
+ data_str += HCLGE_DBG_DATA_STR_LEN;
+ }
+
+ hclge_dbg_fill_content(content, sizeof(content), tm_pg_items,
+ NULL, ARRAY_SIZE(tm_pg_items));
+ pos += scnprintf(buf + pos, len - pos, "%s", content);
+
+ for (pg_id = 0; pg_id < hdev->tm_info.num_pg; pg_id++) {
+ ret = hclge_tm_get_pg_to_pri_map(hdev, pg_id, &pri_bit_map);
+ if (ret)
+ return ret;
+
+ ret = hclge_tm_get_pg_sch_mode(hdev, pg_id, &sch_mode);
+ if (ret)
+ return ret;
+
+ ret = hclge_tm_get_pg_weight(hdev, pg_id, &weight);
+ if (ret)
+ return ret;
+
+ ret = hclge_tm_get_pg_shaper(hdev, pg_id,
+ HCLGE_OPC_TM_PG_C_SHAPPING,
+ &c_shaper_para);
+ if (ret)
+ return ret;
+
+ ret = hclge_tm_get_pg_shaper(hdev, pg_id,
+ HCLGE_OPC_TM_PG_P_SHAPPING,
+ &p_shaper_para);
+ if (ret)
+ return ret;
+
+ sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" :
+ "sp";
+
+ j = 0;
+ sprintf(result[j++], "%02u", pg_id);
+ sprintf(result[j++], "0x%02x", pri_bit_map);
+ sprintf(result[j++], "%4s", sch_mode_str);
+ sprintf(result[j++], "%3u", weight);
+ hclge_dbg_fill_shaper_content(&c_shaper_para, result, &j);
+ hclge_dbg_fill_shaper_content(&p_shaper_para, result, &j);
+
+ hclge_dbg_fill_content(content, sizeof(content), tm_pg_items,
+ (const char **)result,
+ ARRAY_SIZE(tm_pg_items));
+ pos += scnprintf(buf + pos, len - pos, "%s", content);
+ }
+
+ return 0;
+}
+
+static int hclge_dbg_dump_tm_pg(struct hclge_dev *hdev, char *buf, int len)
+{
+ char *data_str;
+ int ret;
+
+ data_str = kcalloc(ARRAY_SIZE(tm_pg_items),
+ HCLGE_DBG_DATA_STR_LEN, GFP_KERNEL);
+ if (!data_str)
+ return -ENOMEM;
+
+ ret = __hclge_dbg_dump_tm_pg(hdev, data_str, buf, len);
+
+ kfree(data_str);
+
+ return ret;
+}
+
+static int hclge_dbg_dump_tm_port(struct hclge_dev *hdev, char *buf, int len)
+{
+ struct hclge_tm_shaper_para shaper_para;
+ int pos = 0;
+ int ret;
+
+ ret = hclge_tm_get_port_shaper(hdev, &shaper_para);
+ if (ret)
+ return ret;
+
+ pos += scnprintf(buf + pos, len - pos,
+ "IR_B IR_U IR_S BS_B BS_S FLAG RATE(Mbps)\n");
+ pos += scnprintf(buf + pos, len - pos,
+ "%3u %3u %3u %3u %3u %1u %6u\n",
+ shaper_para.ir_b, shaper_para.ir_u, shaper_para.ir_s,
+ shaper_para.bs_b, shaper_para.bs_s, shaper_para.flag,
+ shaper_para.rate);
+
+ return 0;
+}
+
+static int hclge_dbg_dump_tm_bp_qset_map(struct hclge_dev *hdev, u8 tc_id,
+ char *buf, int len)
+{
+ u32 qset_mapping[HCLGE_BP_EXT_GRP_NUM];
+ struct hclge_bp_to_qs_map_cmd *map;
+ struct hclge_desc desc;
+ int pos = 0;
+ u8 group_id;
+ u8 grp_num;
+ u16 i = 0;
+ int ret;
+
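+	/* Devices with more TQPs than the V2 limit use the extended
+	 * backpressure group number.
+	 */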
+ grp_num = hdev->num_tqps <= HCLGE_TQP_MAX_SIZE_DEV_V2 ?
+ HCLGE_BP_GRP_NUM : HCLGE_BP_EXT_GRP_NUM;
+ map = (struct hclge_bp_to_qs_map_cmd *)desc.data;
+ for (group_id = 0; group_id < grp_num; group_id++) {
+ hclge_cmd_setup_basic_desc(&desc,
+ HCLGE_OPC_TM_BP_TO_QSET_MAPPING,
+ true);
+ map->tc_id = tc_id;
+ map->qs_group_id = group_id;
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get bp to qset map, ret = %d\n",
+ ret);
+ return ret;
+ }
+
+ qset_mapping[group_id] = le32_to_cpu(map->qs_bit_map);
+ }
+
+ pos += scnprintf(buf + pos, len - pos, "INDEX | TM BP QSET MAPPING:\n");
+ for (group_id = 0; group_id < grp_num / 8; group_id++) {
+ pos += scnprintf(buf + pos, len - pos,
+ "%04d | %08x:%08x:%08x:%08x:%08x:%08x:%08x:%08x\n",
+ group_id * 256, qset_mapping[i + 7],
+ qset_mapping[i + 6], qset_mapping[i + 5],
+ qset_mapping[i + 4], qset_mapping[i + 3],
+ qset_mapping[i + 2], qset_mapping[i + 1],
+ qset_mapping[i]);
+ i += 8;
+ }
+
+ return pos;
+}
+
+static int hclge_dbg_dump_tm_map(struct hclge_dev *hdev, char *buf, int len)
+{
+ u16 queue_id;
+ u16 qset_id;
+ u8 link_vld;
+ int pos = 0;
+ u8 pri_id;
+ u8 tc_id;
+ int ret;
+
+ for (queue_id = 0; queue_id < hdev->num_tqps; queue_id++) {
+ ret = hclge_tm_get_q_to_qs_map(hdev, queue_id, &qset_id);
+ if (ret)
+ return ret;
+
+ ret = hclge_tm_get_qset_map_pri(hdev, qset_id, &pri_id,
+ &link_vld);
+ if (ret)
+ return ret;
+
+ ret = hclge_tm_get_q_to_tc(hdev, queue_id, &tc_id);
+ if (ret)
+ return ret;
+
+ pos += scnprintf(buf + pos, len - pos,
+ "QUEUE_ID QSET_ID PRI_ID TC_ID\n");
+ pos += scnprintf(buf + pos, len - pos,
+ "%04u %4u %3u %2u\n",
+ queue_id, qset_id, pri_id, tc_id);
+
+ if (!hnae3_dev_dcb_supported(hdev))
+ continue;
+
+ ret = hclge_dbg_dump_tm_bp_qset_map(hdev, tc_id, buf + pos,
+ len - pos);
+ if (ret < 0)
+ return ret;
+ pos += ret;
+
+ pos += scnprintf(buf + pos, len - pos, "\n");
+ }
+
+ return 0;
+}
+
+static int hclge_dbg_dump_tm_nodes(struct hclge_dev *hdev, char *buf, int len)
+{
+ struct hclge_tm_nodes_cmd *nodes;
+ struct hclge_desc desc;
+ int pos = 0;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NODES, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to dump tm nodes, ret = %d\n", ret);
+ return ret;
+ }
+
+ nodes = (struct hclge_tm_nodes_cmd *)desc.data;
+
+ pos += scnprintf(buf + pos, len - pos, " BASE_ID MAX_NUM\n");
+ pos += scnprintf(buf + pos, len - pos, "PG %4u %4u\n",
+ nodes->pg_base_id, nodes->pg_num);
+ pos += scnprintf(buf + pos, len - pos, "PRI %4u %4u\n",
+ nodes->pri_base_id, nodes->pri_num);
+ pos += scnprintf(buf + pos, len - pos, "QSET %4u %4u\n",
+ le16_to_cpu(nodes->qset_base_id),
+ le16_to_cpu(nodes->qset_num));
+ pos += scnprintf(buf + pos, len - pos, "QUEUE %4u %4u\n",
+ le16_to_cpu(nodes->queue_base_id),
+ le16_to_cpu(nodes->queue_num));
+
+ return 0;
+}
+
+static const struct hclge_dbg_item tm_pri_items[] = {
+ { "ID", 4 },
+ { "MODE", 2 },
+ { "DWRR", 2 },
+ { "C_IR_B", 2 },
+ { "C_IR_U", 2 },
+ { "C_IR_S", 2 },
+ { "C_BS_B", 2 },
+ { "C_BS_S", 2 },
+ { "C_FLAG", 2 },
+ { "C_RATE(Mbps)", 2 },
+ { "P_IR_B", 2 },
+ { "P_IR_U", 2 },
+ { "P_IR_S", 2 },
+ { "P_BS_B", 2 },
+ { "P_BS_S", 2 },
+ { "P_FLAG", 2 },
+ { "P_RATE(Mbps)", 0 }
+};
+
+static int hclge_dbg_dump_tm_pri(struct hclge_dev *hdev, char *buf, int len)
+{
+ char data_str[ARRAY_SIZE(tm_pri_items)][HCLGE_DBG_DATA_STR_LEN];
+ struct hclge_tm_shaper_para c_shaper_para, p_shaper_para;
+ char *result[ARRAY_SIZE(tm_pri_items)], *sch_mode_str;
+ char content[HCLGE_DBG_TM_INFO_LEN];
+ u8 pri_num, sch_mode, weight, i, j;
+ int pos, ret;
+
+ ret = hclge_tm_get_pri_num(hdev, &pri_num);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(tm_pri_items); i++)
+ result[i] = &data_str[i][0];
+
+ hclge_dbg_fill_content(content, sizeof(content), tm_pri_items,
+ NULL, ARRAY_SIZE(tm_pri_items));
+ pos = scnprintf(buf, len, "%s", content);
+
+ for (i = 0; i < pri_num; i++) {
+ ret = hclge_tm_get_pri_sch_mode(hdev, i, &sch_mode);
+ if (ret)
+ return ret;
+
+ ret = hclge_tm_get_pri_weight(hdev, i, &weight);
+ if (ret)
+ return ret;
+
+ ret = hclge_tm_get_pri_shaper(hdev, i,
+ HCLGE_OPC_TM_PRI_C_SHAPPING,
+ &c_shaper_para);
+ if (ret)
+ return ret;
+
+ ret = hclge_tm_get_pri_shaper(hdev, i,
+ HCLGE_OPC_TM_PRI_P_SHAPPING,
+ &p_shaper_para);
+ if (ret)
+ return ret;
+
+ sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" :
+ "sp";
+
+ j = 0;
+ sprintf(result[j++], "%04u", i);
+ sprintf(result[j++], "%4s", sch_mode_str);
+ sprintf(result[j++], "%3u", weight);
+ hclge_dbg_fill_shaper_content(&c_shaper_para, result, &j);
+ hclge_dbg_fill_shaper_content(&p_shaper_para, result, &j);
+ hclge_dbg_fill_content(content, sizeof(content), tm_pri_items,
+ (const char **)result,
+ ARRAY_SIZE(tm_pri_items));
+ pos += scnprintf(buf + pos, len - pos, "%s", content);
+ }
+
+ return 0;
+}
+
+static const struct hclge_dbg_item tm_qset_items[] = {
+ { "ID", 4 },
+ { "MAP_PRI", 2 },
+ { "LINK_VLD", 2 },
+ { "MODE", 2 },
+ { "DWRR", 2 },
+ { "IR_B", 2 },
+ { "IR_U", 2 },
+ { "IR_S", 2 },
+ { "BS_B", 2 },
+ { "BS_S", 2 },
+ { "FLAG", 2 },
+ { "RATE(Mbps)", 0 }
+};
+
+static int hclge_dbg_dump_tm_qset(struct hclge_dev *hdev, char *buf, int len)
+{
+ char data_str[ARRAY_SIZE(tm_qset_items)][HCLGE_DBG_DATA_STR_LEN];
+ char *result[ARRAY_SIZE(tm_qset_items)], *sch_mode_str;
+ u8 priority, link_vld, sch_mode, weight;
+ struct hclge_tm_shaper_para shaper_para;
+ char content[HCLGE_DBG_TM_INFO_LEN];
+ u16 qset_num, i;
+ int ret, pos;
+ u8 j;
+
+ ret = hclge_tm_get_qset_num(hdev, &qset_num);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(tm_qset_items); i++)
+ result[i] = &data_str[i][0];
+
+ hclge_dbg_fill_content(content, sizeof(content), tm_qset_items,
+ NULL, ARRAY_SIZE(tm_qset_items));
+ pos = scnprintf(buf, len, "%s", content);
+
+ for (i = 0; i < qset_num; i++) {
+ ret = hclge_tm_get_qset_map_pri(hdev, i, &priority, &link_vld);
+ if (ret)
+ return ret;
+
+ ret = hclge_tm_get_qset_sch_mode(hdev, i, &sch_mode);
+ if (ret)
+ return ret;
+
+ ret = hclge_tm_get_qset_weight(hdev, i, &weight);
+ if (ret)
+ return ret;
+
+ ret = hclge_tm_get_qset_shaper(hdev, i, &shaper_para);
+ if (ret)
+ return ret;
+
+ sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" :
+ "sp";
+
+ j = 0;
+ sprintf(result[j++], "%04u", i);
+ sprintf(result[j++], "%4u", priority);
+ sprintf(result[j++], "%4u", link_vld);
+ sprintf(result[j++], "%4s", sch_mode_str);
+ sprintf(result[j++], "%3u", weight);
+ hclge_dbg_fill_shaper_content(&shaper_para, result, &j);
+
+ hclge_dbg_fill_content(content, sizeof(content), tm_qset_items,
+ (const char **)result,
+ ARRAY_SIZE(tm_qset_items));
+ pos += scnprintf(buf + pos, len - pos, "%s", content);
+ }
+
+ return 0;
+}
+
+static int hclge_dbg_dump_qos_pause_cfg(struct hclge_dev *hdev, char *buf,
+ int len)
+{
+ struct hclge_cfg_pause_param_cmd *pause_param;
+ struct hclge_desc desc;
+ int pos = 0;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to dump qos pause, ret = %d\n", ret);
+ return ret;
+ }
+
+ pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;
+
+ pos += scnprintf(buf + pos, len - pos, "pause_trans_gap: 0x%x\n",
+ pause_param->pause_trans_gap);
+ pos += scnprintf(buf + pos, len - pos, "pause_trans_time: 0x%x\n",
+ le16_to_cpu(pause_param->pause_trans_time));
+ return 0;
+}
+
+#define HCLGE_DBG_TC_MASK 0x0F
+
+static int hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev, char *buf,
+ int len)
+{
+#define HCLGE_DBG_TC_BIT_WIDTH 4
+
+ struct hclge_qos_pri_map_cmd *pri_map;
+ struct hclge_desc desc;
+ int pos = 0;
+ u8 *pri_tc;
+ u8 tc, i;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to dump qos pri map, ret = %d\n", ret);
+ return ret;
+ }
+
+ pri_map = (struct hclge_qos_pri_map_cmd *)desc.data;
+
+ pos += scnprintf(buf + pos, len - pos, "vlan_to_pri: 0x%x\n",
+ pri_map->vlan_pri);
+ pos += scnprintf(buf + pos, len - pos, "PRI TC\n");
+
+ pri_tc = (u8 *)pri_map;
+ for (i = 0; i < HNAE3_MAX_TC; i++) {
+ tc = pri_tc[i >> 1] >> ((i & 1) * HCLGE_DBG_TC_BIT_WIDTH);
+ tc &= HCLGE_DBG_TC_MASK;
+ pos += scnprintf(buf + pos, len - pos, "%u %u\n", i, tc);
+ }
+
+ return 0;
+}
+
+static int hclge_dbg_dump_qos_dscp_map(struct hclge_dev *hdev, char *buf,
+ int len)
+{
+ struct hnae3_knic_private_info *kinfo = &hdev->vport[0].nic.kinfo;
+ struct hclge_desc desc[HCLGE_DSCP_MAP_TC_BD_NUM];
+ u8 *req0 = (u8 *)desc[0].data;
+ u8 *req1 = (u8 *)desc[1].data;
+ u8 dscp_tc[HNAE3_MAX_DSCP];
+ int pos, ret;
+ u8 i, j;
+
+ pos = scnprintf(buf, len, "tc map mode: %s\n",
+ tc_map_mode_str[kinfo->tc_map_mode]);
+
+ if (kinfo->tc_map_mode != HNAE3_TC_MAP_MODE_DSCP)
+ return 0;
+
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QOS_MAP, true);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_QOS_MAP, true);
+ ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_DSCP_MAP_TC_BD_NUM);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to dump qos dscp map, ret = %d\n", ret);
+ return ret;
+ }
+
+ pos += scnprintf(buf + pos, len - pos, "\nDSCP PRIO TC\n");
+
+	/* The low 32 DSCP settings use bd0, the high 32 use bd1 */
+ for (i = 0; i < HNAE3_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM; i++) {
+ j = i + HNAE3_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM;
+		/* Each DSCP setting takes 4 bits, so each byte holds two
+		 * DSCP settings.
+		 */
+ dscp_tc[i] = req0[i >> 1] >> HCLGE_DSCP_TC_SHIFT(i);
+ dscp_tc[j] = req1[i >> 1] >> HCLGE_DSCP_TC_SHIFT(i);
+ dscp_tc[i] &= HCLGE_DBG_TC_MASK;
+ dscp_tc[j] &= HCLGE_DBG_TC_MASK;
+ }
+
+ for (i = 0; i < HNAE3_MAX_DSCP; i++) {
+ if (kinfo->dscp_prio[i] == HNAE3_PRIO_ID_INVALID)
+ continue;
+
+ pos += scnprintf(buf + pos, len - pos, " %2u %u %u\n",
+ i, kinfo->dscp_prio[i], dscp_tc[i]);
+ }
+
+ return 0;
+}
+
+static int hclge_dbg_dump_tx_buf_cfg(struct hclge_dev *hdev, char *buf, int len)
+{
+ struct hclge_tx_buff_alloc_cmd *tx_buf_cmd;
+ struct hclge_desc desc;
+ int pos = 0;
+ int i, ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to dump tx buf, ret = %d\n", ret);
+ return ret;
+ }
+
+ tx_buf_cmd = (struct hclge_tx_buff_alloc_cmd *)desc.data;
+ for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
+ pos += scnprintf(buf + pos, len - pos,
+ "tx_packet_buf_tc_%d: 0x%x\n", i,
+ le16_to_cpu(tx_buf_cmd->tx_pkt_buff[i]));
+
+ return pos;
+}
+
+static int hclge_dbg_dump_rx_priv_buf_cfg(struct hclge_dev *hdev, char *buf,
+ int len)
+{
+ struct hclge_rx_priv_buff_cmd *rx_buf_cmd;
+ struct hclge_desc desc;
+ int pos = 0;
+ int i, ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to dump rx priv buf, ret = %d\n", ret);
+ return ret;
+ }
+
+ pos += scnprintf(buf + pos, len - pos, "\n");
+
+ rx_buf_cmd = (struct hclge_rx_priv_buff_cmd *)desc.data;
+ for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
+ pos += scnprintf(buf + pos, len - pos,
+ "rx_packet_buf_tc_%d: 0x%x\n", i,
+ le16_to_cpu(rx_buf_cmd->buf_num[i]));
+
+ pos += scnprintf(buf + pos, len - pos, "rx_share_buf: 0x%x\n",
+ le16_to_cpu(rx_buf_cmd->shared_buf));
+
+ return pos;
+}
+
+static int hclge_dbg_dump_rx_common_wl_cfg(struct hclge_dev *hdev, char *buf,
+ int len)
+{
+ struct hclge_rx_com_wl *rx_com_wl;
+ struct hclge_desc desc;
+ int pos = 0;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to dump rx common wl, ret = %d\n", ret);
+ return ret;
+ }
+
+ rx_com_wl = (struct hclge_rx_com_wl *)desc.data;
+ pos += scnprintf(buf + pos, len - pos, "\n");
+ pos += scnprintf(buf + pos, len - pos,
+ "rx_com_wl: high: 0x%x, low: 0x%x\n",
+ le16_to_cpu(rx_com_wl->com_wl.high),
+ le16_to_cpu(rx_com_wl->com_wl.low));
+
+ return pos;
+}
+
+static int hclge_dbg_dump_rx_global_pkt_cnt(struct hclge_dev *hdev, char *buf,
+ int len)
+{
+ struct hclge_rx_com_wl *rx_packet_cnt;
+ struct hclge_desc desc;
+ int pos = 0;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_GBL_PKT_CNT, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to dump rx global pkt cnt, ret = %d\n", ret);
+ return ret;
+ }
+
+ rx_packet_cnt = (struct hclge_rx_com_wl *)desc.data;
+ pos += scnprintf(buf + pos, len - pos,
+ "rx_global_packet_cnt: high: 0x%x, low: 0x%x\n",
+ le16_to_cpu(rx_packet_cnt->com_wl.high),
+ le16_to_cpu(rx_packet_cnt->com_wl.low));
+
+ return pos;
+}
+
+static int hclge_dbg_dump_rx_priv_wl_buf_cfg(struct hclge_dev *hdev, char *buf,
+ int len)
+{
+ struct hclge_rx_priv_wl_buf *rx_priv_wl;
+ struct hclge_desc desc[2];
+ int pos = 0;
+ int i, ret;
+
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_RX_PRIV_WL_ALLOC, true);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_RX_PRIV_WL_ALLOC, true);
+ ret = hclge_cmd_send(&hdev->hw, desc, 2);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to dump rx priv wl buf, ret = %d\n", ret);
+ return ret;
+ }
+
+ rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[0].data;
+ for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
+ pos += scnprintf(buf + pos, len - pos,
+ "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", i,
+ le16_to_cpu(rx_priv_wl->tc_wl[i].high),
+ le16_to_cpu(rx_priv_wl->tc_wl[i].low));
+
+ rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[1].data;
+ for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
+ pos += scnprintf(buf + pos, len - pos,
+ "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n",
+ i + HCLGE_TC_NUM_ONE_DESC,
+ le16_to_cpu(rx_priv_wl->tc_wl[i].high),
+ le16_to_cpu(rx_priv_wl->tc_wl[i].low));
+
+ return pos;
+}
+
+static int hclge_dbg_dump_rx_common_threshold_cfg(struct hclge_dev *hdev,
+ char *buf, int len)
+{
+ struct hclge_rx_com_thrd *rx_com_thrd;
+ struct hclge_desc desc[2];
+ int pos = 0;
+ int i, ret;
+
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_RX_COM_THRD_ALLOC, true);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_RX_COM_THRD_ALLOC, true);
+ ret = hclge_cmd_send(&hdev->hw, desc, 2);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to dump rx common threshold, ret = %d\n", ret);
+ return ret;
+ }
+
+ pos += scnprintf(buf + pos, len - pos, "\n");
+ rx_com_thrd = (struct hclge_rx_com_thrd *)desc[0].data;
+ for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
+ pos += scnprintf(buf + pos, len - pos,
+ "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", i,
+ le16_to_cpu(rx_com_thrd->com_thrd[i].high),
+ le16_to_cpu(rx_com_thrd->com_thrd[i].low));
+
+ rx_com_thrd = (struct hclge_rx_com_thrd *)desc[1].data;
+ for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
+ pos += scnprintf(buf + pos, len - pos,
+ "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n",
+ i + HCLGE_TC_NUM_ONE_DESC,
+ le16_to_cpu(rx_com_thrd->com_thrd[i].high),
+ le16_to_cpu(rx_com_thrd->com_thrd[i].low));
+
+ return pos;
+}
+
+static int hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev, char *buf,
+ int len)
+{
+ int pos = 0;
+ int ret;
+
+ ret = hclge_dbg_dump_tx_buf_cfg(hdev, buf + pos, len - pos);
+ if (ret < 0)
+ return ret;
+ pos += ret;
+
+ ret = hclge_dbg_dump_rx_priv_buf_cfg(hdev, buf + pos, len - pos);
+ if (ret < 0)
+ return ret;
+ pos += ret;
+
+ ret = hclge_dbg_dump_rx_common_wl_cfg(hdev, buf + pos, len - pos);
+ if (ret < 0)
+ return ret;
+ pos += ret;
+
+ ret = hclge_dbg_dump_rx_global_pkt_cnt(hdev, buf + pos, len - pos);
+ if (ret < 0)
+ return ret;
+ pos += ret;
+
+ pos += scnprintf(buf + pos, len - pos, "\n");
+ if (!hnae3_dev_dcb_supported(hdev))
+ return 0;
+
+ ret = hclge_dbg_dump_rx_priv_wl_buf_cfg(hdev, buf + pos, len - pos);
+ if (ret < 0)
+ return ret;
+ pos += ret;
+
+ ret = hclge_dbg_dump_rx_common_threshold_cfg(hdev, buf + pos,
+ len - pos);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int hclge_dbg_dump_mng_table(struct hclge_dev *hdev, char *buf, int len)
+{
+ struct hclge_mac_ethertype_idx_rd_cmd *req0;
+ struct hclge_desc desc;
+ u32 msg_egress_port;
+ int pos = 0;
+ int ret, i;
+
+ pos += scnprintf(buf + pos, len - pos,
+ "entry mac_addr mask ether ");
+ pos += scnprintf(buf + pos, len - pos,
+ "mask vlan mask i_map i_dir e_type ");
+ pos += scnprintf(buf + pos, len - pos, "pf_id vf_id q_id drop\n");
+
+ for (i = 0; i < HCLGE_DBG_MNG_TBL_MAX; i++) {
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_MAC_ETHERTYPE_IDX_RD,
+ true);
+ req0 = (struct hclge_mac_ethertype_idx_rd_cmd *)&desc.data;
+ req0->index = cpu_to_le16(i);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to dump manage table, ret = %d\n", ret);
+ return ret;
+ }
+
+ if (!req0->resp_code)
+ continue;
+
+ pos += scnprintf(buf + pos, len - pos, "%02u %pM ",
+ le16_to_cpu(req0->index), req0->mac_addr);
+
+ pos += scnprintf(buf + pos, len - pos,
+ "%x %04x %x %04x ",
+ !!(req0->flags & HCLGE_DBG_MNG_MAC_MASK_B),
+ le16_to_cpu(req0->ethter_type),
+ !!(req0->flags & HCLGE_DBG_MNG_ETHER_MASK_B),
+ le16_to_cpu(req0->vlan_tag) &
+ HCLGE_DBG_MNG_VLAN_TAG);
+
+ pos += scnprintf(buf + pos, len - pos,
+ "%x %02x %02x ",
+ !!(req0->flags & HCLGE_DBG_MNG_VLAN_MASK_B),
+ req0->i_port_bitmap, req0->i_port_direction);
+
+ msg_egress_port = le16_to_cpu(req0->egress_port);
+ pos += scnprintf(buf + pos, len - pos,
+ "%x %x %02x %04x %x\n",
+ !!(msg_egress_port & HCLGE_DBG_MNG_E_TYPE_B),
+ msg_egress_port & HCLGE_DBG_MNG_PF_ID,
+ (msg_egress_port >> 3) & HCLGE_DBG_MNG_VF_ID,
+ le16_to_cpu(req0->egress_queue),
+ !!(msg_egress_port & HCLGE_DBG_MNG_DROP_B));
+ }
+
+ return 0;
+}
+
+#define HCLGE_DBG_TCAM_BUF_SIZE 256
+
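+/* Read one flow director TCAM entry (key x or key y, selected by sel_x)
+ * using a chain of three descriptors and format the raw key words into
+ * tcam_buf.
+ */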
+static int hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, bool sel_x,
+ char *tcam_buf,
+ struct hclge_dbg_tcam_msg tcam_msg)
+{
+ struct hclge_fd_tcam_config_1_cmd *req1;
+ struct hclge_fd_tcam_config_2_cmd *req2;
+ struct hclge_fd_tcam_config_3_cmd *req3;
+ struct hclge_desc desc[3];
+ int pos = 0;
+ int ret, i;
+ __le32 *req;
+
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, true);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, true);
+ desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, true);
+
+ req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
+ req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
+ req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
+
+ req1->stage = tcam_msg.stage;
+ req1->xy_sel = sel_x ? 1 : 0;
+ req1->index = cpu_to_le32(tcam_msg.loc);
+
+ ret = hclge_cmd_send(&hdev->hw, desc, 3);
+ if (ret)
+ return ret;
+
+ pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
+ "read result tcam key %s(%u):\n", sel_x ? "x" : "y",
+ tcam_msg.loc);
+
+ /* tcam_data0 ~ tcam_data1 */
+ req = (__le32 *)req1->tcam_data;
+ for (i = 0; i < 2; i++)
+ pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
+ "%08x\n", le32_to_cpu(*req++));
+
+ /* tcam_data2 ~ tcam_data7 */
+ req = (__le32 *)req2->tcam_data;
+ for (i = 0; i < 6; i++)
+ pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
+ "%08x\n", le32_to_cpu(*req++));
+
+ /* tcam_data8 ~ tcam_data12 */
+ req = (__le32 *)req3->tcam_data;
+ for (i = 0; i < 5; i++)
+ pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
+ "%08x\n", le32_to_cpu(*req++));
+
+ return ret;
+}
+
+static int hclge_dbg_get_rules_location(struct hclge_dev *hdev, u16 *rule_locs)
+{
+ struct hclge_fd_rule *rule;
+ struct hlist_node *node;
+ int cnt = 0;
+
+ spin_lock_bh(&hdev->fd_rule_lock);
+ hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
+ rule_locs[cnt] = rule->location;
+ cnt++;
+ }
+ spin_unlock_bh(&hdev->fd_rule_lock);
+
+ if (cnt != hdev->hclge_fd_rule_num || cnt == 0)
+ return -EINVAL;
+
+ return cnt;
+}
+
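+/* Snapshot the locations of all configured flow director rules, then dump
+ * the TCAM key x and key y words for each location.
+ */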
+static int hclge_dbg_dump_fd_tcam(struct hclge_dev *hdev, char *buf, int len)
+{
+ u32 rule_num = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
+ struct hclge_dbg_tcam_msg tcam_msg;
+ int i, ret, rule_cnt;
+ u16 *rule_locs;
+ char *tcam_buf;
+ int pos = 0;
+
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
+ dev_err(&hdev->pdev->dev,
+ "Only FD-supported dev supports dump fd tcam\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (!hdev->hclge_fd_rule_num || !rule_num)
+ return 0;
+
+ rule_locs = kcalloc(rule_num, sizeof(u16), GFP_KERNEL);
+ if (!rule_locs)
+ return -ENOMEM;
+
+ tcam_buf = kzalloc(HCLGE_DBG_TCAM_BUF_SIZE, GFP_KERNEL);
+ if (!tcam_buf) {
+ kfree(rule_locs);
+ return -ENOMEM;
+ }
+
+ rule_cnt = hclge_dbg_get_rules_location(hdev, rule_locs);
+ if (rule_cnt < 0) {
+ ret = rule_cnt;
+ dev_err(&hdev->pdev->dev,
+ "failed to get rule number, ret = %d\n", ret);
+ goto out;
+ }
+
+ ret = 0;
+ for (i = 0; i < rule_cnt; i++) {
+ tcam_msg.stage = HCLGE_FD_STAGE_1;
+ tcam_msg.loc = rule_locs[i];
+
+ ret = hclge_dbg_fd_tcam_read(hdev, true, tcam_buf, tcam_msg);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get fd tcam key x, ret = %d\n", ret);
+ goto out;
+ }
+
+ pos += scnprintf(buf + pos, len - pos, "%s", tcam_buf);
+
+ ret = hclge_dbg_fd_tcam_read(hdev, false, tcam_buf, tcam_msg);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get fd tcam key y, ret = %d\n", ret);
+ goto out;
+ }
+
+ pos += scnprintf(buf + pos, len - pos, "%s", tcam_buf);
+ }
+
+out:
+ kfree(tcam_buf);
+ kfree(rule_locs);
+ return ret;
+}
+
+static int hclge_dbg_dump_fd_counter(struct hclge_dev *hdev, char *buf, int len)
+{
+ u8 func_num = pci_num_vf(hdev->pdev) + 1; /* pf and enabled vf num */
+ struct hclge_fd_ad_cnt_read_cmd *req;
+ char str_id[HCLGE_DBG_ID_LEN];
+ struct hclge_desc desc;
+ int pos = 0;
+ int ret;
+ u64 cnt;
+ u8 i;
+
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
+ return -EOPNOTSUPP;
+
+ pos += scnprintf(buf + pos, len - pos,
+ "func_id\thit_times\n");
+
+ for (i = 0; i < func_num; i++) {
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_CNT_OP, true);
+ req = (struct hclge_fd_ad_cnt_read_cmd *)desc.data;
+ req->index = cpu_to_le16(i);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "failed to get fd counter, ret = %d\n",
+ ret);
+ return ret;
+ }
+ cnt = le64_to_cpu(req->cnt);
+ hclge_dbg_get_func_id_str(str_id, i);
+ pos += scnprintf(buf + pos, len - pos,
+ "%s\t%llu\n", str_id, cnt);
+ }
+
+ return 0;
+}
+
+static const struct hclge_dbg_status_dfx_info hclge_dbg_rst_info[] = {
+ {HCLGE_MISC_VECTOR_REG_BASE, "vector0 interrupt enable status"},
+ {HCLGE_MISC_RESET_STS_REG, "reset interrupt source"},
+ {HCLGE_MISC_VECTOR_INT_STS, "reset interrupt status"},
+ {HCLGE_RAS_PF_OTHER_INT_STS_REG, "RAS interrupt status"},
+ {HCLGE_GLOBAL_RESET_REG, "hardware reset status"},
+ {HCLGE_NIC_CSQ_DEPTH_REG, "handshake status"},
+ {HCLGE_FUN_RST_ING, "function reset status"}
+};
+
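+/* Print the reset statistics counters followed by the status registers
+ * listed in hclge_dbg_rst_info and the current hdev state bits.
+ */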
+int hclge_dbg_dump_rst_info(struct hclge_dev *hdev, char *buf, int len)
+{
+ u32 i, offset;
+ int pos = 0;
+
+ pos += scnprintf(buf + pos, len - pos, "PF reset count: %u\n",
+ hdev->rst_stats.pf_rst_cnt);
+ pos += scnprintf(buf + pos, len - pos, "FLR reset count: %u\n",
+ hdev->rst_stats.flr_rst_cnt);
+ pos += scnprintf(buf + pos, len - pos, "GLOBAL reset count: %u\n",
+ hdev->rst_stats.global_rst_cnt);
+ pos += scnprintf(buf + pos, len - pos, "IMP reset count: %u\n",
+ hdev->rst_stats.imp_rst_cnt);
+ pos += scnprintf(buf + pos, len - pos, "reset done count: %u\n",
+ hdev->rst_stats.reset_done_cnt);
+ pos += scnprintf(buf + pos, len - pos, "HW reset done count: %u\n",
+ hdev->rst_stats.hw_reset_done_cnt);
+ pos += scnprintf(buf + pos, len - pos, "reset count: %u\n",
+ hdev->rst_stats.reset_cnt);
+ pos += scnprintf(buf + pos, len - pos, "reset fail count: %u\n",
+ hdev->rst_stats.reset_fail_cnt);
+
+ for (i = 0; i < ARRAY_SIZE(hclge_dbg_rst_info); i++) {
+ offset = hclge_dbg_rst_info[i].offset;
+ pos += scnprintf(buf + pos, len - pos, "%s: 0x%x\n",
+ hclge_dbg_rst_info[i].message,
+ hclge_read_dev(&hdev->hw, offset));
+ }
+
+ pos += scnprintf(buf + pos, len - pos, "hdev state: 0x%lx\n",
+ hdev->state);
+
+ return 0;
+}
+
+static int hclge_dbg_dump_serv_info(struct hclge_dev *hdev, char *buf, int len)
+{
+ unsigned long rem_nsec;
+ int pos = 0;
+ u64 lc;
+
+ lc = local_clock();
+ rem_nsec = do_div(lc, HCLGE_BILLION_NANO_SECONDS);
+
+ pos += scnprintf(buf + pos, len - pos, "local_clock: [%5lu.%06lu]\n",
+ (unsigned long)lc, rem_nsec / 1000);
+ pos += scnprintf(buf + pos, len - pos, "delta: %u(ms)\n",
+ jiffies_to_msecs(jiffies - hdev->last_serv_processed));
+ pos += scnprintf(buf + pos, len - pos,
+ "last_service_task_processed: %lu(jiffies)\n",
+ hdev->last_serv_processed);
+ pos += scnprintf(buf + pos, len - pos, "last_service_task_cnt: %lu\n",
+ hdev->serv_processed_cnt);
+
+ return 0;
+}
+
+static int hclge_dbg_dump_interrupt(struct hclge_dev *hdev, char *buf, int len)
+{
+ int pos = 0;
+
+ pos += scnprintf(buf + pos, len - pos, "num_nic_msi: %u\n",
+ hdev->num_nic_msi);
+ pos += scnprintf(buf + pos, len - pos, "num_roce_msi: %u\n",
+ hdev->num_roce_msi);
+ pos += scnprintf(buf + pos, len - pos, "num_msi_used: %u\n",
+ hdev->num_msi_used);
+ pos += scnprintf(buf + pos, len - pos, "num_msi_left: %u\n",
+ hdev->num_msi_left);
+
+ return 0;
+}
+
+static void hclge_dbg_imp_info_data_print(struct hclge_desc *desc_src,
+ char *buf, int len, u32 bd_num)
+{
+#define HCLGE_DBG_IMP_INFO_PRINT_OFFSET 0x2
+
+ struct hclge_desc *desc_index = desc_src;
+ u32 offset = 0;
+ int pos = 0;
+ u32 i, j;
+
+ pos += scnprintf(buf + pos, len - pos, "offset | data\n");
+
+ for (i = 0; i < bd_num; i++) {
+ j = 0;
+ while (j < HCLGE_DESC_DATA_LEN - 1) {
+ pos += scnprintf(buf + pos, len - pos, "0x%04x | ",
+ offset);
+ pos += scnprintf(buf + pos, len - pos, "0x%08x ",
+ le32_to_cpu(desc_index->data[j++]));
+ pos += scnprintf(buf + pos, len - pos, "0x%08x\n",
+ le32_to_cpu(desc_index->data[j++]));
+ offset += sizeof(u32) * HCLGE_DBG_IMP_INFO_PRINT_OFFSET;
+ }
+ desc_index++;
+ }
+}
+
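+/* Query how many descriptors the IMP statistics occupy, read them in one
+ * command chain and print the data words as offset/value pairs.
+ */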
+static int
+hclge_dbg_get_imp_stats_info(struct hclge_dev *hdev, char *buf, int len)
+{
+ struct hclge_get_imp_bd_cmd *req;
+ struct hclge_desc *desc_src;
+ struct hclge_desc desc;
+ u32 bd_num;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_IMP_STATS_BD, true);
+
+ req = (struct hclge_get_imp_bd_cmd *)desc.data;
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get imp statistics bd number, ret = %d\n",
+ ret);
+ return ret;
+ }
+
+ bd_num = le32_to_cpu(req->bd_num);
+ if (!bd_num) {
+ dev_err(&hdev->pdev->dev, "imp statistics bd number is 0!\n");
+ return -EINVAL;
+ }
+
+ desc_src = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
+ if (!desc_src)
+ return -ENOMEM;
+
+ ret = hclge_dbg_cmd_send(hdev, desc_src, 0, bd_num,
+ HCLGE_OPC_IMP_STATS_INFO);
+ if (ret) {
+ kfree(desc_src);
+ dev_err(&hdev->pdev->dev,
+ "failed to get imp statistics, ret = %d\n", ret);
+ return ret;
+ }
+
+ hclge_dbg_imp_info_data_print(desc_src, buf, len, bd_num);
+
+ kfree(desc_src);
+
+ return 0;
+}
+
+#define HCLGE_CMD_NCL_CONFIG_BD_NUM 5
+#define HCLGE_MAX_NCL_CONFIG_LENGTH 16384
+
+static void hclge_ncl_config_data_print(struct hclge_desc *desc, int *index,
+ char *buf, int len, int *pos)
+{
+#define HCLGE_CMD_DATA_NUM 6
+
+ int offset = HCLGE_MAX_NCL_CONFIG_LENGTH - *index;
+ int i, j;
+
+ for (i = 0; i < HCLGE_CMD_NCL_CONFIG_BD_NUM; i++) {
+ for (j = 0; j < HCLGE_CMD_DATA_NUM; j++) {
+ if (i == 0 && j == 0)
+ continue;
+
+ *pos += scnprintf(buf + *pos, len - *pos,
+ "0x%04x | 0x%08x\n", offset,
+ le32_to_cpu(desc[i].data[j]));
+
+ offset += sizeof(u32);
+ *index -= sizeof(u32);
+
+ if (*index <= 0)
+ return;
+ }
+ }
+}
+
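+/* Dump the NCL configuration in chunks: the low 16 bits of data0 carry the
+ * read offset and the high 16 bits the number of bytes to read with each
+ * command.
+ */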
+static int
+hclge_dbg_dump_ncl_config(struct hclge_dev *hdev, char *buf, int len)
+{
+#define HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD (20 + 24 * 4)
+
+ struct hclge_desc desc[HCLGE_CMD_NCL_CONFIG_BD_NUM];
+ int bd_num = HCLGE_CMD_NCL_CONFIG_BD_NUM;
+ int index = HCLGE_MAX_NCL_CONFIG_LENGTH;
+ int pos = 0;
+ u32 data0;
+ int ret;
+
+ pos += scnprintf(buf + pos, len - pos, "offset | data\n");
+
+ while (index > 0) {
+ data0 = HCLGE_MAX_NCL_CONFIG_LENGTH - index;
+ if (index >= HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD)
+ data0 |= HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD << 16;
+ else
+ data0 |= (u32)index << 16;
+ ret = hclge_dbg_cmd_send(hdev, desc, data0, bd_num,
+ HCLGE_OPC_QUERY_NCL_CONFIG);
+ if (ret)
+ return ret;
+
+ hclge_ncl_config_data_print(desc, &index, buf, len, &pos);
+ }
+
+ return 0;
+}
+
+static int hclge_dbg_dump_loopback(struct hclge_dev *hdev, char *buf, int len)
+{
+ struct phy_device *phydev = hdev->hw.mac.phydev;
+ struct hclge_config_mac_mode_cmd *req_app;
+ struct hclge_common_lb_cmd *req_common;
+ struct hclge_desc desc;
+ u8 loopback_en;
+ int pos = 0;
+ int ret;
+
+ req_app = (struct hclge_config_mac_mode_cmd *)desc.data;
+ req_common = (struct hclge_common_lb_cmd *)desc.data;
+
+ pos += scnprintf(buf + pos, len - pos, "mac id: %u\n",
+ hdev->hw.mac.mac_id);
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to dump app loopback status, ret = %d\n", ret);
+ return ret;
+ }
+
+ loopback_en = hnae3_get_bit(le32_to_cpu(req_app->txrx_pad_fcs_loop_en),
+ HCLGE_MAC_APP_LP_B);
+ pos += scnprintf(buf + pos, len - pos, "app loopback: %s\n",
+ state_str[loopback_en]);
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to dump common loopback status, ret = %d\n",
+ ret);
+ return ret;
+ }
+
+ loopback_en = req_common->enable & HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
+ pos += scnprintf(buf + pos, len - pos, "serdes serial loopback: %s\n",
+ state_str[loopback_en]);
+
+ loopback_en = req_common->enable &
+ HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B ? 1 : 0;
+ pos += scnprintf(buf + pos, len - pos, "serdes parallel loopback: %s\n",
+ state_str[loopback_en]);
+
+ if (phydev) {
+ loopback_en = phydev->loopback_enabled;
+ pos += scnprintf(buf + pos, len - pos, "phy loopback: %s\n",
+ state_str[loopback_en]);
+ } else if (hnae3_dev_phy_imp_supported(hdev)) {
+ loopback_en = req_common->enable &
+ HCLGE_CMD_GE_PHY_INNER_LOOP_B;
+ pos += scnprintf(buf + pos, len - pos, "phy loopback: %s\n",
+ state_str[loopback_en]);
+ }
+
+ return 0;
+}
+
+/* hclge_dbg_dump_mac_tnl_status: print the recently recorded mac tnl
+ * interrupt events
+ * @hdev: pointer to struct hclge_dev
+ * @buf: buffer to write the dump into
+ * @len: length of @buf
+ */
+static int
+hclge_dbg_dump_mac_tnl_status(struct hclge_dev *hdev, char *buf, int len)
+{
+ struct hclge_mac_tnl_stats stats;
+ unsigned long rem_nsec;
+ int pos = 0;
+
+ pos += scnprintf(buf + pos, len - pos,
+ "Recently generated mac tnl interruption:\n");
+
+ while (kfifo_get(&hdev->mac_tnl_log, &stats)) {
+ rem_nsec = do_div(stats.time, HCLGE_BILLION_NANO_SECONDS);
+
+ pos += scnprintf(buf + pos, len - pos,
+ "[%07lu.%03lu] status = 0x%x\n",
+ (unsigned long)stats.time, rem_nsec / 1000,
+ stats.status);
+ }
+
+ return 0;
+}
+
+static const struct hclge_dbg_item mac_list_items[] = {
+ { "FUNC_ID", 2 },
+ { "MAC_ADDR", 12 },
+ { "STATE", 2 },
+};
+
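+/* Walk the unicast or multicast MAC list of every vport under its
+ * mac_list_lock and print one line per MAC node: function id, address and
+ * node state.
+ */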
+static void hclge_dbg_dump_mac_list(struct hclge_dev *hdev, char *buf, int len,
+ bool is_unicast)
+{
+ char data_str[ARRAY_SIZE(mac_list_items)][HCLGE_DBG_DATA_STR_LEN];
+ char content[HCLGE_DBG_INFO_LEN], str_id[HCLGE_DBG_ID_LEN];
+ char *result[ARRAY_SIZE(mac_list_items)];
+ struct hclge_mac_node *mac_node, *tmp;
+ struct hclge_vport *vport;
+ struct list_head *list;
+ u32 func_id;
+ int pos = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mac_list_items); i++)
+ result[i] = &data_str[i][0];
+
+ pos += scnprintf(buf + pos, len - pos, "%s MAC_LIST:\n",
+ is_unicast ? "UC" : "MC");
+ hclge_dbg_fill_content(content, sizeof(content), mac_list_items,
+ NULL, ARRAY_SIZE(mac_list_items));
+ pos += scnprintf(buf + pos, len - pos, "%s", content);
+
+ for (func_id = 0; func_id < hdev->num_alloc_vport; func_id++) {
+ vport = &hdev->vport[func_id];
+ list = is_unicast ? &vport->uc_mac_list : &vport->mc_mac_list;
+ spin_lock_bh(&vport->mac_list_lock);
+ list_for_each_entry_safe(mac_node, tmp, list, node) {
+ i = 0;
+ result[i++] = hclge_dbg_get_func_id_str(str_id,
+ func_id);
+ sprintf(result[i++], "%pM", mac_node->mac_addr);
+ sprintf(result[i++], "%5s",
+ hclge_mac_state_str[mac_node->state]);
+ hclge_dbg_fill_content(content, sizeof(content),
+ mac_list_items,
+ (const char **)result,
+ ARRAY_SIZE(mac_list_items));
+ pos += scnprintf(buf + pos, len - pos, "%s", content);
+ }
+ spin_unlock_bh(&vport->mac_list_lock);
+ }
+}
+
+static int hclge_dbg_dump_umv_info(struct hclge_dev *hdev, char *buf, int len)
+{
+ u8 func_num = pci_num_vf(hdev->pdev) + 1;
+ struct hclge_vport *vport;
+ int pos = 0;
+ u8 i;
+
+ pos += scnprintf(buf, len, "num_alloc_vport : %u\n",
+ hdev->num_alloc_vport);
+ pos += scnprintf(buf + pos, len - pos, "max_umv_size : %u\n",
+ hdev->max_umv_size);
+ pos += scnprintf(buf + pos, len - pos, "wanted_umv_size : %u\n",
+ hdev->wanted_umv_size);
+ pos += scnprintf(buf + pos, len - pos, "priv_umv_size : %u\n",
+ hdev->priv_umv_size);
+
+ mutex_lock(&hdev->vport_lock);
+ pos += scnprintf(buf + pos, len - pos, "share_umv_size : %u\n",
+ hdev->share_umv_size);
+ for (i = 0; i < func_num; i++) {
+ vport = &hdev->vport[i];
+ pos += scnprintf(buf + pos, len - pos,
+ "vport(%u) used_umv_num : %u\n",
+ i, vport->used_umv_num);
+ }
+ mutex_unlock(&hdev->vport_lock);
+
+ pos += scnprintf(buf + pos, len - pos, "used_mc_mac_num : %u\n",
+ hdev->used_mc_mac_num);
+
+ return 0;
+}
+
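+/* The rx/tx vlan offload query commands address VFs in groups: vf_offset
+ * selects a group of HCLGE_VF_NUM_PER_CMD functions and the bit set in
+ * vf_bitmap selects the function within that group.
+ */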
+static int hclge_get_vlan_rx_offload_cfg(struct hclge_dev *hdev, u8 vf_id,
+ struct hclge_dbg_vlan_cfg *vlan_cfg)
+{
+ struct hclge_vport_vtag_rx_cfg_cmd *req;
+ struct hclge_desc desc;
+ u16 bmap_index;
+ u8 rx_cfg;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, true);
+
+ req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
+ req->vf_offset = vf_id / HCLGE_VF_NUM_PER_CMD;
+ bmap_index = vf_id % HCLGE_VF_NUM_PER_CMD / HCLGE_VF_NUM_PER_BYTE;
+ req->vf_bitmap[bmap_index] = 1U << (vf_id % HCLGE_VF_NUM_PER_BYTE);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get vport%u rxvlan cfg, ret = %d\n",
+ vf_id, ret);
+ return ret;
+ }
+
+ rx_cfg = req->vport_vlan_cfg;
+ vlan_cfg->strip_tag1 = hnae3_get_bit(rx_cfg, HCLGE_REM_TAG1_EN_B);
+ vlan_cfg->strip_tag2 = hnae3_get_bit(rx_cfg, HCLGE_REM_TAG2_EN_B);
+ vlan_cfg->drop_tag1 = hnae3_get_bit(rx_cfg, HCLGE_DISCARD_TAG1_EN_B);
+ vlan_cfg->drop_tag2 = hnae3_get_bit(rx_cfg, HCLGE_DISCARD_TAG2_EN_B);
+ vlan_cfg->pri_only1 = hnae3_get_bit(rx_cfg, HCLGE_SHOW_TAG1_EN_B);
+ vlan_cfg->pri_only2 = hnae3_get_bit(rx_cfg, HCLGE_SHOW_TAG2_EN_B);
+
+ return 0;
+}
+
+static int hclge_get_vlan_tx_offload_cfg(struct hclge_dev *hdev, u8 vf_id,
+ struct hclge_dbg_vlan_cfg *vlan_cfg)
+{
+ struct hclge_vport_vtag_tx_cfg_cmd *req;
+ struct hclge_desc desc;
+ u16 bmap_index;
+ u8 tx_cfg;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, true);
+ req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
+ req->vf_offset = vf_id / HCLGE_VF_NUM_PER_CMD;
+ bmap_index = vf_id % HCLGE_VF_NUM_PER_CMD / HCLGE_VF_NUM_PER_BYTE;
+ req->vf_bitmap[bmap_index] = 1U << (vf_id % HCLGE_VF_NUM_PER_BYTE);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get vport%u txvlan cfg, ret = %d\n",
+ vf_id, ret);
+ return ret;
+ }
+
+ tx_cfg = req->vport_vlan_cfg;
+ vlan_cfg->pvid = le16_to_cpu(req->def_vlan_tag1);
+
+ vlan_cfg->accept_tag1 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_TAG1_B);
+ vlan_cfg->accept_tag2 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_TAG2_B);
+ vlan_cfg->accept_untag1 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_UNTAG1_B);
+ vlan_cfg->accept_untag2 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_UNTAG2_B);
+ vlan_cfg->insert_tag1 = hnae3_get_bit(tx_cfg, HCLGE_PORT_INS_TAG1_EN_B);
+ vlan_cfg->insert_tag2 = hnae3_get_bit(tx_cfg, HCLGE_PORT_INS_TAG2_EN_B);
+ vlan_cfg->shift_tag = hnae3_get_bit(tx_cfg, HCLGE_TAG_SHIFT_MODE_EN_B);
+
+ return 0;
+}
+
+static int hclge_get_vlan_filter_config_cmd(struct hclge_dev *hdev,
+ u8 vlan_type, u8 vf_id,
+ struct hclge_desc *desc)
+{
+ struct hclge_vlan_filter_ctrl_cmd *req;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
+ req = (struct hclge_vlan_filter_ctrl_cmd *)desc->data;
+ req->vlan_type = vlan_type;
+ req->vf_id = vf_id;
+
+ ret = hclge_cmd_send(&hdev->hw, desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "failed to get vport%u vlan filter config, ret = %d.\n",
+ vf_id, ret);
+
+ return ret;
+}
+
+static int hclge_get_vlan_filter_state(struct hclge_dev *hdev, u8 vlan_type,
+ u8 vf_id, u8 *vlan_fe)
+{
+ struct hclge_vlan_filter_ctrl_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ ret = hclge_get_vlan_filter_config_cmd(hdev, vlan_type, vf_id, &desc);
+ if (ret)
+ return ret;
+
+ req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
+ *vlan_fe = req->vlan_fe;
+
+ return 0;
+}
+
+static int hclge_get_port_vlan_filter_bypass_state(struct hclge_dev *hdev,
+ u8 vf_id, u8 *bypass_en)
+{
+ struct hclge_port_vlan_filter_bypass_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ if (!test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, hdev->ae_dev->caps))
+ return 0;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PORT_VLAN_BYPASS, true);
+ req = (struct hclge_port_vlan_filter_bypass_cmd *)desc.data;
+ req->vf_id = vf_id;
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get vport%u port vlan filter bypass state, ret = %d.\n",
+ vf_id, ret);
+ return ret;
+ }
+
+ *bypass_en = hnae3_get_bit(req->bypass_state, HCLGE_INGRESS_BYPASS_B);
+
+ return 0;
+}
+
+static const struct hclge_dbg_item vlan_filter_items[] = {
+ { "FUNC_ID", 2 },
+ { "I_VF_VLAN_FILTER", 2 },
+ { "E_VF_VLAN_FILTER", 2 },
+ { "PORT_VLAN_FILTER_BYPASS", 0 }
+};
+
+static const struct hclge_dbg_item vlan_offload_items[] = {
+ { "FUNC_ID", 2 },
+ { "PVID", 4 },
+ { "ACCEPT_TAG1", 2 },
+ { "ACCEPT_TAG2", 2 },
+ { "ACCEPT_UNTAG1", 2 },
+ { "ACCEPT_UNTAG2", 2 },
+ { "INSERT_TAG1", 2 },
+ { "INSERT_TAG2", 2 },
+ { "SHIFT_TAG", 2 },
+ { "STRIP_TAG1", 2 },
+ { "STRIP_TAG2", 2 },
+ { "DROP_TAG1", 2 },
+ { "DROP_TAG2", 2 },
+ { "PRI_ONLY_TAG1", 2 },
+ { "PRI_ONLY_TAG2", 0 }
+};
+
+static int hclge_dbg_dump_vlan_filter_config(struct hclge_dev *hdev, char *buf,
+ int len, int *pos)
+{
+ char content[HCLGE_DBG_VLAN_FLTR_INFO_LEN], str_id[HCLGE_DBG_ID_LEN];
+ const char *result[ARRAY_SIZE(vlan_filter_items)];
+ u8 i, j, vlan_fe, bypass, ingress, egress;
+ u8 func_num = pci_num_vf(hdev->pdev) + 1; /* pf and enabled vf num */
+ int ret;
+
+ ret = hclge_get_vlan_filter_state(hdev, HCLGE_FILTER_TYPE_PORT, 0,
+ &vlan_fe);
+ if (ret)
+ return ret;
+ ingress = vlan_fe & HCLGE_FILTER_FE_NIC_INGRESS_B;
+ egress = vlan_fe & HCLGE_FILTER_FE_NIC_EGRESS_B ? 1 : 0;
+
+ *pos += scnprintf(buf, len, "I_PORT_VLAN_FILTER: %s\n",
+ state_str[ingress]);
+ *pos += scnprintf(buf + *pos, len - *pos, "E_PORT_VLAN_FILTER: %s\n",
+ state_str[egress]);
+
+ hclge_dbg_fill_content(content, sizeof(content), vlan_filter_items,
+ NULL, ARRAY_SIZE(vlan_filter_items));
+ *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
+
+ for (i = 0; i < func_num; i++) {
+ ret = hclge_get_vlan_filter_state(hdev, HCLGE_FILTER_TYPE_VF, i,
+ &vlan_fe);
+ if (ret)
+ return ret;
+
+ ingress = vlan_fe & HCLGE_FILTER_FE_NIC_INGRESS_B;
+ egress = vlan_fe & HCLGE_FILTER_FE_NIC_EGRESS_B ? 1 : 0;
+ ret = hclge_get_port_vlan_filter_bypass_state(hdev, i, &bypass);
+ if (ret)
+ return ret;
+ j = 0;
+ result[j++] = hclge_dbg_get_func_id_str(str_id, i);
+ result[j++] = state_str[ingress];
+ result[j++] = state_str[egress];
+ result[j++] =
+ test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B,
+ hdev->ae_dev->caps) ? state_str[bypass] : "NA";
+ hclge_dbg_fill_content(content, sizeof(content),
+ vlan_filter_items, result,
+ ARRAY_SIZE(vlan_filter_items));
+ *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
+ }
+ *pos += scnprintf(buf + *pos, len - *pos, "\n");
+
+ return 0;
+}
+
+static int hclge_dbg_dump_vlan_offload_config(struct hclge_dev *hdev, char *buf,
+ int len, int *pos)
+{
+ char str_id[HCLGE_DBG_ID_LEN], str_pvid[HCLGE_DBG_ID_LEN];
+ const char *result[ARRAY_SIZE(vlan_offload_items)];
+ char content[HCLGE_DBG_VLAN_OFFLOAD_INFO_LEN];
+ u8 func_num = pci_num_vf(hdev->pdev) + 1; /* pf and enabled vf num */
+ struct hclge_dbg_vlan_cfg vlan_cfg;
+ int ret;
+ u8 i, j;
+
+ hclge_dbg_fill_content(content, sizeof(content), vlan_offload_items,
+ NULL, ARRAY_SIZE(vlan_offload_items));
+ *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
+
+ for (i = 0; i < func_num; i++) {
+ ret = hclge_get_vlan_tx_offload_cfg(hdev, i, &vlan_cfg);
+ if (ret)
+ return ret;
+
+ ret = hclge_get_vlan_rx_offload_cfg(hdev, i, &vlan_cfg);
+ if (ret)
+ return ret;
+
+ sprintf(str_pvid, "%u", vlan_cfg.pvid);
+ j = 0;
+ result[j++] = hclge_dbg_get_func_id_str(str_id, i);
+ result[j++] = str_pvid;
+ result[j++] = state_str[vlan_cfg.accept_tag1];
+ result[j++] = state_str[vlan_cfg.accept_tag2];
+ result[j++] = state_str[vlan_cfg.accept_untag1];
+ result[j++] = state_str[vlan_cfg.accept_untag2];
+ result[j++] = state_str[vlan_cfg.insert_tag1];
+ result[j++] = state_str[vlan_cfg.insert_tag2];
+ result[j++] = state_str[vlan_cfg.shift_tag];
+ result[j++] = state_str[vlan_cfg.strip_tag1];
+ result[j++] = state_str[vlan_cfg.strip_tag2];
+ result[j++] = state_str[vlan_cfg.drop_tag1];
+ result[j++] = state_str[vlan_cfg.drop_tag2];
+ result[j++] = state_str[vlan_cfg.pri_only1];
+ result[j++] = state_str[vlan_cfg.pri_only2];
+
+ hclge_dbg_fill_content(content, sizeof(content),
+ vlan_offload_items, result,
+ ARRAY_SIZE(vlan_offload_items));
+ *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
+ }
+
+ return 0;
+}
+
+static int hclge_dbg_dump_vlan_config(struct hclge_dev *hdev, char *buf,
+ int len)
+{
+ int pos = 0;
+ int ret;
+
+ ret = hclge_dbg_dump_vlan_filter_config(hdev, buf, len, &pos);
+ if (ret)
+ return ret;
+
+ return hclge_dbg_dump_vlan_offload_config(hdev, buf, len, &pos);
+}
+
+static int hclge_dbg_dump_ptp_info(struct hclge_dev *hdev, char *buf, int len)
+{
+ struct hclge_ptp *ptp = hdev->ptp;
+ u32 sw_cfg = ptp->ptp_cfg;
+ unsigned int tx_start;
+ unsigned int last_rx;
+ int pos = 0;
+ u32 hw_cfg;
+ int ret;
+
+ pos += scnprintf(buf + pos, len - pos, "phc %s's debug info:\n",
+ ptp->info.name);
+ pos += scnprintf(buf + pos, len - pos, "ptp enable: %s\n",
+ test_bit(HCLGE_PTP_FLAG_EN, &ptp->flags) ?
+ "yes" : "no");
+ pos += scnprintf(buf + pos, len - pos, "ptp tx enable: %s\n",
+ test_bit(HCLGE_PTP_FLAG_TX_EN, &ptp->flags) ?
+ "yes" : "no");
+ pos += scnprintf(buf + pos, len - pos, "ptp rx enable: %s\n",
+ test_bit(HCLGE_PTP_FLAG_RX_EN, &ptp->flags) ?
+ "yes" : "no");
+
+ last_rx = jiffies_to_msecs(ptp->last_rx);
+ pos += scnprintf(buf + pos, len - pos, "last rx time: %lu.%lu\n",
+ last_rx / MSEC_PER_SEC, last_rx % MSEC_PER_SEC);
+ pos += scnprintf(buf + pos, len - pos, "rx count: %lu\n", ptp->rx_cnt);
+
+ tx_start = jiffies_to_msecs(ptp->tx_start);
+ pos += scnprintf(buf + pos, len - pos, "last tx start time: %lu.%lu\n",
+ tx_start / MSEC_PER_SEC, tx_start % MSEC_PER_SEC);
+ pos += scnprintf(buf + pos, len - pos, "tx count: %lu\n", ptp->tx_cnt);
+ pos += scnprintf(buf + pos, len - pos, "tx skipped count: %lu\n",
+ ptp->tx_skipped);
+ pos += scnprintf(buf + pos, len - pos, "tx timeout count: %lu\n",
+ ptp->tx_timeout);
+ pos += scnprintf(buf + pos, len - pos, "last tx seqid: %u\n",
+ ptp->last_tx_seqid);
+
+ ret = hclge_ptp_cfg_qry(hdev, &hw_cfg);
+ if (ret)
+ return ret;
+
+ pos += scnprintf(buf + pos, len - pos, "sw_cfg: %#x, hw_cfg: %#x\n",
+ sw_cfg, hw_cfg);
+
+ pos += scnprintf(buf + pos, len - pos, "tx type: %d, rx filter: %d\n",
+ ptp->ts_cfg.tx_type, ptp->ts_cfg.rx_filter);
+
+ return 0;
+}
+
+static int hclge_dbg_dump_mac_uc(struct hclge_dev *hdev, char *buf, int len)
+{
+ hclge_dbg_dump_mac_list(hdev, buf, len, true);
+
+ return 0;
+}
+
+static int hclge_dbg_dump_mac_mc(struct hclge_dev *hdev, char *buf, int len)
+{
+ hclge_dbg_dump_mac_list(hdev, buf, len, false);
+
+ return 0;
+}
+
+static const struct hclge_dbg_func hclge_dbg_cmd_func[] = {
+ {
+ .cmd = HNAE3_DBG_CMD_TM_NODES,
+ .dbg_dump = hclge_dbg_dump_tm_nodes,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_TM_PRI,
+ .dbg_dump = hclge_dbg_dump_tm_pri,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_TM_QSET,
+ .dbg_dump = hclge_dbg_dump_tm_qset,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_TM_MAP,
+ .dbg_dump = hclge_dbg_dump_tm_map,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_TM_PG,
+ .dbg_dump = hclge_dbg_dump_tm_pg,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_TM_PORT,
+ .dbg_dump = hclge_dbg_dump_tm_port,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_TC_SCH_INFO,
+ .dbg_dump = hclge_dbg_dump_tc,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_QOS_PAUSE_CFG,
+ .dbg_dump = hclge_dbg_dump_qos_pause_cfg,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_QOS_PRI_MAP,
+ .dbg_dump = hclge_dbg_dump_qos_pri_map,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_QOS_DSCP_MAP,
+ .dbg_dump = hclge_dbg_dump_qos_dscp_map,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_QOS_BUF_CFG,
+ .dbg_dump = hclge_dbg_dump_qos_buf_cfg,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_MAC_UC,
+ .dbg_dump = hclge_dbg_dump_mac_uc,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_MAC_MC,
+ .dbg_dump = hclge_dbg_dump_mac_mc,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_MNG_TBL,
+ .dbg_dump = hclge_dbg_dump_mng_table,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_LOOPBACK,
+ .dbg_dump = hclge_dbg_dump_loopback,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_PTP_INFO,
+ .dbg_dump = hclge_dbg_dump_ptp_info,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_INTERRUPT_INFO,
+ .dbg_dump = hclge_dbg_dump_interrupt,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_RESET_INFO,
+ .dbg_dump = hclge_dbg_dump_rst_info,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_IMP_INFO,
+ .dbg_dump = hclge_dbg_get_imp_stats_info,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_NCL_CONFIG,
+ .dbg_dump = hclge_dbg_dump_ncl_config,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_REG_BIOS_COMMON,
+ .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_REG_SSU,
+ .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_REG_IGU_EGU,
+ .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_REG_RPU,
+ .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_REG_NCSI,
+ .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_REG_RTC,
+ .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_REG_PPP,
+ .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_REG_RCB,
+ .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_REG_TQP,
+ .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_REG_MAC,
+ .dbg_dump = hclge_dbg_dump_mac,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_REG_DCB,
+ .dbg_dump = hclge_dbg_dump_dcb,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_FD_TCAM,
+ .dbg_dump = hclge_dbg_dump_fd_tcam,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_MAC_TNL_STATUS,
+ .dbg_dump = hclge_dbg_dump_mac_tnl_status,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_SERV_INFO,
+ .dbg_dump = hclge_dbg_dump_serv_info,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_VLAN_CONFIG,
+ .dbg_dump = hclge_dbg_dump_vlan_config,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_FD_COUNTER,
+ .dbg_dump = hclge_dbg_dump_fd_counter,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_UMV_INFO,
+ .dbg_dump = hclge_dbg_dump_umv_info,
+ },
+};
+
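+/* Dispatch a debugfs read to the matching entry in hclge_dbg_cmd_func:
+ * plain dumps go through dbg_dump(), register dumps through
+ * dbg_dump_reg().
+ */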
+int hclge_dbg_read_cmd(struct hnae3_handle *handle, enum hnae3_dbg_cmd cmd,
+ char *buf, int len)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ const struct hclge_dbg_func *cmd_func;
+ struct hclge_dev *hdev = vport->back;
+ u32 i;
+
+ for (i = 0; i < ARRAY_SIZE(hclge_dbg_cmd_func); i++) {
+ if (cmd == hclge_dbg_cmd_func[i].cmd) {
+ cmd_func = &hclge_dbg_cmd_func[i];
+ if (cmd_func->dbg_dump)
+ return cmd_func->dbg_dump(hdev, buf, len);
+ else
+ return cmd_func->dbg_dump_reg(hdev, cmd, buf,
+ len);
+ }
+ }
+
+ dev_err(&hdev->pdev->dev, "invalid command(%d)\n", cmd);
+ return -EINVAL;
+}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
new file mode 100644
index 0000000000..724052928b
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
@@ -0,0 +1,774 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2018-2019 Hisilicon Limited. */
+
+#ifndef __HCLGE_DEBUGFS_H
+#define __HCLGE_DEBUGFS_H
+
+#include <linux/etherdevice.h>
+#include "hclge_cmd.h"
+
+#define HCLGE_DBG_MNG_TBL_MAX 64
+
+#define HCLGE_DBG_MNG_VLAN_MASK_B BIT(0)
+#define HCLGE_DBG_MNG_MAC_MASK_B BIT(1)
+#define HCLGE_DBG_MNG_ETHER_MASK_B BIT(2)
+#define HCLGE_DBG_MNG_E_TYPE_B BIT(11)
+#define HCLGE_DBG_MNG_DROP_B BIT(13)
+#define HCLGE_DBG_MNG_VLAN_TAG 0x0FFF
+#define HCLGE_DBG_MNG_PF_ID 0x0007
+#define HCLGE_DBG_MNG_VF_ID 0x00FF
+
+/* Get DFX BD number offset */
+#define HCLGE_DBG_DFX_BIOS_OFFSET 1
+#define HCLGE_DBG_DFX_SSU_0_OFFSET 2
+#define HCLGE_DBG_DFX_SSU_1_OFFSET 3
+#define HCLGE_DBG_DFX_IGU_OFFSET 4
+#define HCLGE_DBG_DFX_RPU_0_OFFSET 5
+
+#define HCLGE_DBG_DFX_RPU_1_OFFSET 6
+#define HCLGE_DBG_DFX_NCSI_OFFSET 7
+#define HCLGE_DBG_DFX_RTC_OFFSET 8
+#define HCLGE_DBG_DFX_PPP_OFFSET 9
+#define HCLGE_DBG_DFX_RCB_OFFSET 10
+#define HCLGE_DBG_DFX_TQP_OFFSET 11
+
+#define HCLGE_DBG_DFX_SSU_2_OFFSET 12
+
+struct hclge_qos_pri_map_cmd {
+ u8 pri0_tc : 4,
+ pri1_tc : 4;
+ u8 pri2_tc : 4,
+ pri3_tc : 4;
+ u8 pri4_tc : 4,
+ pri5_tc : 4;
+ u8 pri6_tc : 4,
+ pri7_tc : 4;
+ u8 vlan_pri : 4,
+ rev : 4;
+};
+
+struct hclge_dbg_bitmap_cmd {
+ union {
+ u8 bitmap;
+ struct {
+ u8 bit0 : 1,
+ bit1 : 1,
+ bit2 : 1,
+ bit3 : 1,
+ bit4 : 1,
+ bit5 : 1,
+ bit6 : 1,
+ bit7 : 1;
+ };
+ };
+};
+
+struct hclge_dbg_reg_common_msg {
+ int msg_num;
+ int offset;
+ enum hclge_opcode_type cmd;
+};
+
+struct hclge_dbg_tcam_msg {
+ u8 stage;
+ u32 loc;
+};
+
+#define HCLGE_DBG_MAX_DFX_MSG_LEN 60
+struct hclge_dbg_dfx_message {
+ int flag;
+ char message[HCLGE_DBG_MAX_DFX_MSG_LEN];
+};
+
+#define HCLGE_DBG_MAC_REG_TYPE_LEN 32
+struct hclge_dbg_reg_type_info {
+ enum hnae3_dbg_cmd cmd;
+ const struct hclge_dbg_dfx_message *dfx_msg;
+ struct hclge_dbg_reg_common_msg reg_msg;
+};
+
+struct hclge_dbg_func {
+ enum hnae3_dbg_cmd cmd;
+ int (*dbg_dump)(struct hclge_dev *hdev, char *buf, int len);
+ int (*dbg_dump_reg)(struct hclge_dev *hdev, enum hnae3_dbg_cmd cmd,
+ char *buf, int len);
+};
+
+struct hclge_dbg_status_dfx_info {
+ u32 offset;
+ char message[HCLGE_DBG_MAX_DFX_MSG_LEN];
+};
+
+static const struct hclge_dbg_dfx_message hclge_dbg_bios_common_reg[] = {
+ {false, "Reserved"},
+ {true, "BP_CPU_STATE"},
+ {true, "DFX_MSIX_INFO_NIC_0"},
+ {true, "DFX_MSIX_INFO_NIC_1"},
+ {true, "DFX_MSIX_INFO_NIC_2"},
+ {true, "DFX_MSIX_INFO_NIC_3"},
+
+ {true, "DFX_MSIX_INFO_ROC_0"},
+ {true, "DFX_MSIX_INFO_ROC_1"},
+ {true, "DFX_MSIX_INFO_ROC_2"},
+ {true, "DFX_MSIX_INFO_ROC_3"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+};
+
+static const struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_0[] = {
+ {false, "Reserved"},
+ {true, "SSU_ETS_PORT_STATUS"},
+ {true, "SSU_ETS_TCG_STATUS"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {true, "SSU_BP_STATUS_0"},
+
+ {true, "SSU_BP_STATUS_1"},
+ {true, "SSU_BP_STATUS_2"},
+ {true, "SSU_BP_STATUS_3"},
+ {true, "SSU_BP_STATUS_4"},
+ {true, "SSU_BP_STATUS_5"},
+ {true, "SSU_MAC_TX_PFC_IND"},
+
+ {true, "MAC_SSU_RX_PFC_IND"},
+ {true, "BTMP_AGEING_ST_B0"},
+ {true, "BTMP_AGEING_ST_B1"},
+ {true, "BTMP_AGEING_ST_B2"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+
+ {true, "FULL_DROP_NUM"},
+ {true, "PART_DROP_NUM"},
+ {true, "PPP_KEY_DROP_NUM"},
+ {true, "PPP_RLT_DROP_NUM"},
+ {true, "LO_PRI_UNICAST_RLT_DROP_NUM"},
+ {true, "HI_PRI_MULTICAST_RLT_DROP_NUM"},
+
+ {true, "LO_PRI_MULTICAST_RLT_DROP_NUM"},
+ {true, "NCSI_PACKET_CURR_BUFFER_CNT"},
+ {true, "BTMP_AGEING_RLS_CNT_BANK0"},
+ {true, "BTMP_AGEING_RLS_CNT_BANK1"},
+ {true, "BTMP_AGEING_RLS_CNT_BANK2"},
+ {true, "SSU_MB_RD_RLT_DROP_CNT"},
+
+ {true, "SSU_PPP_MAC_KEY_NUM_L"},
+ {true, "SSU_PPP_MAC_KEY_NUM_H"},
+ {true, "SSU_PPP_HOST_KEY_NUM_L"},
+ {true, "SSU_PPP_HOST_KEY_NUM_H"},
+ {true, "PPP_SSU_MAC_RLT_NUM_L"},
+ {true, "PPP_SSU_MAC_RLT_NUM_H"},
+
+ {true, "PPP_SSU_HOST_RLT_NUM_L"},
+ {true, "PPP_SSU_HOST_RLT_NUM_H"},
+ {true, "NCSI_RX_PACKET_IN_CNT_L"},
+ {true, "NCSI_RX_PACKET_IN_CNT_H"},
+ {true, "NCSI_TX_PACKET_OUT_CNT_L"},
+ {true, "NCSI_TX_PACKET_OUT_CNT_H"},
+
+ {true, "SSU_KEY_DROP_NUM"},
+ {true, "MB_UNCOPY_NUM"},
+ {true, "RX_OQ_DROP_PKT_CNT"},
+ {true, "TX_OQ_DROP_PKT_CNT"},
+ {true, "BANK_UNBALANCE_DROP_CNT"},
+ {true, "BANK_UNBALANCE_RX_DROP_CNT"},
+
+ {true, "NIC_L2_ERR_DROP_PKT_CNT"},
+ {true, "ROC_L2_ERR_DROP_PKT_CNT"},
+ {true, "NIC_L2_ERR_DROP_PKT_CNT_RX"},
+ {true, "ROC_L2_ERR_DROP_PKT_CNT_RX"},
+ {true, "RX_OQ_GLB_DROP_PKT_CNT"},
+ {false, "Reserved"},
+
+ {true, "LO_PRI_UNICAST_CUR_CNT"},
+ {true, "HI_PRI_MULTICAST_CUR_CNT"},
+ {true, "LO_PRI_MULTICAST_CUR_CNT"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+};
+
+static const struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_1[] = {
+ {true, "prt_id"},
+ {true, "PACKET_TC_CURR_BUFFER_CNT_0"},
+ {true, "PACKET_TC_CURR_BUFFER_CNT_1"},
+ {true, "PACKET_TC_CURR_BUFFER_CNT_2"},
+ {true, "PACKET_TC_CURR_BUFFER_CNT_3"},
+ {true, "PACKET_TC_CURR_BUFFER_CNT_4"},
+
+ {true, "PACKET_TC_CURR_BUFFER_CNT_5"},
+ {true, "PACKET_TC_CURR_BUFFER_CNT_6"},
+ {true, "PACKET_TC_CURR_BUFFER_CNT_7"},
+ {true, "PACKET_CURR_BUFFER_CNT"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+
+ {true, "RX_PACKET_IN_CNT_L"},
+ {true, "RX_PACKET_IN_CNT_H"},
+ {true, "RX_PACKET_OUT_CNT_L"},
+ {true, "RX_PACKET_OUT_CNT_H"},
+ {true, "TX_PACKET_IN_CNT_L"},
+ {true, "TX_PACKET_IN_CNT_H"},
+
+ {true, "TX_PACKET_OUT_CNT_L"},
+ {true, "TX_PACKET_OUT_CNT_H"},
+ {true, "ROC_RX_PACKET_IN_CNT_L"},
+ {true, "ROC_RX_PACKET_IN_CNT_H"},
+ {true, "ROC_TX_PACKET_OUT_CNT_L"},
+ {true, "ROC_TX_PACKET_OUT_CNT_H"},
+
+ {true, "RX_PACKET_TC_IN_CNT_0_L"},
+ {true, "RX_PACKET_TC_IN_CNT_0_H"},
+ {true, "RX_PACKET_TC_IN_CNT_1_L"},
+ {true, "RX_PACKET_TC_IN_CNT_1_H"},
+ {true, "RX_PACKET_TC_IN_CNT_2_L"},
+ {true, "RX_PACKET_TC_IN_CNT_2_H"},
+
+ {true, "RX_PACKET_TC_IN_CNT_3_L"},
+ {true, "RX_PACKET_TC_IN_CNT_3_H"},
+ {true, "RX_PACKET_TC_IN_CNT_4_L"},
+ {true, "RX_PACKET_TC_IN_CNT_4_H"},
+ {true, "RX_PACKET_TC_IN_CNT_5_L"},
+ {true, "RX_PACKET_TC_IN_CNT_5_H"},
+
+ {true, "RX_PACKET_TC_IN_CNT_6_L"},
+ {true, "RX_PACKET_TC_IN_CNT_6_H"},
+ {true, "RX_PACKET_TC_IN_CNT_7_L"},
+ {true, "RX_PACKET_TC_IN_CNT_7_H"},
+ {true, "RX_PACKET_TC_OUT_CNT_0_L"},
+ {true, "RX_PACKET_TC_OUT_CNT_0_H"},
+
+ {true, "RX_PACKET_TC_OUT_CNT_1_L"},
+ {true, "RX_PACKET_TC_OUT_CNT_1_H"},
+ {true, "RX_PACKET_TC_OUT_CNT_2_L"},
+ {true, "RX_PACKET_TC_OUT_CNT_2_H"},
+ {true, "RX_PACKET_TC_OUT_CNT_3_L"},
+ {true, "RX_PACKET_TC_OUT_CNT_3_H"},
+
+ {true, "RX_PACKET_TC_OUT_CNT_4_L"},
+ {true, "RX_PACKET_TC_OUT_CNT_4_H"},
+ {true, "RX_PACKET_TC_OUT_CNT_5_L"},
+ {true, "RX_PACKET_TC_OUT_CNT_5_H"},
+ {true, "RX_PACKET_TC_OUT_CNT_6_L"},
+ {true, "RX_PACKET_TC_OUT_CNT_6_H"},
+
+ {true, "RX_PACKET_TC_OUT_CNT_7_L"},
+ {true, "RX_PACKET_TC_OUT_CNT_7_H"},
+ {true, "TX_PACKET_TC_IN_CNT_0_L"},
+ {true, "TX_PACKET_TC_IN_CNT_0_H"},
+ {true, "TX_PACKET_TC_IN_CNT_1_L"},
+ {true, "TX_PACKET_TC_IN_CNT_1_H"},
+
+ {true, "TX_PACKET_TC_IN_CNT_2_L"},
+ {true, "TX_PACKET_TC_IN_CNT_2_H"},
+ {true, "TX_PACKET_TC_IN_CNT_3_L"},
+ {true, "TX_PACKET_TC_IN_CNT_3_H"},
+ {true, "TX_PACKET_TC_IN_CNT_4_L"},
+ {true, "TX_PACKET_TC_IN_CNT_4_H"},
+
+ {true, "TX_PACKET_TC_IN_CNT_5_L"},
+ {true, "TX_PACKET_TC_IN_CNT_5_H"},
+ {true, "TX_PACKET_TC_IN_CNT_6_L"},
+ {true, "TX_PACKET_TC_IN_CNT_6_H"},
+ {true, "TX_PACKET_TC_IN_CNT_7_L"},
+ {true, "TX_PACKET_TC_IN_CNT_7_H"},
+
+ {true, "TX_PACKET_TC_OUT_CNT_0_L"},
+ {true, "TX_PACKET_TC_OUT_CNT_0_H"},
+ {true, "TX_PACKET_TC_OUT_CNT_1_L"},
+ {true, "TX_PACKET_TC_OUT_CNT_1_H"},
+ {true, "TX_PACKET_TC_OUT_CNT_2_L"},
+ {true, "TX_PACKET_TC_OUT_CNT_2_H"},
+
+ {true, "TX_PACKET_TC_OUT_CNT_3_L"},
+ {true, "TX_PACKET_TC_OUT_CNT_3_H"},
+ {true, "TX_PACKET_TC_OUT_CNT_4_L"},
+ {true, "TX_PACKET_TC_OUT_CNT_4_H"},
+ {true, "TX_PACKET_TC_OUT_CNT_5_L"},
+ {true, "TX_PACKET_TC_OUT_CNT_5_H"},
+
+ {true, "TX_PACKET_TC_OUT_CNT_6_L"},
+ {true, "TX_PACKET_TC_OUT_CNT_6_H"},
+ {true, "TX_PACKET_TC_OUT_CNT_7_L"},
+ {true, "TX_PACKET_TC_OUT_CNT_7_H"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+};
+
+static const struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_2[] = {
+ {true, "OQ_INDEX"},
+ {true, "QUEUE_CNT"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+};
+
+static const struct hclge_dbg_dfx_message hclge_dbg_igu_egu_reg[] = {
+ {true, "prt_id"},
+ {true, "IGU_RX_ERR_PKT"},
+ {true, "IGU_RX_NO_SOF_PKT"},
+ {true, "EGU_TX_1588_SHORT_PKT"},
+ {true, "EGU_TX_1588_PKT"},
+ {true, "EGU_TX_ERR_PKT"},
+
+ {true, "IGU_RX_OUT_L2_PKT"},
+ {true, "IGU_RX_OUT_L3_PKT"},
+ {true, "IGU_RX_OUT_L4_PKT"},
+ {true, "IGU_RX_IN_L2_PKT"},
+ {true, "IGU_RX_IN_L3_PKT"},
+ {true, "IGU_RX_IN_L4_PKT"},
+
+ {true, "IGU_RX_EL3E_PKT"},
+ {true, "IGU_RX_EL4E_PKT"},
+ {true, "IGU_RX_L3E_PKT"},
+ {true, "IGU_RX_L4E_PKT"},
+ {true, "IGU_RX_ROCEE_PKT"},
+ {true, "IGU_RX_OUT_UDP0_PKT"},
+
+ {true, "IGU_RX_IN_UDP0_PKT"},
+ {true, "IGU_MC_CAR_DROP_PKT_L"},
+ {true, "IGU_MC_CAR_DROP_PKT_H"},
+ {true, "IGU_BC_CAR_DROP_PKT_L"},
+ {true, "IGU_BC_CAR_DROP_PKT_H"},
+ {false, "Reserved"},
+
+ {true, "IGU_RX_OVERSIZE_PKT_L"},
+ {true, "IGU_RX_OVERSIZE_PKT_H"},
+ {true, "IGU_RX_UNDERSIZE_PKT_L"},
+ {true, "IGU_RX_UNDERSIZE_PKT_H"},
+ {true, "IGU_RX_OUT_ALL_PKT_L"},
+ {true, "IGU_RX_OUT_ALL_PKT_H"},
+
+ {true, "IGU_TX_OUT_ALL_PKT_L"},
+ {true, "IGU_TX_OUT_ALL_PKT_H"},
+ {true, "IGU_RX_UNI_PKT_L"},
+ {true, "IGU_RX_UNI_PKT_H"},
+ {true, "IGU_RX_MULTI_PKT_L"},
+ {true, "IGU_RX_MULTI_PKT_H"},
+
+ {true, "IGU_RX_BROAD_PKT_L"},
+ {true, "IGU_RX_BROAD_PKT_H"},
+ {true, "EGU_TX_OUT_ALL_PKT_L"},
+ {true, "EGU_TX_OUT_ALL_PKT_H"},
+ {true, "EGU_TX_UNI_PKT_L"},
+ {true, "EGU_TX_UNI_PKT_H"},
+
+ {true, "EGU_TX_MULTI_PKT_L"},
+ {true, "EGU_TX_MULTI_PKT_H"},
+ {true, "EGU_TX_BROAD_PKT_L"},
+ {true, "EGU_TX_BROAD_PKT_H"},
+ {true, "IGU_TX_KEY_NUM_L"},
+ {true, "IGU_TX_KEY_NUM_H"},
+
+ {true, "IGU_RX_NON_TUN_PKT_L"},
+ {true, "IGU_RX_NON_TUN_PKT_H"},
+ {true, "IGU_RX_TUN_PKT_L"},
+ {true, "IGU_RX_TUN_PKT_H"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+};
+
+static const struct hclge_dbg_dfx_message hclge_dbg_rpu_reg_0[] = {
+ {true, "tc_queue_num"},
+ {true, "FSM_DFX_ST0"},
+ {true, "FSM_DFX_ST1"},
+ {true, "RPU_RX_PKT_DROP_CNT"},
+ {true, "BUF_WAIT_TIMEOUT"},
+ {true, "BUF_WAIT_TIMEOUT_QID"},
+};
+
+static const struct hclge_dbg_dfx_message hclge_dbg_rpu_reg_1[] = {
+ {false, "Reserved"},
+ {true, "FIFO_DFX_ST0"},
+ {true, "FIFO_DFX_ST1"},
+ {true, "FIFO_DFX_ST2"},
+ {true, "FIFO_DFX_ST3"},
+ {true, "FIFO_DFX_ST4"},
+
+ {true, "FIFO_DFX_ST5"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+};
+
+static const struct hclge_dbg_dfx_message hclge_dbg_ncsi_reg[] = {
+ {false, "Reserved"},
+ {true, "NCSI_EGU_TX_FIFO_STS"},
+ {true, "NCSI_PAUSE_STATUS"},
+ {true, "NCSI_RX_CTRL_DMAC_ERR_CNT"},
+ {true, "NCSI_RX_CTRL_SMAC_ERR_CNT"},
+ {true, "NCSI_RX_CTRL_CKS_ERR_CNT"},
+
+ {true, "NCSI_RX_CTRL_PKT_CNT"},
+ {true, "NCSI_RX_PT_DMAC_ERR_CNT"},
+ {true, "NCSI_RX_PT_SMAC_ERR_CNT"},
+ {true, "NCSI_RX_PT_PKT_CNT"},
+ {true, "NCSI_RX_FCS_ERR_CNT"},
+ {true, "NCSI_TX_CTRL_DMAC_ERR_CNT"},
+
+ {true, "NCSI_TX_CTRL_SMAC_ERR_CNT"},
+ {true, "NCSI_TX_CTRL_PKT_CNT"},
+ {true, "NCSI_TX_PT_DMAC_ERR_CNT"},
+ {true, "NCSI_TX_PT_SMAC_ERR_CNT"},
+ {true, "NCSI_TX_PT_PKT_CNT"},
+ {true, "NCSI_TX_PT_PKT_TRUNC_CNT"},
+
+ {true, "NCSI_TX_PT_PKT_ERR_CNT"},
+ {true, "NCSI_TX_CTRL_PKT_ERR_CNT"},
+ {true, "NCSI_RX_CTRL_PKT_TRUNC_CNT"},
+ {true, "NCSI_RX_CTRL_PKT_CFLIT_CNT"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+
+ {true, "NCSI_MAC_RX_OCTETS_OK"},
+ {true, "NCSI_MAC_RX_OCTETS_BAD"},
+ {true, "NCSI_MAC_RX_UC_PKTS"},
+ {true, "NCSI_MAC_RX_MC_PKTS"},
+ {true, "NCSI_MAC_RX_BC_PKTS"},
+ {true, "NCSI_MAC_RX_PKTS_64OCTETS"},
+
+ {true, "NCSI_MAC_RX_PKTS_65TO127OCTETS"},
+ {true, "NCSI_MAC_RX_PKTS_128TO255OCTETS"},
+ {true, "NCSI_MAC_RX_PKTS_255TO511OCTETS"},
+ {true, "NCSI_MAC_RX_PKTS_512TO1023OCTETS"},
+ {true, "NCSI_MAC_RX_PKTS_1024TO1518OCTETS"},
+ {true, "NCSI_MAC_RX_PKTS_1519TOMAXOCTETS"},
+
+ {true, "NCSI_MAC_RX_FCS_ERRORS"},
+ {true, "NCSI_MAC_RX_LONG_ERRORS"},
+ {true, "NCSI_MAC_RX_JABBER_ERRORS"},
+ {true, "NCSI_MAC_RX_RUNT_ERR_CNT"},
+ {true, "NCSI_MAC_RX_SHORT_ERR_CNT"},
+ {true, "NCSI_MAC_RX_FILT_PKT_CNT"},
+
+ {true, "NCSI_MAC_RX_OCTETS_TOTAL_FILT"},
+ {true, "NCSI_MAC_TX_OCTETS_OK"},
+ {true, "NCSI_MAC_TX_OCTETS_BAD"},
+ {true, "NCSI_MAC_TX_UC_PKTS"},
+ {true, "NCSI_MAC_TX_MC_PKTS"},
+ {true, "NCSI_MAC_TX_BC_PKTS"},
+
+ {true, "NCSI_MAC_TX_PKTS_64OCTETS"},
+ {true, "NCSI_MAC_TX_PKTS_65TO127OCTETS"},
+ {true, "NCSI_MAC_TX_PKTS_128TO255OCTETS"},
+ {true, "NCSI_MAC_TX_PKTS_256TO511OCTETS"},
+ {true, "NCSI_MAC_TX_PKTS_512TO1023OCTETS"},
+ {true, "NCSI_MAC_TX_PKTS_1024TO1518OCTETS"},
+
+ {true, "NCSI_MAC_TX_PKTS_1519TOMAXOCTETS"},
+ {true, "NCSI_MAC_TX_UNDERRUN"},
+ {true, "NCSI_MAC_TX_CRC_ERROR"},
+ {true, "NCSI_MAC_TX_PAUSE_FRAMES"},
+ {true, "NCSI_MAC_RX_PAD_PKTS"},
+ {true, "NCSI_MAC_RX_PAUSE_FRAMES"},
+};
+
+static const struct hclge_dbg_dfx_message hclge_dbg_rtc_reg[] = {
+ {false, "Reserved"},
+ {true, "LGE_IGU_AFIFO_DFX_0"},
+ {true, "LGE_IGU_AFIFO_DFX_1"},
+ {true, "LGE_IGU_AFIFO_DFX_2"},
+ {true, "LGE_IGU_AFIFO_DFX_3"},
+ {true, "LGE_IGU_AFIFO_DFX_4"},
+
+ {true, "LGE_IGU_AFIFO_DFX_5"},
+ {true, "LGE_IGU_AFIFO_DFX_6"},
+ {true, "LGE_IGU_AFIFO_DFX_7"},
+ {true, "LGE_EGU_AFIFO_DFX_0"},
+ {true, "LGE_EGU_AFIFO_DFX_1"},
+ {true, "LGE_EGU_AFIFO_DFX_2"},
+
+ {true, "LGE_EGU_AFIFO_DFX_3"},
+ {true, "LGE_EGU_AFIFO_DFX_4"},
+ {true, "LGE_EGU_AFIFO_DFX_5"},
+ {true, "LGE_EGU_AFIFO_DFX_6"},
+ {true, "LGE_EGU_AFIFO_DFX_7"},
+ {true, "CGE_IGU_AFIFO_DFX_0"},
+
+ {true, "CGE_IGU_AFIFO_DFX_1"},
+ {true, "CGE_EGU_AFIFO_DFX_0"},
+ {true, "CGE_EGU_AFIFO_DFX_1"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+};
+
+static const struct hclge_dbg_dfx_message hclge_dbg_ppp_reg[] = {
+ {false, "Reserved"},
+ {true, "DROP_FROM_PRT_PKT_CNT"},
+ {true, "DROP_FROM_HOST_PKT_CNT"},
+ {true, "DROP_TX_VLAN_PROC_CNT"},
+ {true, "DROP_MNG_CNT"},
+ {true, "DROP_FD_CNT"},
+
+ {true, "DROP_NO_DST_CNT"},
+ {true, "DROP_MC_MBID_FULL_CNT"},
+ {true, "DROP_SC_FILTERED"},
+ {true, "PPP_MC_DROP_PKT_CNT"},
+ {true, "DROP_PT_CNT"},
+ {true, "DROP_MAC_ANTI_SPOOF_CNT"},
+
+ {true, "DROP_IG_VFV_CNT"},
+ {true, "DROP_IG_PRTV_CNT"},
+ {true, "DROP_CNM_PFC_PAUSE_CNT"},
+ {true, "DROP_TORUS_TC_CNT"},
+ {true, "DROP_TORUS_LPBK_CNT"},
+ {true, "PPP_HFS_STS"},
+
+ {true, "PPP_MC_RSLT_STS"},
+ {true, "PPP_P3U_STS"},
+ {true, "PPP_RSLT_DESCR_STS"},
+ {true, "PPP_UMV_STS_0"},
+ {true, "PPP_UMV_STS_1"},
+ {true, "PPP_VFV_STS"},
+
+ {true, "PPP_GRO_KEY_CNT"},
+ {true, "PPP_GRO_INFO_CNT"},
+ {true, "PPP_GRO_DROP_CNT"},
+ {true, "PPP_GRO_OUT_CNT"},
+ {true, "PPP_GRO_KEY_MATCH_DATA_CNT"},
+ {true, "PPP_GRO_KEY_MATCH_TCAM_CNT"},
+
+ {true, "PPP_GRO_INFO_MATCH_CNT"},
+ {true, "PPP_GRO_FREE_ENTRY_CNT"},
+ {true, "PPP_GRO_INNER_DFX_SIGNAL"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+
+ {true, "GET_RX_PKT_CNT_L"},
+ {true, "GET_RX_PKT_CNT_H"},
+ {true, "GET_TX_PKT_CNT_L"},
+ {true, "GET_TX_PKT_CNT_H"},
+ {true, "SEND_UC_PRT2HOST_PKT_CNT_L"},
+ {true, "SEND_UC_PRT2HOST_PKT_CNT_H"},
+
+ {true, "SEND_UC_PRT2PRT_PKT_CNT_L"},
+ {true, "SEND_UC_PRT2PRT_PKT_CNT_H"},
+ {true, "SEND_UC_HOST2HOST_PKT_CNT_L"},
+ {true, "SEND_UC_HOST2HOST_PKT_CNT_H"},
+ {true, "SEND_UC_HOST2PRT_PKT_CNT_L"},
+ {true, "SEND_UC_HOST2PRT_PKT_CNT_H"},
+
+ {true, "SEND_MC_FROM_PRT_CNT_L"},
+ {true, "SEND_MC_FROM_PRT_CNT_H"},
+ {true, "SEND_MC_FROM_HOST_CNT_L"},
+ {true, "SEND_MC_FROM_HOST_CNT_H"},
+ {true, "SSU_MC_RD_CNT_L"},
+ {true, "SSU_MC_RD_CNT_H"},
+
+ {true, "SSU_MC_DROP_CNT_L"},
+ {true, "SSU_MC_DROP_CNT_H"},
+ {true, "SSU_MC_RD_PKT_CNT_L"},
+ {true, "SSU_MC_RD_PKT_CNT_H"},
+ {true, "PPP_MC_2HOST_PKT_CNT_L"},
+ {true, "PPP_MC_2HOST_PKT_CNT_H"},
+
+ {true, "PPP_MC_2PRT_PKT_CNT_L"},
+ {true, "PPP_MC_2PRT_PKT_CNT_H"},
+ {true, "NTSNOS_PKT_CNT_L"},
+ {true, "NTSNOS_PKT_CNT_H"},
+ {true, "NTUP_PKT_CNT_L"},
+ {true, "NTUP_PKT_CNT_H"},
+
+ {true, "NTLCL_PKT_CNT_L"},
+ {true, "NTLCL_PKT_CNT_H"},
+ {true, "NTTGT_PKT_CNT_L"},
+ {true, "NTTGT_PKT_CNT_H"},
+ {true, "RTNS_PKT_CNT_L"},
+ {true, "RTNS_PKT_CNT_H"},
+
+ {true, "RTLPBK_PKT_CNT_L"},
+ {true, "RTLPBK_PKT_CNT_H"},
+ {true, "NR_PKT_CNT_L"},
+ {true, "NR_PKT_CNT_H"},
+ {true, "RR_PKT_CNT_L"},
+ {true, "RR_PKT_CNT_H"},
+
+ {true, "MNG_TBL_HIT_CNT_L"},
+ {true, "MNG_TBL_HIT_CNT_H"},
+ {true, "FD_TBL_HIT_CNT_L"},
+ {true, "FD_TBL_HIT_CNT_H"},
+ {true, "FD_LKUP_CNT_L"},
+ {true, "FD_LKUP_CNT_H"},
+
+ {true, "BC_HIT_CNT_L"},
+ {true, "BC_HIT_CNT_H"},
+ {true, "UM_TBL_UC_HIT_CNT_L"},
+ {true, "UM_TBL_UC_HIT_CNT_H"},
+ {true, "UM_TBL_MC_HIT_CNT_L"},
+ {true, "UM_TBL_MC_HIT_CNT_H"},
+
+ {true, "UM_TBL_VMDQ1_HIT_CNT_L"},
+ {true, "UM_TBL_VMDQ1_HIT_CNT_H"},
+ {true, "MTA_TBL_HIT_CNT_L"},
+ {true, "MTA_TBL_HIT_CNT_H"},
+ {true, "FWD_BONDING_HIT_CNT_L"},
+ {true, "FWD_BONDING_HIT_CNT_H"},
+
+ {true, "PROMIS_TBL_HIT_CNT_L"},
+ {true, "PROMIS_TBL_HIT_CNT_H"},
+ {true, "GET_TUNL_PKT_CNT_L"},
+ {true, "GET_TUNL_PKT_CNT_H"},
+ {true, "GET_BMC_PKT_CNT_L"},
+ {true, "GET_BMC_PKT_CNT_H"},
+
+ {true, "SEND_UC_PRT2BMC_PKT_CNT_L"},
+ {true, "SEND_UC_PRT2BMC_PKT_CNT_H"},
+ {true, "SEND_UC_HOST2BMC_PKT_CNT_L"},
+ {true, "SEND_UC_HOST2BMC_PKT_CNT_H"},
+ {true, "SEND_UC_BMC2HOST_PKT_CNT_L"},
+ {true, "SEND_UC_BMC2HOST_PKT_CNT_H"},
+
+ {true, "SEND_UC_BMC2PRT_PKT_CNT_L"},
+ {true, "SEND_UC_BMC2PRT_PKT_CNT_H"},
+ {true, "PPP_MC_2BMC_PKT_CNT_L"},
+ {true, "PPP_MC_2BMC_PKT_CNT_H"},
+ {true, "VLAN_MIRR_CNT_L"},
+ {true, "VLAN_MIRR_CNT_H"},
+
+ {true, "IG_MIRR_CNT_L"},
+ {true, "IG_MIRR_CNT_H"},
+ {true, "EG_MIRR_CNT_L"},
+ {true, "EG_MIRR_CNT_H"},
+ {true, "RX_DEFAULT_HOST_HIT_CNT_L"},
+ {true, "RX_DEFAULT_HOST_HIT_CNT_H"},
+
+ {true, "LAN_PAIR_CNT_L"},
+ {true, "LAN_PAIR_CNT_H"},
+ {true, "UM_TBL_MC_HIT_PKT_CNT_L"},
+ {true, "UM_TBL_MC_HIT_PKT_CNT_H"},
+ {true, "MTA_TBL_HIT_PKT_CNT_L"},
+ {true, "MTA_TBL_HIT_PKT_CNT_H"},
+
+ {true, "PROMIS_TBL_HIT_PKT_CNT_L"},
+ {true, "PROMIS_TBL_HIT_PKT_CNT_H"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+};
+
+static const struct hclge_dbg_dfx_message hclge_dbg_rcb_reg[] = {
+ {false, "Reserved"},
+ {true, "FSM_DFX_ST0"},
+ {true, "FSM_DFX_ST1"},
+ {true, "FSM_DFX_ST2"},
+ {true, "FIFO_DFX_ST0"},
+ {true, "FIFO_DFX_ST1"},
+
+ {true, "FIFO_DFX_ST2"},
+ {true, "FIFO_DFX_ST3"},
+ {true, "FIFO_DFX_ST4"},
+ {true, "FIFO_DFX_ST5"},
+ {true, "FIFO_DFX_ST6"},
+ {true, "FIFO_DFX_ST7"},
+
+ {true, "FIFO_DFX_ST8"},
+ {true, "FIFO_DFX_ST9"},
+ {true, "FIFO_DFX_ST10"},
+ {true, "FIFO_DFX_ST11"},
+ {true, "Q_CREDIT_VLD_0"},
+ {true, "Q_CREDIT_VLD_1"},
+
+ {true, "Q_CREDIT_VLD_2"},
+ {true, "Q_CREDIT_VLD_3"},
+ {true, "Q_CREDIT_VLD_4"},
+ {true, "Q_CREDIT_VLD_5"},
+ {true, "Q_CREDIT_VLD_6"},
+ {true, "Q_CREDIT_VLD_7"},
+
+ {true, "Q_CREDIT_VLD_8"},
+ {true, "Q_CREDIT_VLD_9"},
+ {true, "Q_CREDIT_VLD_10"},
+ {true, "Q_CREDIT_VLD_11"},
+ {true, "Q_CREDIT_VLD_12"},
+ {true, "Q_CREDIT_VLD_13"},
+
+ {true, "Q_CREDIT_VLD_14"},
+ {true, "Q_CREDIT_VLD_15"},
+ {true, "Q_CREDIT_VLD_16"},
+ {true, "Q_CREDIT_VLD_17"},
+ {true, "Q_CREDIT_VLD_18"},
+ {true, "Q_CREDIT_VLD_19"},
+
+ {true, "Q_CREDIT_VLD_20"},
+ {true, "Q_CREDIT_VLD_21"},
+ {true, "Q_CREDIT_VLD_22"},
+ {true, "Q_CREDIT_VLD_23"},
+ {true, "Q_CREDIT_VLD_24"},
+ {true, "Q_CREDIT_VLD_25"},
+
+ {true, "Q_CREDIT_VLD_26"},
+ {true, "Q_CREDIT_VLD_27"},
+ {true, "Q_CREDIT_VLD_28"},
+ {true, "Q_CREDIT_VLD_29"},
+ {true, "Q_CREDIT_VLD_30"},
+ {true, "Q_CREDIT_VLD_31"},
+
+ {true, "GRO_BD_SERR_CNT"},
+ {true, "GRO_CONTEXT_SERR_CNT"},
+ {true, "RX_STASH_CFG_SERR_CNT"},
+ {true, "AXI_RD_FBD_SERR_CNT"},
+ {true, "GRO_BD_MERR_CNT"},
+ {true, "GRO_CONTEXT_MERR_CNT"},
+
+ {true, "RX_STASH_CFG_MERR_CNT"},
+ {true, "AXI_RD_FBD_MERR_CNT"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+};
+
+static const struct hclge_dbg_dfx_message hclge_dbg_tqp_reg[] = {
+ {true, "q_num"},
+ {true, "RCB_CFG_RX_RING_TAIL"},
+ {true, "RCB_CFG_RX_RING_HEAD"},
+ {true, "RCB_CFG_RX_RING_FBDNUM"},
+ {true, "RCB_CFG_RX_RING_OFFSET"},
+ {true, "RCB_CFG_RX_RING_FBDOFFSET"},
+
+ {true, "RCB_CFG_RX_RING_PKTNUM_RECORD"},
+ {true, "RCB_CFG_TX_RING_TAIL"},
+ {true, "RCB_CFG_TX_RING_HEAD"},
+ {true, "RCB_CFG_TX_RING_FBDNUM"},
+ {true, "RCB_CFG_TX_RING_OFFSET"},
+ {true, "RCB_CFG_TX_RING_EBDNUM"},
+};
+
+#define HCLGE_DBG_INFO_LEN 256
+#define HCLGE_DBG_VLAN_FLTR_INFO_LEN 256
+#define HCLGE_DBG_VLAN_OFFLOAD_INFO_LEN 512
+#define HCLGE_DBG_ID_LEN 16
+#define HCLGE_DBG_ITEM_NAME_LEN 32
+#define HCLGE_DBG_DATA_STR_LEN 32
+#define HCLGE_DBG_TM_INFO_LEN 256
+
+#define HCLGE_BILLION_NANO_SECONDS 1000000000
+
+struct hclge_dbg_item {
+ char name[HCLGE_DBG_ITEM_NAME_LEN];
+ u16 interval; /* number of blanks printed after the item */
+};
+
+struct hclge_dbg_vlan_cfg {
+ u16 pvid;
+ u8 accept_tag1;
+ u8 accept_tag2;
+ u8 accept_untag1;
+ u8 accept_untag2;
+ u8 insert_tag1;
+ u8 insert_tag2;
+ u8 shift_tag;
+ u8 strip_tag1;
+ u8 strip_tag2;
+ u8 drop_tag1;
+ u8 drop_tag2;
+ u8 pri_only1;
+ u8 pri_only2;
+};
+
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c
new file mode 100644
index 0000000000..9a939c0b21
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) 2021 Hisilicon Limited. */
+
+#include <net/devlink.h>
+
+#include "hclge_devlink.h"
+
+static int hclge_devlink_info_get(struct devlink *devlink,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+#define HCLGE_DEVLINK_FW_STRING_LEN 32
+ struct hclge_devlink_priv *priv = devlink_priv(devlink);
+ char version_str[HCLGE_DEVLINK_FW_STRING_LEN];
+ struct hclge_dev *hdev = priv->hdev;
+
+ snprintf(version_str, sizeof(version_str), "%lu.%lu.%lu.%lu",
+ hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE3_MASK,
+ HNAE3_FW_VERSION_BYTE3_SHIFT),
+ hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE2_MASK,
+ HNAE3_FW_VERSION_BYTE2_SHIFT),
+ hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE1_MASK,
+ HNAE3_FW_VERSION_BYTE1_SHIFT),
+ hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE0_MASK,
+ HNAE3_FW_VERSION_BYTE0_SHIFT));
+
+ return devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW,
+ version_str);
+}
+
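+/* For a driver re-init reload, bring the NIC client down and uninitialize
+ * it under rtnl_lock; hclge_devlink_reload_up() later re-initializes and
+ * brings it back up.
+ */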
+static int hclge_devlink_reload_down(struct devlink *devlink, bool netns_change,
+ enum devlink_reload_action action,
+ enum devlink_reload_limit limit,
+ struct netlink_ext_ack *extack)
+{
+ struct hclge_devlink_priv *priv = devlink_priv(devlink);
+ struct hclge_dev *hdev = priv->hdev;
+ struct hnae3_handle *h = &hdev->vport->nic;
+ struct pci_dev *pdev = hdev->pdev;
+ int ret;
+
+ if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) {
+ dev_err(&pdev->dev, "reset is handling\n");
+ return -EBUSY;
+ }
+
+ switch (action) {
+ case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
+ rtnl_lock();
+ ret = hdev->nic_client->ops->reset_notify(h, HNAE3_DOWN_CLIENT);
+ if (ret) {
+ rtnl_unlock();
+ return ret;
+ }
+
+ ret = hdev->nic_client->ops->reset_notify(h,
+ HNAE3_UNINIT_CLIENT);
+ rtnl_unlock();
+ return ret;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int hclge_devlink_reload_up(struct devlink *devlink,
+ enum devlink_reload_action action,
+ enum devlink_reload_limit limit,
+ u32 *actions_performed,
+ struct netlink_ext_ack *extack)
+{
+ struct hclge_devlink_priv *priv = devlink_priv(devlink);
+ struct hclge_dev *hdev = priv->hdev;
+ struct hnae3_handle *h = &hdev->vport->nic;
+ int ret;
+
+ *actions_performed = BIT(action);
+ switch (action) {
+ case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
+ rtnl_lock();
+ ret = hdev->nic_client->ops->reset_notify(h, HNAE3_INIT_CLIENT);
+ if (ret) {
+ rtnl_unlock();
+ return ret;
+ }
+
+ ret = hdev->nic_client->ops->reset_notify(h, HNAE3_UP_CLIENT);
+ rtnl_unlock();
+ return ret;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct devlink_ops hclge_devlink_ops = {
+ .info_get = hclge_devlink_info_get,
+ .reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT),
+ .reload_down = hclge_devlink_reload_down,
+ .reload_up = hclge_devlink_reload_up,
+};
+
+int hclge_devlink_init(struct hclge_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ struct hclge_devlink_priv *priv;
+ struct devlink *devlink;
+
+ devlink = devlink_alloc(&hclge_devlink_ops,
+ sizeof(struct hclge_devlink_priv), &pdev->dev);
+ if (!devlink)
+ return -ENOMEM;
+
+ priv = devlink_priv(devlink);
+ priv->hdev = hdev;
+ hdev->devlink = devlink;
+
+ devlink_register(devlink);
+ return 0;
+}
+
+void hclge_devlink_uninit(struct hclge_dev *hdev)
+{
+ struct devlink *devlink = hdev->devlink;
+
+ devlink_unregister(devlink);
+
+ devlink_free(devlink);
+}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.h
new file mode 100644
index 0000000000..918be04507
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2021 Hisilicon Limited. */
+
+#ifndef __HCLGE_DEVLINK_H
+#define __HCLGE_DEVLINK_H
+
+#include "hclge_main.h"
+
+struct hclge_devlink_priv {
+ struct hclge_dev *hdev;
+};
+
+int hclge_devlink_init(struct hclge_dev *hdev);
+void hclge_devlink_uninit(struct hclge_dev *hdev);
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
new file mode 100644
index 0000000000..3f35227ef1
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
@@ -0,0 +1,2942 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) 2016-2017 Hisilicon Limited. */
+
+#include <linux/sched/clock.h>
+
+#include "hclge_err.h"
+
+static const struct hclge_hw_error hclge_imp_tcm_ecc_int[] = {
+ {
+ .int_msk = BIT(1),
+ .msg = "imp_itcm0_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(3),
+ .msg = "imp_itcm1_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(5),
+ .msg = "imp_itcm2_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(7),
+ .msg = "imp_itcm3_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(9),
+ .msg = "imp_dtcm0_mem0_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(11),
+ .msg = "imp_dtcm0_mem1_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(13),
+ .msg = "imp_dtcm1_mem0_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(15),
+ .msg = "imp_dtcm1_mem1_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(17),
+ .msg = "imp_itcm4_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ /* sentinel */
+ }
+};
+
+static const struct hclge_hw_error hclge_cmdq_nic_mem_ecc_int[] = {
+ {
+ .int_msk = BIT(1),
+ .msg = "cmdq_nic_rx_depth_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(3),
+ .msg = "cmdq_nic_tx_depth_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(5),
+ .msg = "cmdq_nic_rx_tail_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(7),
+ .msg = "cmdq_nic_tx_tail_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(9),
+ .msg = "cmdq_nic_rx_head_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(11),
+ .msg = "cmdq_nic_tx_head_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(13),
+ .msg = "cmdq_nic_rx_addr_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(15),
+ .msg = "cmdq_nic_tx_addr_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(17),
+ .msg = "cmdq_rocee_rx_depth_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(19),
+ .msg = "cmdq_rocee_tx_depth_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(21),
+ .msg = "cmdq_rocee_rx_tail_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(23),
+ .msg = "cmdq_rocee_tx_tail_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(25),
+ .msg = "cmdq_rocee_rx_head_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(27),
+ .msg = "cmdq_rocee_tx_head_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(29),
+ .msg = "cmdq_rocee_rx_addr_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(31),
+ .msg = "cmdq_rocee_tx_addr_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ /* sentinel */
+ }
+};
+
+static const struct hclge_hw_error hclge_tqp_int_ecc_int[] = {
+ {
+ .int_msk = BIT(6),
+ .msg = "tqp_int_cfg_even_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(7),
+ .msg = "tqp_int_cfg_odd_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(8),
+ .msg = "tqp_int_ctrl_even_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(9),
+ .msg = "tqp_int_ctrl_odd_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(10),
+ .msg = "tx_que_scan_int_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(11),
+ .msg = "rx_que_scan_int_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ /* sentinel */
+ }
+};
+
+static const struct hclge_hw_error hclge_msix_sram_ecc_int[] = {
+ {
+ .int_msk = BIT(1),
+ .msg = "msix_nic_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(3),
+ .msg = "msix_rocee_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ /* sentinel */
+ }
+};
+
+static const struct hclge_hw_error hclge_igu_int[] = {
+ {
+ .int_msk = BIT(0),
+ .msg = "igu_rx_buf0_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(2),
+ .msg = "igu_rx_buf1_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ /* sentinel */
+ }
+};
+
+static const struct hclge_hw_error hclge_igu_egu_tnl_int[] = {
+ {
+ .int_msk = BIT(0),
+ .msg = "rx_buf_overflow",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(1),
+ .msg = "rx_stp_fifo_overflow",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(2),
+ .msg = "rx_stp_fifo_underflow",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(3),
+ .msg = "tx_buf_overflow",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(4),
+ .msg = "tx_buf_underrun",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(5),
+ .msg = "rx_stp_buf_overflow",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ /* sentinel */
+ }
+};
+
+static const struct hclge_hw_error hclge_ncsi_err_int[] = {
+ {
+ .int_msk = BIT(1),
+ .msg = "ncsi_tx_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ /* sentinel */
+ }
+};
+
+static const struct hclge_hw_error hclge_ppp_mpf_abnormal_int_st1[] = {
+ {
+ .int_msk = BIT(0),
+ .msg = "vf_vlan_ad_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(1),
+ .msg = "umv_mcast_group_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(2),
+ .msg = "umv_key_mem0_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(3),
+ .msg = "umv_key_mem1_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(4),
+ .msg = "umv_key_mem2_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(5),
+ .msg = "umv_key_mem3_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(6),
+ .msg = "umv_ad_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(7),
+ .msg = "rss_tc_mode_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(8),
+ .msg = "rss_idt_mem0_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(9),
+ .msg = "rss_idt_mem1_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(10),
+ .msg = "rss_idt_mem2_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(11),
+ .msg = "rss_idt_mem3_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(12),
+ .msg = "rss_idt_mem4_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(13),
+ .msg = "rss_idt_mem5_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(14),
+ .msg = "rss_idt_mem6_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(15),
+ .msg = "rss_idt_mem7_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(16),
+ .msg = "rss_idt_mem8_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(17),
+ .msg = "rss_idt_mem9_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(18),
+ .msg = "rss_idt_mem10_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(19),
+ .msg = "rss_idt_mem11_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(20),
+ .msg = "rss_idt_mem12_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(21),
+ .msg = "rss_idt_mem13_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(22),
+ .msg = "rss_idt_mem14_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(23),
+ .msg = "rss_idt_mem15_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(24),
+ .msg = "port_vlan_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(25),
+ .msg = "mcast_linear_table_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(26),
+ .msg = "mcast_result_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(27),
+ .msg = "flow_director_ad_mem0_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(28),
+ .msg = "flow_director_ad_mem1_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(29),
+ .msg = "rx_vlan_tag_memory_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(30),
+ .msg = "Tx_UP_mapping_config_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ /* sentinel */
+ }
+};
+
+static const struct hclge_hw_error hclge_ppp_pf_abnormal_int[] = {
+ {
+ .int_msk = BIT(0),
+ .msg = "tx_vlan_tag_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(1),
+ .msg = "rss_list_tc_unassigned_queue_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ /* sentinel */
+ }
+};
+
+static const struct hclge_hw_error hclge_ppp_mpf_abnormal_int_st3[] = {
+ {
+ .int_msk = BIT(0),
+ .msg = "hfs_fifo_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(1),
+ .msg = "rslt_descr_fifo_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(2),
+ .msg = "tx_vlan_tag_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(3),
+ .msg = "FD_CN0_memory_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(4),
+ .msg = "FD_CN1_memory_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(5),
+ .msg = "GRO_AD_memory_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ /* sentinel */
+ }
+};
+
+static const struct hclge_hw_error hclge_tm_sch_rint[] = {
+ {
+ .int_msk = BIT(1),
+ .msg = "tm_sch_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(2),
+ .msg = "tm_sch_port_shap_sub_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(3),
+ .msg = "tm_sch_port_shap_sub_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(4),
+ .msg = "tm_sch_pg_pshap_sub_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(5),
+ .msg = "tm_sch_pg_pshap_sub_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(6),
+ .msg = "tm_sch_pg_cshap_sub_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(7),
+ .msg = "tm_sch_pg_cshap_sub_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(8),
+ .msg = "tm_sch_pri_pshap_sub_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(9),
+ .msg = "tm_sch_pri_pshap_sub_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(10),
+ .msg = "tm_sch_pri_cshap_sub_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(11),
+ .msg = "tm_sch_pri_cshap_sub_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(12),
+ .msg = "tm_sch_port_shap_offset_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(13),
+ .msg = "tm_sch_port_shap_offset_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(14),
+ .msg = "tm_sch_pg_pshap_offset_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(15),
+ .msg = "tm_sch_pg_pshap_offset_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(16),
+ .msg = "tm_sch_pg_cshap_offset_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(17),
+ .msg = "tm_sch_pg_cshap_offset_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(18),
+ .msg = "tm_sch_pri_pshap_offset_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(19),
+ .msg = "tm_sch_pri_pshap_offset_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(20),
+ .msg = "tm_sch_pri_cshap_offset_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(21),
+ .msg = "tm_sch_pri_cshap_offset_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(22),
+ .msg = "tm_sch_rq_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(23),
+ .msg = "tm_sch_rq_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(24),
+ .msg = "tm_sch_nq_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(25),
+ .msg = "tm_sch_nq_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(26),
+ .msg = "tm_sch_roce_up_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(27),
+ .msg = "tm_sch_roce_up_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(28),
+ .msg = "tm_sch_rcb_byte_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(29),
+ .msg = "tm_sch_rcb_byte_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(30),
+ .msg = "tm_sch_ssu_byte_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(31),
+ .msg = "tm_sch_ssu_byte_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ /* sentinel */
+ }
+};
+
+static const struct hclge_hw_error hclge_qcn_fifo_rint[] = {
+ {
+ .int_msk = BIT(0),
+ .msg = "qcn_shap_gp0_sch_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(1),
+ .msg = "qcn_shap_gp0_sch_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(2),
+ .msg = "qcn_shap_gp1_sch_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(3),
+ .msg = "qcn_shap_gp1_sch_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(4),
+ .msg = "qcn_shap_gp2_sch_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(5),
+ .msg = "qcn_shap_gp2_sch_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(6),
+ .msg = "qcn_shap_gp3_sch_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(7),
+ .msg = "qcn_shap_gp3_sch_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(8),
+ .msg = "qcn_shap_gp0_offset_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(9),
+ .msg = "qcn_shap_gp0_offset_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(10),
+ .msg = "qcn_shap_gp1_offset_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(11),
+ .msg = "qcn_shap_gp1_offset_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(12),
+ .msg = "qcn_shap_gp2_offset_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(13),
+ .msg = "qcn_shap_gp2_offset_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(14),
+ .msg = "qcn_shap_gp3_offset_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(15),
+ .msg = "qcn_shap_gp3_offset_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(16),
+ .msg = "qcn_byte_info_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(17),
+ .msg = "qcn_byte_info_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ /* sentinel */
+ }
+};
+
+static const struct hclge_hw_error hclge_qcn_ecc_rint[] = {
+ {
+ .int_msk = BIT(1),
+ .msg = "qcn_byte_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(3),
+ .msg = "qcn_time_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(5),
+ .msg = "qcn_fb_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(7),
+ .msg = "qcn_link_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(9),
+ .msg = "qcn_rate_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(11),
+ .msg = "qcn_tmplt_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(13),
+ .msg = "qcn_shap_cfg_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(15),
+ .msg = "qcn_gp0_barrel_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(17),
+ .msg = "qcn_gp1_barrel_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(19),
+ .msg = "qcn_gp2_barrel_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(21),
+		.msg = "qcn_gp3_barrel_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ /* sentinel */
+ }
+};
+
+static const struct hclge_hw_error hclge_mac_afifo_tnl_int[] = {
+ {
+ .int_msk = BIT(0),
+ .msg = "egu_cge_afifo_ecc_1bit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(1),
+ .msg = "egu_cge_afifo_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(2),
+ .msg = "egu_lge_afifo_ecc_1bit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(3),
+ .msg = "egu_lge_afifo_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(4),
+ .msg = "cge_igu_afifo_ecc_1bit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(5),
+ .msg = "cge_igu_afifo_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(6),
+ .msg = "lge_igu_afifo_ecc_1bit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(7),
+ .msg = "lge_igu_afifo_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(8),
+ .msg = "cge_igu_afifo_overflow_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(9),
+ .msg = "lge_igu_afifo_overflow_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(10),
+ .msg = "egu_cge_afifo_underrun_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(11),
+ .msg = "egu_lge_afifo_underrun_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(12),
+ .msg = "egu_ge_afifo_underrun_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(13),
+ .msg = "ge_igu_afifo_overflow_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ /* sentinel */
+ }
+};
+
+static const struct hclge_hw_error hclge_ppu_mpf_abnormal_int_st2[] = {
+ {
+ .int_msk = BIT(13),
+ .msg = "rpu_rx_pkt_bit32_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(14),
+ .msg = "rpu_rx_pkt_bit33_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(15),
+ .msg = "rpu_rx_pkt_bit34_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(16),
+ .msg = "rpu_rx_pkt_bit35_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(17),
+ .msg = "rcb_tx_ring_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(18),
+ .msg = "rcb_rx_ring_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(19),
+ .msg = "rcb_tx_fbd_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(20),
+ .msg = "rcb_rx_ebd_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(21),
+ .msg = "rcb_tso_info_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(22),
+ .msg = "rcb_tx_int_info_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(23),
+ .msg = "rcb_rx_int_info_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(24),
+ .msg = "tpu_tx_pkt_0_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(25),
+ .msg = "tpu_tx_pkt_1_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(26),
+ .msg = "rd_bus_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(27),
+ .msg = "wr_bus_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(28),
+ .msg = "reg_search_miss",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(29),
+ .msg = "rx_q_search_miss",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(30),
+ .msg = "ooo_ecc_err_detect",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(31),
+ .msg = "ooo_ecc_err_multpl",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ /* sentinel */
+ }
+};
+
+static const struct hclge_hw_error hclge_ppu_mpf_abnormal_int_st3[] = {
+ {
+ .int_msk = BIT(4),
+ .msg = "gro_bd_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(5),
+ .msg = "gro_context_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(6),
+ .msg = "rx_stash_cfg_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(7),
+ .msg = "axi_rd_fbd_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ /* sentinel */
+ }
+};
+
+static const struct hclge_hw_error hclge_ppu_pf_abnormal_int[] = {
+ {
+ .int_msk = BIT(0),
+ .msg = "over_8bd_no_fe",
+ .reset_level = HNAE3_FUNC_RESET
+ }, {
+ .int_msk = BIT(1),
+ .msg = "tso_mss_cmp_min_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(2),
+ .msg = "tso_mss_cmp_max_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(3),
+ .msg = "tx_rd_fbd_poison",
+ .reset_level = HNAE3_FUNC_RESET
+ }, {
+ .int_msk = BIT(4),
+ .msg = "rx_rd_ebd_poison",
+ .reset_level = HNAE3_FUNC_RESET
+ }, {
+ .int_msk = BIT(5),
+ .msg = "buf_wait_timeout",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ /* sentinel */
+ }
+};
+
+static const struct hclge_hw_error hclge_ssu_com_err_int[] = {
+ {
+ .int_msk = BIT(0),
+ .msg = "buf_sum_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(1),
+ .msg = "ppp_mb_num_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(2),
+ .msg = "ppp_mbid_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(3),
+ .msg = "ppp_rlt_mac_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(4),
+ .msg = "ppp_rlt_host_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(5),
+ .msg = "cks_edit_position_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(6),
+ .msg = "cks_edit_condition_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(7),
+ .msg = "vlan_edit_condition_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(8),
+ .msg = "vlan_num_ot_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(9),
+ .msg = "vlan_num_in_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ /* sentinel */
+ }
+};
+
+#define HCLGE_SSU_MEM_ECC_ERR(x) \
+{ \
+ .int_msk = BIT(x), \
+ .msg = "ssu_mem" #x "_ecc_mbit_err", \
+ .reset_level = HNAE3_GLOBAL_RESET \
+}
+
+static const struct hclge_hw_error hclge_ssu_mem_ecc_err_int[] = {
+ HCLGE_SSU_MEM_ECC_ERR(0),
+ HCLGE_SSU_MEM_ECC_ERR(1),
+ HCLGE_SSU_MEM_ECC_ERR(2),
+ HCLGE_SSU_MEM_ECC_ERR(3),
+ HCLGE_SSU_MEM_ECC_ERR(4),
+ HCLGE_SSU_MEM_ECC_ERR(5),
+ HCLGE_SSU_MEM_ECC_ERR(6),
+ HCLGE_SSU_MEM_ECC_ERR(7),
+ HCLGE_SSU_MEM_ECC_ERR(8),
+ HCLGE_SSU_MEM_ECC_ERR(9),
+ HCLGE_SSU_MEM_ECC_ERR(10),
+ HCLGE_SSU_MEM_ECC_ERR(11),
+ HCLGE_SSU_MEM_ECC_ERR(12),
+ HCLGE_SSU_MEM_ECC_ERR(13),
+ HCLGE_SSU_MEM_ECC_ERR(14),
+ HCLGE_SSU_MEM_ECC_ERR(15),
+ HCLGE_SSU_MEM_ECC_ERR(16),
+ HCLGE_SSU_MEM_ECC_ERR(17),
+ HCLGE_SSU_MEM_ECC_ERR(18),
+ HCLGE_SSU_MEM_ECC_ERR(19),
+ HCLGE_SSU_MEM_ECC_ERR(20),
+ HCLGE_SSU_MEM_ECC_ERR(21),
+ HCLGE_SSU_MEM_ECC_ERR(22),
+ HCLGE_SSU_MEM_ECC_ERR(23),
+ HCLGE_SSU_MEM_ECC_ERR(24),
+ HCLGE_SSU_MEM_ECC_ERR(25),
+ HCLGE_SSU_MEM_ECC_ERR(26),
+ HCLGE_SSU_MEM_ECC_ERR(27),
+ HCLGE_SSU_MEM_ECC_ERR(28),
+ HCLGE_SSU_MEM_ECC_ERR(29),
+ HCLGE_SSU_MEM_ECC_ERR(30),
+ HCLGE_SSU_MEM_ECC_ERR(31),
+ { /* sentinel */ }
+};
+
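
The HCLGE_SSU_MEM_ECC_ERR(x) helper above just stamps out 32 otherwise identical entries, splicing the stringized index (#x) into the message via adjacent string-literal concatenation. For reference, HCLGE_SSU_MEM_ECC_ERR(0) expands to the equivalent of:

{
	.int_msk = BIT(0),
	.msg = "ssu_mem0_ecc_mbit_err",
	.reset_level = HNAE3_GLOBAL_RESET
}
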
+static const struct hclge_hw_error hclge_ssu_port_based_err_int[] = {
+ {
+ .int_msk = BIT(0),
+ .msg = "roc_pkt_without_key_port",
+ .reset_level = HNAE3_FUNC_RESET
+ }, {
+ .int_msk = BIT(1),
+ .msg = "tpu_pkt_without_key_port",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(2),
+ .msg = "igu_pkt_without_key_port",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(3),
+ .msg = "roc_eof_mis_match_port",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(4),
+ .msg = "tpu_eof_mis_match_port",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(5),
+ .msg = "igu_eof_mis_match_port",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(6),
+ .msg = "roc_sof_mis_match_port",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(7),
+ .msg = "tpu_sof_mis_match_port",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(8),
+ .msg = "igu_sof_mis_match_port",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(11),
+ .msg = "ets_rd_int_rx_port",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(12),
+ .msg = "ets_wr_int_rx_port",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(13),
+ .msg = "ets_rd_int_tx_port",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(14),
+ .msg = "ets_wr_int_tx_port",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ /* sentinel */
+ }
+};
+
+static const struct hclge_hw_error hclge_ssu_fifo_overflow_int[] = {
+ {
+ .int_msk = BIT(0),
+ .msg = "ig_mac_inf_int",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(1),
+ .msg = "ig_host_inf_int",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(2),
+ .msg = "ig_roc_buf_int",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(3),
+ .msg = "ig_host_data_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(4),
+ .msg = "ig_host_key_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(5),
+ .msg = "tx_qcn_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(6),
+ .msg = "rx_qcn_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(7),
+ .msg = "tx_pf_rd_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(8),
+ .msg = "rx_pf_rd_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(9),
+ .msg = "qm_eof_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(10),
+ .msg = "mb_rlt_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(11),
+ .msg = "dup_uncopy_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(12),
+ .msg = "dup_cnt_rd_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(13),
+ .msg = "dup_cnt_drop_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(14),
+ .msg = "dup_cnt_wrb_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(15),
+ .msg = "host_cmd_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(16),
+ .msg = "mac_cmd_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(17),
+ .msg = "host_cmd_bitmap_empty_int",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(18),
+ .msg = "mac_cmd_bitmap_empty_int",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(19),
+ .msg = "dup_bitmap_empty_int",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(20),
+ .msg = "out_queue_bitmap_empty_int",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(21),
+ .msg = "bank2_bitmap_empty_int",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(22),
+ .msg = "bank1_bitmap_empty_int",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(23),
+ .msg = "bank0_bitmap_empty_int",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ /* sentinel */
+ }
+};
+
+static const struct hclge_hw_error hclge_ssu_ets_tcg_int[] = {
+ {
+ .int_msk = BIT(0),
+ .msg = "ets_rd_int_rx_tcg",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(1),
+ .msg = "ets_wr_int_rx_tcg",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(2),
+ .msg = "ets_rd_int_tx_tcg",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(3),
+ .msg = "ets_wr_int_tx_tcg",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ /* sentinel */
+ }
+};
+
+static const struct hclge_hw_error hclge_ssu_port_based_pf_int[] = {
+ {
+ .int_msk = BIT(0),
+ .msg = "roc_pkt_without_key_port",
+ .reset_level = HNAE3_FUNC_RESET
+ }, {
+ .int_msk = BIT(9),
+ .msg = "low_water_line_err_port",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(10),
+ .msg = "hi_water_line_err_port",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ /* sentinel */
+ }
+};
+
+static const struct hclge_hw_error hclge_rocee_qmm_ovf_err_int[] = {
+ {
+ .int_msk = 0,
+ .msg = "rocee qmm ovf: sgid invalid err"
+ }, {
+ .int_msk = 0x4,
+ .msg = "rocee qmm ovf: sgid ovf err"
+ }, {
+ .int_msk = 0x8,
+ .msg = "rocee qmm ovf: smac invalid err"
+ }, {
+ .int_msk = 0xC,
+ .msg = "rocee qmm ovf: smac ovf err"
+ }, {
+ .int_msk = 0x10,
+ .msg = "rocee qmm ovf: cqc invalid err"
+ }, {
+ .int_msk = 0x11,
+ .msg = "rocee qmm ovf: cqc ovf err"
+ }, {
+ .int_msk = 0x12,
+ .msg = "rocee qmm ovf: cqc hopnum err"
+ }, {
+ .int_msk = 0x13,
+ .msg = "rocee qmm ovf: cqc ba0 err"
+ }, {
+ .int_msk = 0x14,
+ .msg = "rocee qmm ovf: srqc invalid err"
+ }, {
+ .int_msk = 0x15,
+ .msg = "rocee qmm ovf: srqc ovf err"
+ }, {
+ .int_msk = 0x16,
+ .msg = "rocee qmm ovf: srqc hopnum err"
+ }, {
+ .int_msk = 0x17,
+ .msg = "rocee qmm ovf: srqc ba0 err"
+ }, {
+ .int_msk = 0x18,
+ .msg = "rocee qmm ovf: mpt invalid err"
+ }, {
+ .int_msk = 0x19,
+ .msg = "rocee qmm ovf: mpt ovf err"
+ }, {
+ .int_msk = 0x1A,
+ .msg = "rocee qmm ovf: mpt hopnum err"
+ }, {
+ .int_msk = 0x1B,
+ .msg = "rocee qmm ovf: mpt ba0 err"
+ }, {
+ .int_msk = 0x1C,
+ .msg = "rocee qmm ovf: qpc invalid err"
+ }, {
+ .int_msk = 0x1D,
+ .msg = "rocee qmm ovf: qpc ovf err"
+ }, {
+ .int_msk = 0x1E,
+ .msg = "rocee qmm ovf: qpc hopnum err"
+ }, {
+ .int_msk = 0x1F,
+ .msg = "rocee qmm ovf: qpc ba0 err"
+ }, {
+ /* sentinel */
+ }
+};
+
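
Note that hclge_rocee_qmm_ovf_err_int differs from the bitmask tables above: its int_msk values (0x0, 0x4, 0x8, 0xC, ...) are complete error-type codes that the overflow handler later in this file compares with ==, rather than single bits tested with &. A small sketch of that exact-match walk, wrapped in a hypothetical helper (the helper name is illustrative, not a driver symbol):

/* Hypothetical wrapper around the exact-match lookup performed by
 * hclge_log_rocee_ovf_error() below; the zeroed sentinel entry (NULL
 * .msg) terminates the walk just as in the bitmask tables.
 */
static const char *rocee_qmm_ovf_msg(u32 err_code)
{
	const struct hclge_hw_error *err = &hclge_rocee_qmm_ovf_err_int[0];

	while (err->msg) {
		if (err->int_msk == err_code)	/* whole code, not a bit test */
			return err->msg;
		err++;
	}

	return NULL;
}
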
+static const struct hclge_hw_module_id hclge_hw_module_id_st[] = {
+ {
+ .module_id = MODULE_NONE,
+ .msg = "MODULE_NONE"
+ }, {
+ .module_id = MODULE_BIOS_COMMON,
+ .msg = "MODULE_BIOS_COMMON"
+ }, {
+ .module_id = MODULE_GE,
+ .msg = "MODULE_GE"
+ }, {
+ .module_id = MODULE_IGU_EGU,
+ .msg = "MODULE_IGU_EGU"
+ }, {
+ .module_id = MODULE_LGE,
+ .msg = "MODULE_LGE"
+ }, {
+ .module_id = MODULE_NCSI,
+ .msg = "MODULE_NCSI"
+ }, {
+ .module_id = MODULE_PPP,
+ .msg = "MODULE_PPP"
+ }, {
+ .module_id = MODULE_QCN,
+ .msg = "MODULE_QCN"
+ }, {
+ .module_id = MODULE_RCB_RX,
+ .msg = "MODULE_RCB_RX"
+ }, {
+ .module_id = MODULE_RTC,
+ .msg = "MODULE_RTC"
+ }, {
+ .module_id = MODULE_SSU,
+ .msg = "MODULE_SSU"
+ }, {
+ .module_id = MODULE_TM,
+ .msg = "MODULE_TM"
+ }, {
+ .module_id = MODULE_RCB_TX,
+ .msg = "MODULE_RCB_TX"
+ }, {
+ .module_id = MODULE_TXDMA,
+ .msg = "MODULE_TXDMA"
+ }, {
+ .module_id = MODULE_MASTER,
+ .msg = "MODULE_MASTER"
+ }, {
+ .module_id = MODULE_HIMAC,
+ .msg = "MODULE_HIMAC"
+ }, {
+ .module_id = MODULE_ROCEE_TOP,
+ .msg = "MODULE_ROCEE_TOP"
+ }, {
+ .module_id = MODULE_ROCEE_TIMER,
+ .msg = "MODULE_ROCEE_TIMER"
+ }, {
+ .module_id = MODULE_ROCEE_MDB,
+ .msg = "MODULE_ROCEE_MDB"
+ }, {
+ .module_id = MODULE_ROCEE_TSP,
+ .msg = "MODULE_ROCEE_TSP"
+ }, {
+ .module_id = MODULE_ROCEE_TRP,
+ .msg = "MODULE_ROCEE_TRP"
+ }, {
+ .module_id = MODULE_ROCEE_SCC,
+ .msg = "MODULE_ROCEE_SCC"
+ }, {
+ .module_id = MODULE_ROCEE_CAEP,
+ .msg = "MODULE_ROCEE_CAEP"
+ }, {
+ .module_id = MODULE_ROCEE_GEN_AC,
+ .msg = "MODULE_ROCEE_GEN_AC"
+ }, {
+ .module_id = MODULE_ROCEE_QMM,
+ .msg = "MODULE_ROCEE_QMM"
+ }, {
+ .module_id = MODULE_ROCEE_LSAN,
+ .msg = "MODULE_ROCEE_LSAN"
+ }
+};
+
+static const struct hclge_hw_type_id hclge_hw_type_id_st[] = {
+ {
+ .type_id = NONE_ERROR,
+ .msg = "none_error"
+ }, {
+ .type_id = FIFO_ERROR,
+ .msg = "fifo_error"
+ }, {
+ .type_id = MEMORY_ERROR,
+ .msg = "memory_error"
+ }, {
+ .type_id = POISON_ERROR,
+ .msg = "poison_error"
+ }, {
+ .type_id = MSIX_ECC_ERROR,
+ .msg = "msix_ecc_error"
+ }, {
+ .type_id = TQP_INT_ECC_ERROR,
+ .msg = "tqp_int_ecc_error"
+ }, {
+ .type_id = PF_ABNORMAL_INT_ERROR,
+ .msg = "pf_abnormal_int_error"
+ }, {
+ .type_id = MPF_ABNORMAL_INT_ERROR,
+ .msg = "mpf_abnormal_int_error"
+ }, {
+ .type_id = COMMON_ERROR,
+ .msg = "common_error"
+ }, {
+ .type_id = PORT_ERROR,
+ .msg = "port_error"
+ }, {
+ .type_id = ETS_ERROR,
+ .msg = "ets_error"
+ }, {
+ .type_id = NCSI_ERROR,
+ .msg = "ncsi_error"
+ }, {
+ .type_id = GLB_ERROR,
+ .msg = "glb_error"
+ }, {
+ .type_id = LINK_ERROR,
+ .msg = "link_error"
+ }, {
+ .type_id = PTP_ERROR,
+ .msg = "ptp_error"
+ }, {
+ .type_id = ROCEE_NORMAL_ERR,
+ .msg = "rocee_normal_error"
+ }, {
+ .type_id = ROCEE_OVF_ERR,
+ .msg = "rocee_ovf_error"
+ }, {
+ .type_id = ROCEE_BUS_ERR,
+ .msg = "rocee_bus_error"
+ },
+};
+
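
Unlike the interrupt tables, hclge_hw_module_id_st and hclge_hw_type_id_st carry no sentinel entry, so the code that consumes them (later in this file, not shown in this hunk) presumably bounds the walk with ARRAY_SIZE() and matches on the numeric ID. A hypothetical lookup in that style (illustrative only):

/* Hypothetical helper; ARRAY_SIZE() bounds the walk because these two
 * tables have no zeroed sentinel entry.
 */
static const char *module_id_to_msg(u8 module_id)
{
	u32 i;

	for (i = 0; i < ARRAY_SIZE(hclge_hw_module_id_st); i++)
		if (hclge_hw_module_id_st[i].module_id == module_id)
			return hclge_hw_module_id_st[i].msg;

	return "unknown module";
}
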
+static void hclge_log_error(struct device *dev, char *reg,
+ const struct hclge_hw_error *err,
+ u32 err_sts, unsigned long *reset_requests)
+{
+ while (err->msg) {
+ if (err->int_msk & err_sts) {
+ dev_err(dev, "%s %s found [error status=0x%x]\n",
+ reg, err->msg, err_sts);
+ if (err->reset_level &&
+ err->reset_level != HNAE3_NONE_RESET)
+ set_bit(err->reset_level, reset_requests);
+ }
+ err++;
+ }
+}
+
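
hclge_log_error() relies on the empty { } entry that closes each table: it zero-initializes .msg, so the while (err->msg) walk stops there, and only entries whose bit is set in err_sts are reported. A stand-alone userspace demo of the same pattern, with simplified stand-in types rather than the driver's own:

#include <stdio.h>

/* Simplified stand-ins for struct hclge_hw_error and one of the tables
 * above; this compiles and runs outside the kernel.
 */
struct demo_err {
	unsigned int int_msk;
	const char *msg;
};

static const struct demo_err demo_tbl[] = {
	{ 1u << 0, "rx_buf0_ecc_mbit_err" },
	{ 1u << 2, "rx_buf1_ecc_mbit_err" },
	{ /* sentinel: .msg stays NULL and ends the walk */ }
};

static void demo_log_error(const char *reg, const struct demo_err *err,
			   unsigned int err_sts)
{
	while (err->msg) {
		if (err->int_msk & err_sts)
			printf("%s %s found [error status=0x%x]\n",
			       reg, err->msg, err_sts);
		err++;
	}
}

int main(void)
{
	demo_log_error("IGU_INT_STS", demo_tbl, 0x5);	/* bits 0 and 2 set */
	return 0;
}
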
+/* hclge_cmd_query_error: read the error information
+ * @hdev: pointer to struct hclge_dev
+ * @desc: descriptor for describing the command
+ * @cmd: command opcode
+ * @flag: flag for extended command structure
+ *
+ * This function queries the error info from hw register(s) using command
+ */
+static int hclge_cmd_query_error(struct hclge_dev *hdev,
+ struct hclge_desc *desc, u32 cmd, u16 flag)
+{
+ struct device *dev = &hdev->pdev->dev;
+ int desc_num = 1;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc[0], cmd, true);
+ if (flag) {
+ desc[0].flag |= cpu_to_le16(flag);
+ hclge_cmd_setup_basic_desc(&desc[1], cmd, true);
+ desc_num = 2;
+ }
+
+ ret = hclge_cmd_send(&hdev->hw, &desc[0], desc_num);
+ if (ret)
+ dev_err(dev, "query error cmd failed (%d)\n", ret);
+
+ return ret;
+}
+
+static int hclge_clear_mac_tnl_int(struct hclge_dev *hdev)
+{
+ struct hclge_desc desc;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CLEAR_MAC_TNL_INT, false);
+ desc.data[0] = cpu_to_le32(HCLGE_MAC_TNL_INT_CLR);
+
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
+static int hclge_config_common_hw_err_int(struct hclge_dev *hdev, bool en)
+{
+ struct device *dev = &hdev->pdev->dev;
+ struct hclge_desc desc[2];
+ int ret;
+
+ /* configure common error interrupts */
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_COMMON_ECC_INT_CFG, false);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[1], HCLGE_COMMON_ECC_INT_CFG, false);
+
+ if (en) {
+ desc[0].data[0] = cpu_to_le32(HCLGE_IMP_TCM_ECC_ERR_INT_EN);
+ desc[0].data[2] = cpu_to_le32(HCLGE_CMDQ_NIC_ECC_ERR_INT_EN |
+ HCLGE_CMDQ_ROCEE_ECC_ERR_INT_EN);
+ desc[0].data[3] = cpu_to_le32(HCLGE_IMP_RD_POISON_ERR_INT_EN);
+ desc[0].data[4] = cpu_to_le32(HCLGE_TQP_ECC_ERR_INT_EN |
+ HCLGE_MSIX_SRAM_ECC_ERR_INT_EN);
+ desc[0].data[5] = cpu_to_le32(HCLGE_IMP_ITCM4_ECC_ERR_INT_EN);
+ }
+
+ desc[1].data[0] = cpu_to_le32(HCLGE_IMP_TCM_ECC_ERR_INT_EN_MASK);
+ desc[1].data[2] = cpu_to_le32(HCLGE_CMDQ_NIC_ECC_ERR_INT_EN_MASK |
+ HCLGE_CMDQ_ROCEE_ECC_ERR_INT_EN_MASK);
+ desc[1].data[3] = cpu_to_le32(HCLGE_IMP_RD_POISON_ERR_INT_EN_MASK);
+ desc[1].data[4] = cpu_to_le32(HCLGE_TQP_ECC_ERR_INT_EN_MASK |
+ HCLGE_MSIX_SRAM_ECC_ERR_INT_EN_MASK);
+ desc[1].data[5] = cpu_to_le32(HCLGE_IMP_ITCM4_ECC_ERR_INT_EN_MASK);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc[0], 2);
+ if (ret)
+ dev_err(dev,
+ "fail(%d) to configure common err interrupts\n", ret);
+
+ return ret;
+}
+
+static int hclge_config_ncsi_hw_err_int(struct hclge_dev *hdev, bool en)
+{
+ struct device *dev = &hdev->pdev->dev;
+ struct hclge_desc desc;
+ int ret;
+
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
+ return 0;
+
+ /* configure NCSI error interrupts */
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_NCSI_INT_EN, false);
+ if (en)
+ desc.data[0] = cpu_to_le32(HCLGE_NCSI_ERR_INT_EN);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(dev,
+ "fail(%d) to configure NCSI error interrupts\n", ret);
+
+ return ret;
+}
+
+static int hclge_config_igu_egu_hw_err_int(struct hclge_dev *hdev, bool en)
+{
+ struct device *dev = &hdev->pdev->dev;
+ struct hclge_desc desc;
+ int ret;
+
+ /* configure IGU,EGU error interrupts */
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_IGU_COMMON_INT_EN, false);
+ desc.data[0] = cpu_to_le32(HCLGE_IGU_ERR_INT_TYPE);
+ if (en)
+ desc.data[0] |= cpu_to_le32(HCLGE_IGU_ERR_INT_EN);
+
+ desc.data[1] = cpu_to_le32(HCLGE_IGU_ERR_INT_EN_MASK);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(dev,
+ "fail(%d) to configure IGU common interrupts\n", ret);
+ return ret;
+ }
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_IGU_EGU_TNL_INT_EN, false);
+ if (en)
+ desc.data[0] = cpu_to_le32(HCLGE_IGU_TNL_ERR_INT_EN);
+
+ desc.data[1] = cpu_to_le32(HCLGE_IGU_TNL_ERR_INT_EN_MASK);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(dev,
+ "fail(%d) to configure IGU-EGU TNL interrupts\n", ret);
+ return ret;
+ }
+
+ ret = hclge_config_ncsi_hw_err_int(hdev, en);
+
+ return ret;
+}
+
+static int hclge_config_ppp_error_interrupt(struct hclge_dev *hdev, u32 cmd,
+ bool en)
+{
+ struct device *dev = &hdev->pdev->dev;
+ struct hclge_desc desc[2];
+ int ret;
+
+ /* configure PPP error interrupts */
+ hclge_cmd_setup_basic_desc(&desc[0], cmd, false);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[1], cmd, false);
+
+ if (cmd == HCLGE_PPP_CMD0_INT_CMD) {
+ if (en) {
+ desc[0].data[0] =
+ cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT0_EN);
+ desc[0].data[1] =
+ cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT1_EN);
+ desc[0].data[4] = cpu_to_le32(HCLGE_PPP_PF_ERR_INT_EN);
+ }
+
+ desc[1].data[0] =
+ cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT0_EN_MASK);
+ desc[1].data[1] =
+ cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT1_EN_MASK);
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
+ desc[1].data[2] =
+ cpu_to_le32(HCLGE_PPP_PF_ERR_INT_EN_MASK);
+ } else if (cmd == HCLGE_PPP_CMD1_INT_CMD) {
+ if (en) {
+ desc[0].data[0] =
+ cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT2_EN);
+ desc[0].data[1] =
+ cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT3_EN);
+ }
+
+ desc[1].data[0] =
+ cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT2_EN_MASK);
+ desc[1].data[1] =
+ cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT3_EN_MASK);
+ }
+
+ ret = hclge_cmd_send(&hdev->hw, &desc[0], 2);
+ if (ret)
+ dev_err(dev, "fail(%d) to configure PPP error intr\n", ret);
+
+ return ret;
+}
+
+static int hclge_config_ppp_hw_err_int(struct hclge_dev *hdev, bool en)
+{
+ int ret;
+
+ ret = hclge_config_ppp_error_interrupt(hdev, HCLGE_PPP_CMD0_INT_CMD,
+ en);
+ if (ret)
+ return ret;
+
+ ret = hclge_config_ppp_error_interrupt(hdev, HCLGE_PPP_CMD1_INT_CMD,
+ en);
+
+ return ret;
+}
+
+static int hclge_config_tm_hw_err_int(struct hclge_dev *hdev, bool en)
+{
+ struct device *dev = &hdev->pdev->dev;
+ struct hclge_desc desc;
+ int ret;
+
+ /* configure TM SCH hw errors */
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_TM_SCH_ECC_INT_EN, false);
+ if (en)
+ desc.data[0] = cpu_to_le32(HCLGE_TM_SCH_ECC_ERR_INT_EN);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(dev, "fail(%d) to configure TM SCH errors\n", ret);
+ return ret;
+ }
+
+ /* configure TM QCN hw errors */
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_TM_QCN_MEM_INT_CFG, false);
+ desc.data[0] = cpu_to_le32(HCLGE_TM_QCN_ERR_INT_TYPE);
+ if (en) {
+ desc.data[0] |= cpu_to_le32(HCLGE_TM_QCN_FIFO_INT_EN);
+ desc.data[1] = cpu_to_le32(HCLGE_TM_QCN_MEM_ERR_INT_EN);
+ }
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(dev,
+ "fail(%d) to configure TM QCN mem errors\n", ret);
+
+ return ret;
+}
+
+static int hclge_config_mac_err_int(struct hclge_dev *hdev, bool en)
+{
+ struct device *dev = &hdev->pdev->dev;
+ struct hclge_desc desc;
+ int ret;
+
+ /* configure MAC common error interrupts */
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_MAC_COMMON_INT_EN, false);
+ if (en)
+ desc.data[0] = cpu_to_le32(HCLGE_MAC_COMMON_ERR_INT_EN);
+
+ desc.data[1] = cpu_to_le32(HCLGE_MAC_COMMON_ERR_INT_EN_MASK);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(dev,
+ "fail(%d) to configure MAC COMMON error intr\n", ret);
+
+ return ret;
+}
+
+int hclge_config_mac_tnl_int(struct hclge_dev *hdev, bool en)
+{
+ struct hclge_desc desc;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_TNL_INT_EN, false);
+ if (en)
+ desc.data[0] = cpu_to_le32(HCLGE_MAC_TNL_INT_EN);
+ else
+ desc.data[0] = 0;
+
+ desc.data[1] = cpu_to_le32(HCLGE_MAC_TNL_INT_EN_MASK);
+
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
+static int hclge_config_ppu_error_interrupts(struct hclge_dev *hdev, u32 cmd,
+ bool en)
+{
+ struct device *dev = &hdev->pdev->dev;
+ struct hclge_desc desc[2];
+ int desc_num = 1;
+ int ret;
+
+ /* configure PPU error interrupts */
+ if (cmd == HCLGE_PPU_MPF_ECC_INT_CMD) {
+ hclge_cmd_setup_basic_desc(&desc[0], cmd, false);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[1], cmd, false);
+ if (en) {
+ desc[0].data[0] =
+ cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT0_EN);
+ desc[0].data[1] =
+ cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT1_EN);
+ desc[1].data[3] =
+ cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT3_EN);
+ desc[1].data[4] =
+ cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT2_EN);
+ }
+
+ desc[1].data[0] =
+ cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT0_EN_MASK);
+ desc[1].data[1] =
+ cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT1_EN_MASK);
+ desc[1].data[2] =
+ cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT2_EN_MASK);
+ desc[1].data[3] |=
+ cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT3_EN_MASK);
+ desc_num = 2;
+ } else if (cmd == HCLGE_PPU_MPF_OTHER_INT_CMD) {
+ hclge_cmd_setup_basic_desc(&desc[0], cmd, false);
+ if (en)
+ desc[0].data[0] =
+ cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT2_EN2);
+
+ desc[0].data[2] =
+ cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT2_EN2_MASK);
+ } else if (cmd == HCLGE_PPU_PF_OTHER_INT_CMD) {
+ hclge_cmd_setup_basic_desc(&desc[0], cmd, false);
+ if (en)
+ desc[0].data[0] =
+ cpu_to_le32(HCLGE_PPU_PF_ABNORMAL_INT_EN);
+
+ desc[0].data[2] =
+ cpu_to_le32(HCLGE_PPU_PF_ABNORMAL_INT_EN_MASK);
+ } else {
+ dev_err(dev, "Invalid cmd to configure PPU error interrupts\n");
+ return -EINVAL;
+ }
+
+ ret = hclge_cmd_send(&hdev->hw, &desc[0], desc_num);
+
+ return ret;
+}
+
+static int hclge_config_ppu_hw_err_int(struct hclge_dev *hdev, bool en)
+{
+ struct device *dev = &hdev->pdev->dev;
+ int ret;
+
+ ret = hclge_config_ppu_error_interrupts(hdev, HCLGE_PPU_MPF_ECC_INT_CMD,
+ en);
+ if (ret) {
+ dev_err(dev, "fail(%d) to configure PPU MPF ECC error intr\n",
+ ret);
+ return ret;
+ }
+
+ ret = hclge_config_ppu_error_interrupts(hdev,
+ HCLGE_PPU_MPF_OTHER_INT_CMD,
+ en);
+ if (ret) {
+ dev_err(dev, "fail(%d) to configure PPU MPF other intr\n", ret);
+ return ret;
+ }
+
+ ret = hclge_config_ppu_error_interrupts(hdev,
+ HCLGE_PPU_PF_OTHER_INT_CMD, en);
+ if (ret)
+ dev_err(dev, "fail(%d) to configure PPU PF error interrupts\n",
+ ret);
+ return ret;
+}
+
+static int hclge_config_ssu_hw_err_int(struct hclge_dev *hdev, bool en)
+{
+ struct device *dev = &hdev->pdev->dev;
+ struct hclge_desc desc[2];
+ int ret;
+
+ /* configure SSU ecc error interrupts */
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_SSU_ECC_INT_CMD, false);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[1], HCLGE_SSU_ECC_INT_CMD, false);
+ if (en) {
+ desc[0].data[0] = cpu_to_le32(HCLGE_SSU_1BIT_ECC_ERR_INT_EN);
+ desc[0].data[1] =
+ cpu_to_le32(HCLGE_SSU_MULTI_BIT_ECC_ERR_INT_EN);
+ desc[0].data[4] = cpu_to_le32(HCLGE_SSU_BIT32_ECC_ERR_INT_EN);
+ }
+
+ desc[1].data[0] = cpu_to_le32(HCLGE_SSU_1BIT_ECC_ERR_INT_EN_MASK);
+ desc[1].data[1] = cpu_to_le32(HCLGE_SSU_MULTI_BIT_ECC_ERR_INT_EN_MASK);
+ desc[1].data[2] = cpu_to_le32(HCLGE_SSU_BIT32_ECC_ERR_INT_EN_MASK);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc[0], 2);
+ if (ret) {
+ dev_err(dev,
+ "fail(%d) to configure SSU ECC error interrupt\n", ret);
+ return ret;
+ }
+
+ /* configure SSU common error interrupts */
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_SSU_COMMON_INT_CMD, false);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[1], HCLGE_SSU_COMMON_INT_CMD, false);
+
+ if (en) {
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
+ desc[0].data[0] =
+ cpu_to_le32(HCLGE_SSU_COMMON_INT_EN);
+ else
+ desc[0].data[0] =
+ cpu_to_le32(HCLGE_SSU_COMMON_INT_EN & ~BIT(5));
+ desc[0].data[1] = cpu_to_le32(HCLGE_SSU_PORT_BASED_ERR_INT_EN);
+ desc[0].data[2] =
+ cpu_to_le32(HCLGE_SSU_FIFO_OVERFLOW_ERR_INT_EN);
+ }
+
+ desc[1].data[0] = cpu_to_le32(HCLGE_SSU_COMMON_INT_EN_MASK |
+ HCLGE_SSU_PORT_BASED_ERR_INT_EN_MASK);
+ desc[1].data[1] = cpu_to_le32(HCLGE_SSU_FIFO_OVERFLOW_ERR_INT_EN_MASK);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc[0], 2);
+ if (ret)
+ dev_err(dev,
+ "fail(%d) to configure SSU COMMON error intr\n", ret);
+
+ return ret;
+}
+
+/* hclge_query_bd_num: query number of buffer descriptors
+ * @hdev: pointer to struct hclge_dev
+ * @is_ras: true for ras, false for msix
+ * @mpf_bd_num: number of main PF interrupt buffer descriptors
+ * @pf_bd_num: number of non-main PF interrupt buffer descriptors
+ *
+ * This function queries the number of mpf and pf buffer descriptors.
+ */
+static int hclge_query_bd_num(struct hclge_dev *hdev, bool is_ras,
+ u32 *mpf_bd_num, u32 *pf_bd_num)
+{
+ struct device *dev = &hdev->pdev->dev;
+ u32 mpf_min_bd_num, pf_min_bd_num;
+ enum hclge_opcode_type opcode;
+ struct hclge_desc desc_bd;
+ int ret;
+
+ if (is_ras) {
+ opcode = HCLGE_QUERY_RAS_INT_STS_BD_NUM;
+ mpf_min_bd_num = HCLGE_MPF_RAS_INT_MIN_BD_NUM;
+ pf_min_bd_num = HCLGE_PF_RAS_INT_MIN_BD_NUM;
+ } else {
+ opcode = HCLGE_QUERY_MSIX_INT_STS_BD_NUM;
+ mpf_min_bd_num = HCLGE_MPF_MSIX_INT_MIN_BD_NUM;
+ pf_min_bd_num = HCLGE_PF_MSIX_INT_MIN_BD_NUM;
+ }
+
+ hclge_cmd_setup_basic_desc(&desc_bd, opcode, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc_bd, 1);
+ if (ret) {
+ dev_err(dev, "fail(%d) to query msix int status bd num\n",
+ ret);
+ return ret;
+ }
+
+ *mpf_bd_num = le32_to_cpu(desc_bd.data[0]);
+ *pf_bd_num = le32_to_cpu(desc_bd.data[1]);
+ if (*mpf_bd_num < mpf_min_bd_num || *pf_bd_num < pf_min_bd_num) {
+ dev_err(dev, "Invalid bd num: mpf(%u), pf(%u)\n",
+ *mpf_bd_num, *pf_bd_num);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* hclge_handle_mpf_ras_error: handle all main PF RAS errors
+ * @hdev: pointer to struct hclge_dev
+ * @desc: descriptor for describing the command
+ * @num: number of extended command structures
+ *
+ * This function handles all the main PF RAS errors in the
+ * hw register(s) using command.
+ */
+static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev,
+ struct hclge_desc *desc,
+ int num)
+{
+ struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
+ struct device *dev = &hdev->pdev->dev;
+ __le32 *desc_data;
+ u32 status;
+ int ret;
+
+ /* query all main PF RAS errors */
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_CLEAR_MPF_RAS_INT,
+ true);
+ ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
+ if (ret) {
+ dev_err(dev, "query all mpf ras int cmd failed (%d)\n", ret);
+ return ret;
+ }
+
+ /* log HNS common errors */
+ status = le32_to_cpu(desc[0].data[0]);
+ if (status)
+ hclge_log_error(dev, "IMP_TCM_ECC_INT_STS",
+ &hclge_imp_tcm_ecc_int[0], status,
+ &ae_dev->hw_err_reset_req);
+
+ status = le32_to_cpu(desc[0].data[1]);
+ if (status)
+ hclge_log_error(dev, "CMDQ_MEM_ECC_INT_STS",
+ &hclge_cmdq_nic_mem_ecc_int[0], status,
+ &ae_dev->hw_err_reset_req);
+
+ if ((le32_to_cpu(desc[0].data[2])) & BIT(0))
+ dev_warn(dev, "imp_rd_data_poison_err found\n");
+
+ status = le32_to_cpu(desc[0].data[3]);
+ if (status)
+ hclge_log_error(dev, "TQP_INT_ECC_INT_STS",
+ &hclge_tqp_int_ecc_int[0], status,
+ &ae_dev->hw_err_reset_req);
+
+ status = le32_to_cpu(desc[0].data[4]);
+ if (status)
+ hclge_log_error(dev, "MSIX_ECC_INT_STS",
+ &hclge_msix_sram_ecc_int[0], status,
+ &ae_dev->hw_err_reset_req);
+
+ /* log SSU(Storage Switch Unit) errors */
+ desc_data = (__le32 *)&desc[2];
+ status = le32_to_cpu(*(desc_data + 2));
+ if (status)
+ hclge_log_error(dev, "SSU_ECC_MULTI_BIT_INT_0",
+ &hclge_ssu_mem_ecc_err_int[0], status,
+ &ae_dev->hw_err_reset_req);
+
+ status = le32_to_cpu(*(desc_data + 3)) & BIT(0);
+ if (status) {
+ dev_err(dev, "SSU_ECC_MULTI_BIT_INT_1 ssu_mem32_ecc_mbit_err found [error status=0x%x]\n",
+ status);
+ set_bit(HNAE3_GLOBAL_RESET, &ae_dev->hw_err_reset_req);
+ }
+
+ status = le32_to_cpu(*(desc_data + 4)) & HCLGE_SSU_COMMON_ERR_INT_MASK;
+ if (status)
+ hclge_log_error(dev, "SSU_COMMON_ERR_INT",
+ &hclge_ssu_com_err_int[0], status,
+ &ae_dev->hw_err_reset_req);
+
+ /* log IGU(Ingress Unit) errors */
+ desc_data = (__le32 *)&desc[3];
+ status = le32_to_cpu(*desc_data) & HCLGE_IGU_INT_MASK;
+ if (status)
+ hclge_log_error(dev, "IGU_INT_STS",
+ &hclge_igu_int[0], status,
+ &ae_dev->hw_err_reset_req);
+
+ /* log PPP(Programmable Packet Process) errors */
+ desc_data = (__le32 *)&desc[4];
+ status = le32_to_cpu(*(desc_data + 1));
+ if (status)
+ hclge_log_error(dev, "PPP_MPF_ABNORMAL_INT_ST1",
+ &hclge_ppp_mpf_abnormal_int_st1[0], status,
+ &ae_dev->hw_err_reset_req);
+
+ status = le32_to_cpu(*(desc_data + 3)) & HCLGE_PPP_MPF_INT_ST3_MASK;
+ if (status)
+ hclge_log_error(dev, "PPP_MPF_ABNORMAL_INT_ST3",
+ &hclge_ppp_mpf_abnormal_int_st3[0], status,
+ &ae_dev->hw_err_reset_req);
+
+ /* log PPU(RCB) errors */
+ desc_data = (__le32 *)&desc[5];
+ status = le32_to_cpu(*(desc_data + 1));
+ if (status) {
+ dev_err(dev,
+ "PPU_MPF_ABNORMAL_INT_ST1 rpu_rx_pkt_ecc_mbit_err found\n");
+ set_bit(HNAE3_GLOBAL_RESET, &ae_dev->hw_err_reset_req);
+ }
+
+ status = le32_to_cpu(*(desc_data + 2));
+ if (status)
+ hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST2",
+ &hclge_ppu_mpf_abnormal_int_st2[0], status,
+ &ae_dev->hw_err_reset_req);
+
+ status = le32_to_cpu(*(desc_data + 3)) & HCLGE_PPU_MPF_INT_ST3_MASK;
+ if (status)
+ hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST3",
+ &hclge_ppu_mpf_abnormal_int_st3[0], status,
+ &ae_dev->hw_err_reset_req);
+
+ /* log TM(Traffic Manager) errors */
+ desc_data = (__le32 *)&desc[6];
+ status = le32_to_cpu(*desc_data);
+ if (status)
+ hclge_log_error(dev, "TM_SCH_RINT",
+ &hclge_tm_sch_rint[0], status,
+ &ae_dev->hw_err_reset_req);
+
+ /* log QCN(Quantized Congestion Control) errors */
+ desc_data = (__le32 *)&desc[7];
+ status = le32_to_cpu(*desc_data) & HCLGE_QCN_FIFO_INT_MASK;
+ if (status)
+ hclge_log_error(dev, "QCN_FIFO_RINT",
+ &hclge_qcn_fifo_rint[0], status,
+ &ae_dev->hw_err_reset_req);
+
+ status = le32_to_cpu(*(desc_data + 1)) & HCLGE_QCN_ECC_INT_MASK;
+ if (status)
+ hclge_log_error(dev, "QCN_ECC_RINT",
+ &hclge_qcn_ecc_rint[0], status,
+ &ae_dev->hw_err_reset_req);
+
+ /* log NCSI errors */
+ desc_data = (__le32 *)&desc[9];
+ status = le32_to_cpu(*desc_data) & HCLGE_NCSI_ECC_INT_MASK;
+ if (status)
+ hclge_log_error(dev, "NCSI_ECC_INT_RPT",
+ &hclge_ncsi_err_int[0], status,
+ &ae_dev->hw_err_reset_req);
+
+ /* clear all main PF RAS errors */
+ hclge_comm_cmd_reuse_desc(&desc[0], false);
+ ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
+ if (ret)
+ dev_err(dev, "clear all mpf ras int cmd failed (%d)\n", ret);
+
+ return ret;
+}
+
+/* hclge_handle_pf_ras_error: handle all PF RAS errors
+ * @hdev: pointer to struct hclge_dev
+ * @desc: descriptor for describing the command
+ * @num: number of extended command structures
+ *
+ * This function handles all the PF RAS errors in the
+ * hw registers using command.
+ */
+static int hclge_handle_pf_ras_error(struct hclge_dev *hdev,
+ struct hclge_desc *desc,
+ int num)
+{
+ struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
+ struct device *dev = &hdev->pdev->dev;
+ __le32 *desc_data;
+ u32 status;
+ int ret;
+
+ /* query all PF RAS errors */
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_CLEAR_PF_RAS_INT,
+ true);
+ ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
+ if (ret) {
+ dev_err(dev, "query all pf ras int cmd failed (%d)\n", ret);
+ return ret;
+ }
+
+ /* log SSU(Storage Switch Unit) errors */
+ status = le32_to_cpu(desc[0].data[0]);
+ if (status)
+ hclge_log_error(dev, "SSU_PORT_BASED_ERR_INT",
+ &hclge_ssu_port_based_err_int[0], status,
+ &ae_dev->hw_err_reset_req);
+
+ status = le32_to_cpu(desc[0].data[1]);
+ if (status)
+ hclge_log_error(dev, "SSU_FIFO_OVERFLOW_INT",
+ &hclge_ssu_fifo_overflow_int[0], status,
+ &ae_dev->hw_err_reset_req);
+
+ status = le32_to_cpu(desc[0].data[2]);
+ if (status)
+ hclge_log_error(dev, "SSU_ETS_TCG_INT",
+ &hclge_ssu_ets_tcg_int[0], status,
+ &ae_dev->hw_err_reset_req);
+
+ /* log IGU(Ingress Unit) EGU(Egress Unit) TNL errors */
+ desc_data = (__le32 *)&desc[1];
+ status = le32_to_cpu(*desc_data) & HCLGE_IGU_EGU_TNL_INT_MASK;
+ if (status)
+ hclge_log_error(dev, "IGU_EGU_TNL_INT_STS",
+ &hclge_igu_egu_tnl_int[0], status,
+ &ae_dev->hw_err_reset_req);
+
+ /* log PPU(RCB) errors */
+ desc_data = (__le32 *)&desc[3];
+ status = le32_to_cpu(*desc_data) & HCLGE_PPU_PF_INT_RAS_MASK;
+ if (status) {
+ hclge_log_error(dev, "PPU_PF_ABNORMAL_INT_ST0",
+ &hclge_ppu_pf_abnormal_int[0], status,
+ &ae_dev->hw_err_reset_req);
+ hclge_report_hw_error(hdev, HNAE3_PPU_POISON_ERROR);
+ }
+
+ /* clear all PF RAS errors */
+ hclge_comm_cmd_reuse_desc(&desc[0], false);
+ ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
+ if (ret)
+ dev_err(dev, "clear all pf ras int cmd failed (%d)\n", ret);
+
+ return ret;
+}
+
+static int hclge_handle_all_ras_errors(struct hclge_dev *hdev)
+{
+ u32 mpf_bd_num, pf_bd_num, bd_num;
+ struct hclge_desc *desc;
+ int ret;
+
+ /* query the number of registers in the RAS int status */
+ ret = hclge_query_bd_num(hdev, true, &mpf_bd_num, &pf_bd_num);
+ if (ret)
+ return ret;
+
+ bd_num = max_t(u32, mpf_bd_num, pf_bd_num);
+ desc = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ /* handle all main PF RAS errors */
+ ret = hclge_handle_mpf_ras_error(hdev, desc, mpf_bd_num);
+ if (ret) {
+ kfree(desc);
+ return ret;
+ }
+ memset(desc, 0, bd_num * sizeof(struct hclge_desc));
+
+ /* handle all PF RAS errors */
+ ret = hclge_handle_pf_ras_error(hdev, desc, pf_bd_num);
+ kfree(desc);
+
+ return ret;
+}
+
+static int hclge_log_rocee_axi_error(struct hclge_dev *hdev)
+{
+ struct device *dev = &hdev->pdev->dev;
+ struct hclge_desc desc[3];
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_ROCEE_AXI_RAS_INFO_CMD,
+ true);
+ hclge_cmd_setup_basic_desc(&desc[1], HCLGE_QUERY_ROCEE_AXI_RAS_INFO_CMD,
+ true);
+ hclge_cmd_setup_basic_desc(&desc[2], HCLGE_QUERY_ROCEE_AXI_RAS_INFO_CMD,
+ true);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc[0], 3);
+ if (ret) {
+ dev_err(dev, "failed(%d) to query ROCEE AXI error sts\n", ret);
+ return ret;
+ }
+
+ dev_err(dev, "AXI1: %08X %08X %08X %08X %08X %08X\n",
+ le32_to_cpu(desc[0].data[0]), le32_to_cpu(desc[0].data[1]),
+ le32_to_cpu(desc[0].data[2]), le32_to_cpu(desc[0].data[3]),
+ le32_to_cpu(desc[0].data[4]), le32_to_cpu(desc[0].data[5]));
+ dev_err(dev, "AXI2: %08X %08X %08X %08X %08X %08X\n",
+ le32_to_cpu(desc[1].data[0]), le32_to_cpu(desc[1].data[1]),
+ le32_to_cpu(desc[1].data[2]), le32_to_cpu(desc[1].data[3]),
+ le32_to_cpu(desc[1].data[4]), le32_to_cpu(desc[1].data[5]));
+ dev_err(dev, "AXI3: %08X %08X %08X %08X\n",
+ le32_to_cpu(desc[2].data[0]), le32_to_cpu(desc[2].data[1]),
+ le32_to_cpu(desc[2].data[2]), le32_to_cpu(desc[2].data[3]));
+
+ return 0;
+}
+
+static int hclge_log_rocee_ecc_error(struct hclge_dev *hdev)
+{
+ struct device *dev = &hdev->pdev->dev;
+ struct hclge_desc desc[2];
+ int ret;
+
+ ret = hclge_cmd_query_error(hdev, &desc[0],
+ HCLGE_QUERY_ROCEE_ECC_RAS_INFO_CMD,
+ HCLGE_COMM_CMD_FLAG_NEXT);
+ if (ret) {
+ dev_err(dev, "failed(%d) to query ROCEE ECC error sts\n", ret);
+ return ret;
+ }
+
+ dev_err(dev, "ECC1: %08X %08X %08X %08X %08X %08X\n",
+ le32_to_cpu(desc[0].data[0]), le32_to_cpu(desc[0].data[1]),
+ le32_to_cpu(desc[0].data[2]), le32_to_cpu(desc[0].data[3]),
+ le32_to_cpu(desc[0].data[4]), le32_to_cpu(desc[0].data[5]));
+ dev_err(dev, "ECC2: %08X %08X %08X\n", le32_to_cpu(desc[1].data[0]),
+ le32_to_cpu(desc[1].data[1]), le32_to_cpu(desc[1].data[2]));
+
+ return 0;
+}
+
+static int hclge_log_rocee_ovf_error(struct hclge_dev *hdev)
+{
+ struct device *dev = &hdev->pdev->dev;
+ struct hclge_desc desc[2];
+ int ret;
+
+ /* read overflow error status */
+ ret = hclge_cmd_query_error(hdev, &desc[0], HCLGE_ROCEE_PF_RAS_INT_CMD,
+ 0);
+ if (ret) {
+ dev_err(dev, "failed(%d) to query ROCEE OVF error sts\n", ret);
+ return ret;
+ }
+
+ /* log overflow error */
+ if (le32_to_cpu(desc[0].data[0]) & HCLGE_ROCEE_OVF_ERR_INT_MASK) {
+ const struct hclge_hw_error *err;
+ u32 err_sts;
+
+ err = &hclge_rocee_qmm_ovf_err_int[0];
+ err_sts = HCLGE_ROCEE_OVF_ERR_TYPE_MASK &
+ le32_to_cpu(desc[0].data[0]);
+ while (err->msg) {
+ if (err->int_msk == err_sts) {
+ dev_err(dev, "%s [error status=0x%x] found\n",
+ err->msg,
+ le32_to_cpu(desc[0].data[0]));
+ break;
+ }
+ err++;
+ }
+ }
+
+ if (le32_to_cpu(desc[0].data[1]) & HCLGE_ROCEE_OVF_ERR_INT_MASK) {
+ dev_err(dev, "ROCEE TSP OVF [error status=0x%x] found\n",
+ le32_to_cpu(desc[0].data[1]));
+ }
+
+ if (le32_to_cpu(desc[0].data[2]) & HCLGE_ROCEE_OVF_ERR_INT_MASK) {
+ dev_err(dev, "ROCEE SCC OVF [error status=0x%x] found\n",
+ le32_to_cpu(desc[0].data[2]));
+ }
+
+ return 0;
+}
+
+static enum hnae3_reset_type
+hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev)
+{
+ enum hnae3_reset_type reset_type = HNAE3_NONE_RESET;
+ struct device *dev = &hdev->pdev->dev;
+ struct hclge_desc desc[2];
+ unsigned int status;
+ int ret;
+
+ /* read RAS error interrupt status */
+ ret = hclge_cmd_query_error(hdev, &desc[0],
+ HCLGE_QUERY_CLEAR_ROCEE_RAS_INT, 0);
+ if (ret) {
+ dev_err(dev, "failed(%d) to query ROCEE RAS INT SRC\n", ret);
+ /* reset everything for now */
+ return HNAE3_GLOBAL_RESET;
+ }
+
+ status = le32_to_cpu(desc[0].data[0]);
+ if (status & HCLGE_ROCEE_AXI_ERR_INT_MASK) {
+ if (status & HCLGE_ROCEE_RERR_INT_MASK)
+ dev_err(dev, "ROCEE RAS AXI rresp error\n");
+
+ if (status & HCLGE_ROCEE_BERR_INT_MASK)
+ dev_err(dev, "ROCEE RAS AXI bresp error\n");
+
+ reset_type = HNAE3_FUNC_RESET;
+
+ hclge_report_hw_error(hdev, HNAE3_ROCEE_AXI_RESP_ERROR);
+
+ ret = hclge_log_rocee_axi_error(hdev);
+ if (ret)
+ return HNAE3_GLOBAL_RESET;
+ }
+
+ if (status & HCLGE_ROCEE_ECC_INT_MASK) {
+ dev_err(dev, "ROCEE RAS 2bit ECC error\n");
+ reset_type = HNAE3_GLOBAL_RESET;
+
+ ret = hclge_log_rocee_ecc_error(hdev);
+ if (ret)
+ return HNAE3_GLOBAL_RESET;
+ }
+
+ if (status & HCLGE_ROCEE_OVF_INT_MASK) {
+ ret = hclge_log_rocee_ovf_error(hdev);
+ if (ret) {
+ dev_err(dev, "failed(%d) to process ovf error\n", ret);
+ /* reset everything for now */
+ return HNAE3_GLOBAL_RESET;
+ }
+ }
+
+ /* clear error status */
+ hclge_comm_cmd_reuse_desc(&desc[0], false);
+ ret = hclge_cmd_send(&hdev->hw, &desc[0], 1);
+ if (ret) {
+ dev_err(dev, "failed(%d) to clear ROCEE RAS error\n", ret);
+ /* reset everything for now */
+ return HNAE3_GLOBAL_RESET;
+ }
+
+ return reset_type;
+}
+
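+/* hclge_config_rocee_ras_interrupt: enable or disable the ROCEE RAS NFE and
+ * CE interrupts. Only takes effect on V2 or later devices with RoCE support;
+ * when enabling, any pending error status is logged and cleared first.
+ */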
+int hclge_config_rocee_ras_interrupt(struct hclge_dev *hdev, bool en)
+{
+ struct device *dev = &hdev->pdev->dev;
+ struct hclge_desc desc;
+ int ret;
+
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2 ||
+ !hnae3_dev_roce_supported(hdev))
+ return 0;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_CONFIG_ROCEE_RAS_INT_EN, false);
+ if (en) {
+ /* enable ROCEE hw error interrupts */
+ desc.data[0] = cpu_to_le32(HCLGE_ROCEE_RAS_NFE_INT_EN);
+ desc.data[1] = cpu_to_le32(HCLGE_ROCEE_RAS_CE_INT_EN);
+
+ hclge_log_and_clear_rocee_ras_error(hdev);
+ }
+ desc.data[2] = cpu_to_le32(HCLGE_ROCEE_RAS_NFE_INT_EN_MASK);
+ desc.data[3] = cpu_to_le32(HCLGE_ROCEE_RAS_CE_INT_EN_MASK);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(dev, "failed(%d) to config ROCEE RAS interrupt\n", ret);
+
+ return ret;
+}
+
+static void hclge_handle_rocee_ras_error(struct hnae3_ae_dev *ae_dev)
+{
+ struct hclge_dev *hdev = ae_dev->priv;
+ enum hnae3_reset_type reset_type;
+
+ if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
+ return;
+
+ reset_type = hclge_log_and_clear_rocee_ras_error(hdev);
+ if (reset_type != HNAE3_NONE_RESET)
+ set_bit(reset_type, &ae_dev->hw_err_reset_req);
+}
+
+static const struct hclge_hw_blk hw_blk[] = {
+ {
+ .msk = BIT(0),
+ .name = "IGU_EGU",
+ .config_err_int = hclge_config_igu_egu_hw_err_int,
+ }, {
+ .msk = BIT(1),
+ .name = "PPP",
+ .config_err_int = hclge_config_ppp_hw_err_int,
+ }, {
+ .msk = BIT(2),
+ .name = "SSU",
+ .config_err_int = hclge_config_ssu_hw_err_int,
+ }, {
+ .msk = BIT(3),
+ .name = "PPU",
+ .config_err_int = hclge_config_ppu_hw_err_int,
+ }, {
+ .msk = BIT(4),
+ .name = "TM",
+ .config_err_int = hclge_config_tm_hw_err_int,
+ }, {
+ .msk = BIT(5),
+ .name = "COMMON",
+ .config_err_int = hclge_config_common_hw_err_int,
+ }, {
+ .msk = BIT(8),
+ .name = "MAC",
+ .config_err_int = hclge_config_mac_err_int,
+ }, {
+ /* sentinel */
+ }
+};
+
+static void hclge_config_all_msix_error(struct hclge_dev *hdev, bool enable)
+{
+ u32 reg_val;
+
+ reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
+
+ if (enable)
+ reg_val |= BIT(HCLGE_VECTOR0_ALL_MSIX_ERR_B);
+ else
+ reg_val &= ~BIT(HCLGE_VECTOR0_ALL_MSIX_ERR_B);
+
+ hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
+}
+
+int hclge_config_nic_hw_error(struct hclge_dev *hdev, bool state)
+{
+ const struct hclge_hw_blk *module = hw_blk;
+ int ret = 0;
+
+ hclge_config_all_msix_error(hdev, state);
+
+ while (module->name) {
+ if (module->config_err_int) {
+ ret = module->config_err_int(hdev, state);
+ if (ret)
+ return ret;
+ }
+ module++;
+ }
+
+ return ret;
+}
+
+pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev)
+{
+ struct hclge_dev *hdev = ae_dev->priv;
+ struct device *dev = &hdev->pdev->dev;
+ u32 status;
+
+ if (!test_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state)) {
+ dev_err(dev,
+ "Can't recover - RAS error reported during dev init\n");
+ return PCI_ERS_RESULT_NONE;
+ }
+
+ status = hclge_read_dev(&hdev->hw, HCLGE_RAS_PF_OTHER_INT_STS_REG);
+ if (status & HCLGE_RAS_REG_NFE_MASK ||
+ status & HCLGE_RAS_REG_ROCEE_ERR_MASK)
+ ae_dev->hw_err_reset_req = 0;
+ else
+ goto out;
+
+ /* Handling Non-fatal HNS RAS errors */
+ if (status & HCLGE_RAS_REG_NFE_MASK) {
+ dev_err(dev,
+ "HNS Non-Fatal RAS error(status=0x%x) identified\n",
+ status);
+ hclge_handle_all_ras_errors(hdev);
+ }
+
+ /* Handling Non-fatal Rocee RAS errors */
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 &&
+ status & HCLGE_RAS_REG_ROCEE_ERR_MASK) {
+ dev_err(dev, "ROCEE Non-Fatal RAS error identified\n");
+ hclge_handle_rocee_ras_error(ae_dev);
+ }
+
+ if (ae_dev->hw_err_reset_req)
+ return PCI_ERS_RESULT_NEED_RESET;
+
+out:
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static int hclge_clear_hw_msix_error(struct hclge_dev *hdev,
+ struct hclge_desc *desc, bool is_mpf,
+ u32 bd_num)
+{
+ if (is_mpf)
+ desc[0].opcode =
+ cpu_to_le16(HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT);
+ else
+ desc[0].opcode = cpu_to_le16(HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT);
+
+ desc[0].flag = cpu_to_le16(HCLGE_COMM_CMD_FLAG_NO_INTR |
+ HCLGE_COMM_CMD_FLAG_IN);
+
+ return hclge_cmd_send(&hdev->hw, &desc[0], bd_num);
+}
+
+/* hclge_query_over_8bd_err_info: query information about over_8bd_nfe_err
+ * @hdev: pointer to struct hclge_dev
+ * @vf_id: Index of the virtual function with error
+ * @q_id: Physical index of the queue with error
+ *
+ * This function gets the specific index of the queue and of the function
+ * which caused over_8bd_nfe_err by using a command. If vf_id is 0, it means
+ * the error was caused by the PF instead of a VF.
+ */
+static int hclge_query_over_8bd_err_info(struct hclge_dev *hdev, u16 *vf_id,
+ u16 *q_id)
+{
+ struct hclge_query_ppu_pf_other_int_dfx_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PPU_PF_OTHER_INT_DFX, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ return ret;
+
+ req = (struct hclge_query_ppu_pf_other_int_dfx_cmd *)desc.data;
+ *vf_id = le16_to_cpu(req->over_8bd_no_fe_vf_id);
+ *q_id = le16_to_cpu(req->over_8bd_no_fe_qid);
+
+ return 0;
+}
+
+/* hclge_handle_over_8bd_err: handle MSI-X error named over_8bd_nfe_err
+ * @hdev: pointer to struct hclge_dev
+ * @reset_requests: reset level that we need to trigger later
+ *
+ * over_8bd_nfe_err is a special MSI-X error because it may be caused by a
+ * VF; in that case, we need to trigger a VF reset. Otherwise, a PF reset is
+ * needed.
+ */
+static void hclge_handle_over_8bd_err(struct hclge_dev *hdev,
+ unsigned long *reset_requests)
+{
+ struct device *dev = &hdev->pdev->dev;
+ u16 vf_id;
+ u16 q_id;
+ int ret;
+
+ ret = hclge_query_over_8bd_err_info(hdev, &vf_id, &q_id);
+ if (ret) {
+ dev_err(dev, "fail(%d) to query over_8bd_no_fe info\n",
+ ret);
+ return;
+ }
+
+ dev_err(dev, "PPU_PF_ABNORMAL_INT_ST over_8bd_no_fe found, vport(%u), queue_id(%u)\n",
+ vf_id, q_id);
+
+ if (vf_id) {
+ if (vf_id >= hdev->num_alloc_vport) {
+ dev_err(dev, "invalid vport(%u)\n", vf_id);
+ return;
+ }
+
+ /* If we need to trigger other reset whose level is higher
+ * than HNAE3_VF_FUNC_RESET, no need to trigger a VF reset
+ * here.
+ */
+ if (*reset_requests != 0)
+ return;
+
+ ret = hclge_inform_reset_assert_to_vf(&hdev->vport[vf_id]);
+ if (ret)
+ dev_err(dev, "inform reset to vport(%u) failed %d!\n",
+ vf_id, ret);
+ } else {
+ set_bit(HNAE3_FUNC_RESET, reset_requests);
+ }
+}
+
+/* hclge_handle_mpf_msix_error: handle all main PF MSI-X errors
+ * @hdev: pointer to struct hclge_dev
+ * @desc: descriptor for describing the command
+ * @mpf_bd_num: number of extended command structures
+ * @reset_requests: record of the reset level that we need
+ *
+ * This function handles all the main PF MSI-X errors in the hw register(s)
+ * using a command.
+ */
+static int hclge_handle_mpf_msix_error(struct hclge_dev *hdev,
+ struct hclge_desc *desc,
+ int mpf_bd_num,
+ unsigned long *reset_requests)
+{
+ struct device *dev = &hdev->pdev->dev;
+ __le32 *desc_data;
+ u32 status;
+ int ret;
+
+ /* query all main PF MSIx errors */
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT,
+ true);
+ ret = hclge_cmd_send(&hdev->hw, &desc[0], mpf_bd_num);
+ if (ret) {
+ dev_err(dev, "query all mpf msix int cmd failed (%d)\n", ret);
+ return ret;
+ }
+
+ /* log MAC errors */
+ desc_data = (__le32 *)&desc[1];
+ status = le32_to_cpu(*desc_data);
+ if (status)
+ hclge_log_error(dev, "MAC_AFIFO_TNL_INT_R",
+ &hclge_mac_afifo_tnl_int[0], status,
+ reset_requests);
+
+ /* log PPU(RCB) MPF errors */
+ desc_data = (__le32 *)&desc[5];
+ status = le32_to_cpu(*(desc_data + 2)) &
+ HCLGE_PPU_MPF_INT_ST2_MSIX_MASK;
+ if (status)
+ dev_err(dev, "PPU_MPF_ABNORMAL_INT_ST2 rx_q_search_miss found [dfx status=0x%x\n]",
+ status);
+
+ /* clear all main PF MSIx errors */
+ ret = hclge_clear_hw_msix_error(hdev, desc, true, mpf_bd_num);
+ if (ret)
+ dev_err(dev, "clear all mpf msix int cmd failed (%d)\n", ret);
+
+ return ret;
+}
+
+/* hclge_handle_pf_msix_error: handle all PF MSI-X errors
+ * @hdev: pointer to struct hclge_dev
+ * @desc: descriptor for describing the command
+ * @pf_bd_num: number of extended command structures
+ * @reset_requests: record of the reset level that we need
+ *
+ * This function handles all the PF MSI-X errors in the hw register(s) using
+ * a command.
+ */
+static int hclge_handle_pf_msix_error(struct hclge_dev *hdev,
+ struct hclge_desc *desc,
+ int pf_bd_num,
+ unsigned long *reset_requests)
+{
+ struct device *dev = &hdev->pdev->dev;
+ __le32 *desc_data;
+ u32 status;
+ int ret;
+
+ /* query all PF MSIx errors */
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT,
+ true);
+ ret = hclge_cmd_send(&hdev->hw, &desc[0], pf_bd_num);
+ if (ret) {
+ dev_err(dev, "query all pf msix int cmd failed (%d)\n", ret);
+ return ret;
+ }
+
+ /* log SSU PF errors */
+ status = le32_to_cpu(desc[0].data[0]) & HCLGE_SSU_PORT_INT_MSIX_MASK;
+ if (status)
+ hclge_log_error(dev, "SSU_PORT_BASED_ERR_INT",
+ &hclge_ssu_port_based_pf_int[0],
+ status, reset_requests);
+
+ /* read and log PPP PF errors */
+ desc_data = (__le32 *)&desc[2];
+ status = le32_to_cpu(*desc_data);
+ if (status)
+ hclge_log_error(dev, "PPP_PF_ABNORMAL_INT_ST0",
+ &hclge_ppp_pf_abnormal_int[0],
+ status, reset_requests);
+
+ /* log PPU(RCB) PF errors */
+ desc_data = (__le32 *)&desc[3];
+ status = le32_to_cpu(*desc_data) & HCLGE_PPU_PF_INT_MSIX_MASK;
+ if (status)
+ hclge_log_error(dev, "PPU_PF_ABNORMAL_INT_ST",
+ &hclge_ppu_pf_abnormal_int[0],
+ status, reset_requests);
+
+ status = le32_to_cpu(*desc_data) & HCLGE_PPU_PF_OVER_8BD_ERR_MASK;
+ if (status)
+ hclge_handle_over_8bd_err(hdev, reset_requests);
+
+ /* clear all PF MSIx errors */
+ ret = hclge_clear_hw_msix_error(hdev, desc, false, pf_bd_num);
+ if (ret)
+ dev_err(dev, "clear all pf msix int cmd failed (%d)\n", ret);
+
+ return ret;
+}
+
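+/* hclge_handle_all_hw_msix_error: query the number of bds needed for the
+ * MSI-X interrupt status, then handle the main PF MSI-X errors, the PF
+ * MSI-X errors and finally the MAC tnl interrupts, recording the reset
+ * level required in reset_requests.
+ */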
+static int hclge_handle_all_hw_msix_error(struct hclge_dev *hdev,
+ unsigned long *reset_requests)
+{
+ u32 mpf_bd_num, pf_bd_num, bd_num;
+ struct hclge_desc *desc;
+ int ret;
+
+ /* query the number of bds for the MSIx int status */
+ ret = hclge_query_bd_num(hdev, false, &mpf_bd_num, &pf_bd_num);
+ if (ret)
+ goto out;
+
+ bd_num = max_t(u32, mpf_bd_num, pf_bd_num);
+ desc = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ ret = hclge_handle_mpf_msix_error(hdev, desc, mpf_bd_num,
+ reset_requests);
+ if (ret)
+ goto msi_error;
+
+ memset(desc, 0, bd_num * sizeof(struct hclge_desc));
+ ret = hclge_handle_pf_msix_error(hdev, desc, pf_bd_num, reset_requests);
+ if (ret)
+ goto msi_error;
+
+ ret = hclge_handle_mac_tnl(hdev);
+
+msi_error:
+ kfree(desc);
+out:
+ return ret;
+}
+
+int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
+ unsigned long *reset_requests)
+{
+ struct device *dev = &hdev->pdev->dev;
+
+ if (!test_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state)) {
+ dev_err(dev,
+ "failed to handle msix error during dev init\n");
+ return -EAGAIN;
+ }
+
+ return hclge_handle_all_hw_msix_error(hdev, reset_requests);
+}
+
+int hclge_handle_mac_tnl(struct hclge_dev *hdev)
+{
+ struct hclge_mac_tnl_stats mac_tnl_stats;
+ struct device *dev = &hdev->pdev->dev;
+ struct hclge_desc desc;
+ u32 status;
+ int ret;
+
+ /* query and clear mac tnl interrupts */
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_TNL_INT, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(dev, "failed to query mac tnl int, ret = %d.\n", ret);
+ return ret;
+ }
+
+ status = le32_to_cpu(desc.data[0]);
+ if (status) {
+ /* When a mac tnl interrupt occurs, we record the current time and
+ * register status here in a fifo, then clear the status, so that
+ * if the link status changes suddenly at some time, we can query
+ * them via debugfs.
+ */
+ mac_tnl_stats.time = local_clock();
+ mac_tnl_stats.status = status;
+ kfifo_put(&hdev->mac_tnl_log, mac_tnl_stats);
+ ret = hclge_clear_mac_tnl_int(hdev);
+ if (ret)
+ dev_err(dev, "failed to clear mac tnl int, ret = %d.\n",
+ ret);
+ }
+
+ return ret;
+}
+
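+/* hclge_handle_all_hns_hw_errors: clear any MSI-X error status left over
+ * from before initialization and handle non-fatal RAS errors that are
+ * already pending.
+ */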
+void hclge_handle_all_hns_hw_errors(struct hnae3_ae_dev *ae_dev)
+{
+ struct hclge_dev *hdev = ae_dev->priv;
+ struct device *dev = &hdev->pdev->dev;
+ u32 mpf_bd_num, pf_bd_num, bd_num;
+ struct hclge_desc *desc;
+ u32 status;
+ int ret;
+
+ ae_dev->hw_err_reset_req = 0;
+ status = hclge_read_dev(&hdev->hw, HCLGE_RAS_PF_OTHER_INT_STS_REG);
+
+ /* query the number of bds for the MSIx int status */
+ ret = hclge_query_bd_num(hdev, false, &mpf_bd_num, &pf_bd_num);
+ if (ret)
+ return;
+
+ bd_num = max_t(u32, mpf_bd_num, pf_bd_num);
+ desc = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
+ if (!desc)
+ return;
+
+ /* Clear HNS hw errors reported through msix */
+ memset(&desc[0].data[0], 0xFF, mpf_bd_num * sizeof(struct hclge_desc) -
+ HCLGE_DESC_NO_DATA_LEN);
+ ret = hclge_clear_hw_msix_error(hdev, desc, true, mpf_bd_num);
+ if (ret) {
+ dev_err(dev, "fail(%d) to clear mpf msix int during init\n",
+ ret);
+ goto msi_error;
+ }
+
+ memset(&desc[0].data[0], 0xFF, pf_bd_num * sizeof(struct hclge_desc) -
+ HCLGE_DESC_NO_DATA_LEN);
+ ret = hclge_clear_hw_msix_error(hdev, desc, false, pf_bd_num);
+ if (ret) {
+ dev_err(dev, "fail(%d) to clear pf msix int during init\n",
+ ret);
+ goto msi_error;
+ }
+
+ /* Handle Non-fatal HNS RAS errors */
+ if (status & HCLGE_RAS_REG_NFE_MASK) {
+ dev_err(dev, "HNS hw error(RAS) identified during init\n");
+ hclge_handle_all_ras_errors(hdev);
+ }
+
+msi_error:
+ kfree(desc);
+}
+
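+/* hclge_find_error_source: return true if either an MSI-X error source or a
+ * RAS hardware error source is currently reported by the hardware.
+ */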
+bool hclge_find_error_source(struct hclge_dev *hdev)
+{
+ u32 msix_src_flag, hw_err_src_flag;
+
+ msix_src_flag = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
+ HCLGE_VECTOR0_REG_MSIX_MASK;
+
+ hw_err_src_flag = hclge_read_dev(&hdev->hw,
+ HCLGE_RAS_PF_OTHER_INT_STS_REG) &
+ HCLGE_RAS_REG_ERR_MASK;
+
+ return msix_src_flag || hw_err_src_flag;
+}
+
+void hclge_handle_occurred_error(struct hclge_dev *hdev)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+
+ if (hclge_find_error_source(hdev))
+ hclge_handle_error_info_log(ae_dev);
+}
+
+static void
+hclge_handle_error_type_reg_log(struct device *dev,
+ struct hclge_mod_err_info *mod_info,
+ struct hclge_type_reg_err_info *type_reg_info)
+{
+#define HCLGE_ERR_TYPE_MASK 0x7F
+#define HCLGE_ERR_TYPE_IS_RAS_OFFSET 7
+
+ u8 mod_id, total_module, type_id, total_type, i, is_ras;
+ u8 index_module = MODULE_NONE;
+ u8 index_type = NONE_ERROR;
+
+ mod_id = mod_info->mod_id;
+ type_id = type_reg_info->type_id & HCLGE_ERR_TYPE_MASK;
+ is_ras = type_reg_info->type_id >> HCLGE_ERR_TYPE_IS_RAS_OFFSET;
+
+ total_module = ARRAY_SIZE(hclge_hw_module_id_st);
+ total_type = ARRAY_SIZE(hclge_hw_type_id_st);
+
+ for (i = 0; i < total_module; i++) {
+ if (mod_id == hclge_hw_module_id_st[i].module_id) {
+ index_module = i;
+ break;
+ }
+ }
+
+ for (i = 0; i < total_type; i++) {
+ if (type_id == hclge_hw_type_id_st[i].type_id) {
+ index_type = i;
+ break;
+ }
+ }
+
+ if (index_module != MODULE_NONE && index_type != NONE_ERROR)
+ dev_err(dev,
+ "found %s %s, is %s error.\n",
+ hclge_hw_module_id_st[index_module].msg,
+ hclge_hw_type_id_st[index_type].msg,
+ is_ras ? "ras" : "msix");
+ else
+ dev_err(dev,
+ "unknown module[%u] or type[%u].\n", mod_id, type_id);
+
+ dev_err(dev, "reg_value:\n");
+ for (i = 0; i < type_reg_info->reg_num; i++)
+ dev_err(dev, "0x%08x\n", type_reg_info->hclge_reg[i]);
+}
+
+static void hclge_handle_error_module_log(struct hnae3_ae_dev *ae_dev,
+ const u32 *buf, u32 buf_size)
+{
+ struct hclge_type_reg_err_info *type_reg_info;
+ struct hclge_dev *hdev = ae_dev->priv;
+ struct device *dev = &hdev->pdev->dev;
+ struct hclge_mod_err_info *mod_info;
+ struct hclge_sum_err_info *sum_info;
+ u8 mod_num, err_num, i;
+ u32 offset = 0;
+
+ sum_info = (struct hclge_sum_err_info *)&buf[offset++];
+ if (sum_info->reset_type &&
+ sum_info->reset_type != HNAE3_NONE_RESET)
+ set_bit(sum_info->reset_type, &ae_dev->hw_err_reset_req);
+ mod_num = sum_info->mod_num;
+
+ while (mod_num--) {
+ if (offset >= buf_size) {
+ dev_err(dev, "The offset(%u) exceeds buf's size(%u).\n",
+ offset, buf_size);
+ return;
+ }
+ mod_info = (struct hclge_mod_err_info *)&buf[offset++];
+ err_num = mod_info->err_num;
+
+ for (i = 0; i < err_num; i++) {
+ if (offset >= buf_size) {
+ dev_err(dev,
+ "The offset(%u) exceeds buf size(%u).\n",
+ offset, buf_size);
+ return;
+ }
+
+ type_reg_info = (struct hclge_type_reg_err_info *)
+ &buf[offset++];
+ hclge_handle_error_type_reg_log(dev, mod_info,
+ type_reg_info);
+
+ offset += type_reg_info->reg_num;
+ }
+ }
+}
+
+static int hclge_query_all_err_bd_num(struct hclge_dev *hdev, u32 *bd_num)
+{
+ struct device *dev = &hdev->pdev->dev;
+ struct hclge_desc desc_bd;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc_bd, HCLGE_QUERY_ALL_ERR_BD_NUM, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc_bd, 1);
+ if (ret) {
+ dev_err(dev, "failed to query error bd_num, ret = %d.\n", ret);
+ return ret;
+ }
+
+ *bd_num = le32_to_cpu(desc_bd.data[0]);
+ if (!(*bd_num)) {
+ dev_err(dev, "The value of bd_num is 0!\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int hclge_query_all_err_info(struct hclge_dev *hdev,
+ struct hclge_desc *desc, u32 bd_num)
+{
+ struct device *dev = &hdev->pdev->dev;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(desc, HCLGE_QUERY_ALL_ERR_INFO, true);
+ ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
+ if (ret)
+ dev_err(dev, "failed to query error info, ret = %d.\n", ret);
+
+ return ret;
+}
+
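+/* hclge_handle_error_info_log: query the full hardware error information log
+ * from firmware, convert it to CPU byte order and decode it module by module,
+ * recording any requested reset level in ae_dev->hw_err_reset_req.
+ */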
+int hclge_handle_error_info_log(struct hnae3_ae_dev *ae_dev)
+{
+ u32 bd_num, desc_len, buf_len, buf_size, i;
+ struct hclge_dev *hdev = ae_dev->priv;
+ struct hclge_desc *desc;
+ __le32 *desc_data;
+ u32 *buf;
+ int ret;
+
+ ret = hclge_query_all_err_bd_num(hdev, &bd_num);
+ if (ret)
+ goto out;
+
+ desc_len = bd_num * sizeof(struct hclge_desc);
+ desc = kzalloc(desc_len, GFP_KERNEL);
+ if (!desc) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = hclge_query_all_err_info(hdev, desc, bd_num);
+ if (ret)
+ goto err_desc;
+
+ buf_len = bd_num * sizeof(struct hclge_desc) - HCLGE_DESC_NO_DATA_LEN;
+ buf_size = buf_len / sizeof(u32);
+
+ desc_data = kzalloc(buf_len, GFP_KERNEL);
+ if (!desc_data) {
+ ret = -ENOMEM;
+ goto err_desc;
+ }
+
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto err_buf_alloc;
+ }
+
+ memcpy(desc_data, &desc[0].data[0], buf_len);
+ for (i = 0; i < buf_size; i++)
+ buf[i] = le32_to_cpu(desc_data[i]);
+
+ hclge_handle_error_module_log(ae_dev, buf, buf_size);
+ kfree(buf);
+
+err_buf_alloc:
+ kfree(desc_data);
+err_desc:
+ kfree(desc);
+out:
+ return ret;
+}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
new file mode 100644
index 0000000000..86be6fb329
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
@@ -0,0 +1,231 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2016-2017 Hisilicon Limited. */
+
+#ifndef __HCLGE_ERR_H
+#define __HCLGE_ERR_H
+
+#include "hclge_main.h"
+#include "hnae3.h"
+
+#define HCLGE_MPF_RAS_INT_MIN_BD_NUM 10
+#define HCLGE_PF_RAS_INT_MIN_BD_NUM 4
+#define HCLGE_MPF_MSIX_INT_MIN_BD_NUM 10
+#define HCLGE_PF_MSIX_INT_MIN_BD_NUM 4
+
+#define HCLGE_RAS_PF_OTHER_INT_STS_REG 0x20B00
+#define HCLGE_RAS_REG_NFE_MASK 0xFF00
+#define HCLGE_RAS_REG_ROCEE_ERR_MASK 0x3000000
+#define HCLGE_RAS_REG_ERR_MASK \
+ (HCLGE_RAS_REG_NFE_MASK | HCLGE_RAS_REG_ROCEE_ERR_MASK)
+
+#define HCLGE_VECTOR0_REG_MSIX_MASK 0x1FF00
+
+#define HCLGE_IMP_TCM_ECC_ERR_INT_EN 0xFFFF0000
+#define HCLGE_IMP_TCM_ECC_ERR_INT_EN_MASK 0xFFFF0000
+#define HCLGE_IMP_ITCM4_ECC_ERR_INT_EN 0x300
+#define HCLGE_IMP_ITCM4_ECC_ERR_INT_EN_MASK 0x300
+#define HCLGE_CMDQ_NIC_ECC_ERR_INT_EN 0xFFFF
+#define HCLGE_CMDQ_NIC_ECC_ERR_INT_EN_MASK 0xFFFF
+#define HCLGE_CMDQ_ROCEE_ECC_ERR_INT_EN 0xFFFF0000
+#define HCLGE_CMDQ_ROCEE_ECC_ERR_INT_EN_MASK 0xFFFF0000
+#define HCLGE_IMP_RD_POISON_ERR_INT_EN 0x0100
+#define HCLGE_IMP_RD_POISON_ERR_INT_EN_MASK 0x0100
+#define HCLGE_TQP_ECC_ERR_INT_EN 0x0FFF
+#define HCLGE_TQP_ECC_ERR_INT_EN_MASK 0x0FFF
+#define HCLGE_MSIX_SRAM_ECC_ERR_INT_EN_MASK 0x0F000000
+#define HCLGE_MSIX_SRAM_ECC_ERR_INT_EN 0x0F000000
+#define HCLGE_IGU_ERR_INT_EN 0x0000000F
+#define HCLGE_IGU_ERR_INT_TYPE 0x00000660
+#define HCLGE_IGU_ERR_INT_EN_MASK 0x000F
+#define HCLGE_IGU_TNL_ERR_INT_EN 0x0002AABF
+#define HCLGE_IGU_TNL_ERR_INT_EN_MASK 0x003F
+#define HCLGE_PPP_MPF_ECC_ERR_INT0_EN 0xFFFFFFFF
+#define HCLGE_PPP_MPF_ECC_ERR_INT0_EN_MASK 0xFFFFFFFF
+#define HCLGE_PPP_MPF_ECC_ERR_INT1_EN 0xFFFFFFFF
+#define HCLGE_PPP_MPF_ECC_ERR_INT1_EN_MASK 0xFFFFFFFF
+#define HCLGE_PPP_PF_ERR_INT_EN 0x0003
+#define HCLGE_PPP_PF_ERR_INT_EN_MASK 0x0003
+#define HCLGE_PPP_MPF_ECC_ERR_INT2_EN 0x003F
+#define HCLGE_PPP_MPF_ECC_ERR_INT2_EN_MASK 0x003F
+#define HCLGE_PPP_MPF_ECC_ERR_INT3_EN 0x003F
+#define HCLGE_PPP_MPF_ECC_ERR_INT3_EN_MASK 0x003F
+#define HCLGE_TM_SCH_ECC_ERR_INT_EN 0x3
+#define HCLGE_TM_QCN_ERR_INT_TYPE 0x29
+#define HCLGE_TM_QCN_FIFO_INT_EN 0xFFFF00
+#define HCLGE_TM_QCN_MEM_ERR_INT_EN 0xFFFFFF
+#define HCLGE_NCSI_ERR_INT_EN 0x3
+#define HCLGE_NCSI_ERR_INT_TYPE 0x9
+#define HCLGE_MAC_COMMON_ERR_INT_EN 0x107FF
+#define HCLGE_MAC_COMMON_ERR_INT_EN_MASK 0x107FF
+#define HCLGE_MAC_TNL_INT_EN GENMASK(9, 0)
+#define HCLGE_MAC_TNL_INT_EN_MASK GENMASK(9, 0)
+#define HCLGE_MAC_TNL_INT_CLR GENMASK(9, 0)
+#define HCLGE_PPU_MPF_ABNORMAL_INT0_EN GENMASK(31, 0)
+#define HCLGE_PPU_MPF_ABNORMAL_INT0_EN_MASK GENMASK(31, 0)
+#define HCLGE_PPU_MPF_ABNORMAL_INT1_EN GENMASK(31, 0)
+#define HCLGE_PPU_MPF_ABNORMAL_INT1_EN_MASK GENMASK(31, 0)
+#define HCLGE_PPU_MPF_ABNORMAL_INT2_EN 0x3FFF3FFF
+#define HCLGE_PPU_MPF_ABNORMAL_INT2_EN_MASK 0x3FFF3FFF
+#define HCLGE_PPU_MPF_ABNORMAL_INT2_EN2 0xB
+#define HCLGE_PPU_MPF_ABNORMAL_INT2_EN2_MASK 0xB
+#define HCLGE_PPU_MPF_ABNORMAL_INT3_EN GENMASK(7, 0)
+#define HCLGE_PPU_MPF_ABNORMAL_INT3_EN_MASK GENMASK(23, 16)
+#define HCLGE_PPU_PF_ABNORMAL_INT_EN GENMASK(5, 0)
+#define HCLGE_PPU_PF_ABNORMAL_INT_EN_MASK GENMASK(5, 0)
+#define HCLGE_SSU_1BIT_ECC_ERR_INT_EN GENMASK(31, 0)
+#define HCLGE_SSU_1BIT_ECC_ERR_INT_EN_MASK GENMASK(31, 0)
+#define HCLGE_SSU_MULTI_BIT_ECC_ERR_INT_EN GENMASK(31, 0)
+#define HCLGE_SSU_MULTI_BIT_ECC_ERR_INT_EN_MASK GENMASK(31, 0)
+#define HCLGE_SSU_BIT32_ECC_ERR_INT_EN 0x0101
+#define HCLGE_SSU_BIT32_ECC_ERR_INT_EN_MASK 0x0101
+#define HCLGE_SSU_COMMON_INT_EN GENMASK(9, 0)
+#define HCLGE_SSU_COMMON_INT_EN_MASK GENMASK(9, 0)
+#define HCLGE_SSU_PORT_BASED_ERR_INT_EN 0x0BFF
+#define HCLGE_SSU_PORT_BASED_ERR_INT_EN_MASK 0x0BFF0000
+#define HCLGE_SSU_FIFO_OVERFLOW_ERR_INT_EN GENMASK(23, 0)
+#define HCLGE_SSU_FIFO_OVERFLOW_ERR_INT_EN_MASK GENMASK(23, 0)
+
+#define HCLGE_SSU_COMMON_ERR_INT_MASK GENMASK(9, 0)
+#define HCLGE_SSU_PORT_INT_MSIX_MASK 0x7BFF
+#define HCLGE_IGU_INT_MASK GENMASK(3, 0)
+#define HCLGE_IGU_EGU_TNL_INT_MASK GENMASK(5, 0)
+#define HCLGE_PPP_MPF_INT_ST3_MASK GENMASK(5, 0)
+#define HCLGE_PPU_MPF_INT_ST3_MASK GENMASK(7, 0)
+#define HCLGE_PPU_MPF_INT_ST2_MSIX_MASK BIT(29)
+#define HCLGE_PPU_PF_INT_RAS_MASK 0x18
+#define HCLGE_PPU_PF_INT_MSIX_MASK 0x26
+#define HCLGE_PPU_PF_OVER_8BD_ERR_MASK 0x01
+#define HCLGE_QCN_FIFO_INT_MASK GENMASK(17, 0)
+#define HCLGE_QCN_ECC_INT_MASK GENMASK(21, 0)
+#define HCLGE_NCSI_ECC_INT_MASK GENMASK(1, 0)
+
+#define HCLGE_ROCEE_RAS_NFE_INT_EN 0xF
+#define HCLGE_ROCEE_RAS_CE_INT_EN 0x1
+#define HCLGE_ROCEE_RAS_NFE_INT_EN_MASK 0xF
+#define HCLGE_ROCEE_RAS_CE_INT_EN_MASK 0x1
+#define HCLGE_ROCEE_RERR_INT_MASK BIT(0)
+#define HCLGE_ROCEE_BERR_INT_MASK BIT(1)
+#define HCLGE_ROCEE_AXI_ERR_INT_MASK GENMASK(1, 0)
+#define HCLGE_ROCEE_ECC_INT_MASK BIT(2)
+#define HCLGE_ROCEE_OVF_INT_MASK BIT(3)
+#define HCLGE_ROCEE_OVF_ERR_INT_MASK 0x10000
+#define HCLGE_ROCEE_OVF_ERR_TYPE_MASK 0x3F
+
+#define HCLGE_DESC_DATA_MAX 8
+#define HCLGE_REG_NUM_MAX 256
+#define HCLGE_DESC_NO_DATA_LEN 8
+
+enum hclge_err_int_type {
+ HCLGE_ERR_INT_MSIX = 0,
+ HCLGE_ERR_INT_RAS_CE = 1,
+ HCLGE_ERR_INT_RAS_NFE = 2,
+ HCLGE_ERR_INT_RAS_FE = 3,
+};
+
+enum hclge_mod_name_list {
+ MODULE_NONE = 0,
+ MODULE_BIOS_COMMON = 1,
+ MODULE_GE = 2,
+ MODULE_IGU_EGU = 3,
+ MODULE_LGE = 4,
+ MODULE_NCSI = 5,
+ MODULE_PPP = 6,
+ MODULE_QCN = 7,
+ MODULE_RCB_RX = 8,
+ MODULE_RTC = 9,
+ MODULE_SSU = 10,
+ MODULE_TM = 11,
+ MODULE_RCB_TX = 12,
+ MODULE_TXDMA = 13,
+ MODULE_MASTER = 14,
+ MODULE_HIMAC = 15,
+ /* add new MODULE NAME for NIC here in order */
+ MODULE_ROCEE_TOP = 40,
+ MODULE_ROCEE_TIMER = 41,
+ MODULE_ROCEE_MDB = 42,
+ MODULE_ROCEE_TSP = 43,
+ MODULE_ROCEE_TRP = 44,
+ MODULE_ROCEE_SCC = 45,
+ MODULE_ROCEE_CAEP = 46,
+ MODULE_ROCEE_GEN_AC = 47,
+ MODULE_ROCEE_QMM = 48,
+ MODULE_ROCEE_LSAN = 49,
+ /* add new MODULE NAME for RoCEE here in order */
+};
+
+enum hclge_err_type_list {
+ NONE_ERROR = 0,
+ FIFO_ERROR = 1,
+ MEMORY_ERROR = 2,
+ POISON_ERROR = 3,
+ MSIX_ECC_ERROR = 4,
+ TQP_INT_ECC_ERROR = 5,
+ PF_ABNORMAL_INT_ERROR = 6,
+ MPF_ABNORMAL_INT_ERROR = 7,
+ COMMON_ERROR = 8,
+ PORT_ERROR = 9,
+ ETS_ERROR = 10,
+ NCSI_ERROR = 11,
+ GLB_ERROR = 12,
+ LINK_ERROR = 13,
+ PTP_ERROR = 14,
+ /* add new ERROR TYPE for NIC here in order */
+ ROCEE_NORMAL_ERR = 40,
+ ROCEE_OVF_ERR = 41,
+ ROCEE_BUS_ERR = 42,
+ /* add new ERROR TYPE for ROCEE here in order */
+};
+
+struct hclge_hw_blk {
+ u32 msk;
+ const char *name;
+ int (*config_err_int)(struct hclge_dev *hdev, bool en);
+};
+
+struct hclge_hw_error {
+ u32 int_msk;
+ const char *msg;
+ enum hnae3_reset_type reset_level;
+};
+
+struct hclge_hw_module_id {
+ enum hclge_mod_name_list module_id;
+ const char *msg;
+};
+
+struct hclge_hw_type_id {
+ enum hclge_err_type_list type_id;
+ const char *msg;
+};
+
+struct hclge_sum_err_info {
+ u8 reset_type;
+ u8 mod_num;
+ u8 rsv[2];
+};
+
+struct hclge_mod_err_info {
+ u8 mod_id;
+ u8 err_num;
+ u8 rsv[2];
+};
+
+struct hclge_type_reg_err_info {
+ u8 type_id;
+ u8 reg_num;
+ u8 rsv[2];
+ u32 hclge_reg[HCLGE_REG_NUM_MAX];
+};
+
+int hclge_config_mac_tnl_int(struct hclge_dev *hdev, bool en);
+int hclge_config_nic_hw_error(struct hclge_dev *hdev, bool state);
+int hclge_config_rocee_ras_interrupt(struct hclge_dev *hdev, bool en);
+void hclge_handle_all_hns_hw_errors(struct hnae3_ae_dev *ae_dev);
+bool hclge_find_error_source(struct hclge_dev *hdev);
+void hclge_handle_occurred_error(struct hclge_dev *hdev);
+pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev);
+int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
+ unsigned long *reset_requests);
+int hclge_handle_error_info_log(struct hnae3_ae_dev *ae_dev);
+int hclge_handle_mac_tnl(struct hclge_dev *hdev);
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
new file mode 100644
index 0000000000..a61d9fd732
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -0,0 +1,12793 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
+
+#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/etherdevice.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/if_vlan.h>
+#include <linux/crash_dump.h>
+#include <net/ipv6.h>
+#include <net/rtnetlink.h>
+#include "hclge_cmd.h"
+#include "hclge_dcb.h"
+#include "hclge_main.h"
+#include "hclge_mbx.h"
+#include "hclge_mdio.h"
+#include "hclge_regs.h"
+#include "hclge_tm.h"
+#include "hclge_err.h"
+#include "hnae3.h"
+#include "hclge_devlink.h"
+#include "hclge_comm_cmd.h"
+
+#define HCLGE_NAME "hclge"
+
+#define HCLGE_BUF_SIZE_UNIT 256U
+#define HCLGE_BUF_MUL_BY 2
+#define HCLGE_BUF_DIV_BY 2
+#define NEED_RESERVE_TC_NUM 2
+#define BUF_MAX_PERCENT 100
+#define BUF_RESERVE_PERCENT 90
+
+#define HCLGE_RESET_MAX_FAIL_CNT 5
+#define HCLGE_RESET_SYNC_TIME 100
+#define HCLGE_PF_RESET_SYNC_TIME 20
+#define HCLGE_PF_RESET_SYNC_CNT 1500
+
+#define HCLGE_LINK_STATUS_MS 10
+
+static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
+static int hclge_init_vlan_config(struct hclge_dev *hdev);
+static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
+static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
+static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
+static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
+static int hclge_clear_arfs_rules(struct hclge_dev *hdev);
+static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
+ unsigned long *addr);
+static int hclge_set_default_loopback(struct hclge_dev *hdev);
+
+static void hclge_sync_mac_table(struct hclge_dev *hdev);
+static void hclge_restore_hw_table(struct hclge_dev *hdev);
+static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
+static void hclge_sync_fd_table(struct hclge_dev *hdev);
+static void hclge_update_fec_stats(struct hclge_dev *hdev);
+static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret,
+ int wait_cnt);
+static int hclge_update_port_info(struct hclge_dev *hdev);
+
+static struct hnae3_ae_algo ae_algo;
+
+static struct workqueue_struct *hclge_wq;
+
+static const struct pci_device_id ae_algo_pci_tbl[] = {
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
+ /* required last entry */
+ {0, }
+};
+
+MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
+
+static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
+ "External Loopback test",
+ "App Loopback test",
+ "Serdes serial Loopback test",
+ "Serdes parallel Loopback test",
+ "Phy Loopback test"
+};
+
+static const struct hclge_comm_stats_str g_mac_stats_string[] = {
+ {"mac_tx_mac_pause_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
+ {"mac_rx_mac_pause_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
+ {"mac_tx_pause_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pause_xoff_time)},
+ {"mac_rx_pause_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pause_xoff_time)},
+ {"mac_tx_control_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
+ {"mac_rx_control_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
+ {"mac_tx_pfc_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
+ {"mac_tx_pfc_pri0_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
+ {"mac_tx_pfc_pri1_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
+ {"mac_tx_pfc_pri2_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
+ {"mac_tx_pfc_pri3_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
+ {"mac_tx_pfc_pri4_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
+ {"mac_tx_pfc_pri5_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
+ {"mac_tx_pfc_pri6_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
+ {"mac_tx_pfc_pri7_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
+ {"mac_tx_pfc_pri0_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_xoff_time)},
+ {"mac_tx_pfc_pri1_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_xoff_time)},
+ {"mac_tx_pfc_pri2_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_xoff_time)},
+ {"mac_tx_pfc_pri3_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_xoff_time)},
+ {"mac_tx_pfc_pri4_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_xoff_time)},
+ {"mac_tx_pfc_pri5_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_xoff_time)},
+ {"mac_tx_pfc_pri6_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_xoff_time)},
+ {"mac_tx_pfc_pri7_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_xoff_time)},
+ {"mac_rx_pfc_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
+ {"mac_rx_pfc_pri0_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
+ {"mac_rx_pfc_pri1_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
+ {"mac_rx_pfc_pri2_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
+ {"mac_rx_pfc_pri3_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
+ {"mac_rx_pfc_pri4_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
+ {"mac_rx_pfc_pri5_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
+ {"mac_rx_pfc_pri6_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
+ {"mac_rx_pfc_pri7_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
+ {"mac_rx_pfc_pri0_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_xoff_time)},
+ {"mac_rx_pfc_pri1_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_xoff_time)},
+ {"mac_rx_pfc_pri2_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_xoff_time)},
+ {"mac_rx_pfc_pri3_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_xoff_time)},
+ {"mac_rx_pfc_pri4_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_xoff_time)},
+ {"mac_rx_pfc_pri5_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_xoff_time)},
+ {"mac_rx_pfc_pri6_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_xoff_time)},
+ {"mac_rx_pfc_pri7_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_xoff_time)},
+ {"mac_tx_total_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
+ {"mac_tx_total_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
+ {"mac_tx_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
+ {"mac_tx_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
+ {"mac_tx_good_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
+ {"mac_tx_bad_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
+ {"mac_tx_uni_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
+ {"mac_tx_multi_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
+ {"mac_tx_broad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
+ {"mac_tx_undersize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
+ {"mac_tx_oversize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
+ {"mac_tx_64_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
+ {"mac_tx_65_127_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
+ {"mac_tx_128_255_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
+ {"mac_tx_256_511_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
+ {"mac_tx_512_1023_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
+ {"mac_tx_1024_1518_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
+ {"mac_tx_1519_2047_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
+ {"mac_tx_2048_4095_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
+ {"mac_tx_4096_8191_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
+ {"mac_tx_8192_9216_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
+ {"mac_tx_9217_12287_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
+ {"mac_tx_12288_16383_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
+ {"mac_tx_1519_max_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
+ {"mac_tx_1519_max_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
+ {"mac_rx_total_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
+ {"mac_rx_total_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
+ {"mac_rx_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
+ {"mac_rx_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
+ {"mac_rx_good_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
+ {"mac_rx_bad_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
+ {"mac_rx_uni_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
+ {"mac_rx_multi_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
+ {"mac_rx_broad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
+ {"mac_rx_undersize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
+ {"mac_rx_oversize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
+ {"mac_rx_64_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
+ {"mac_rx_65_127_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
+ {"mac_rx_128_255_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
+ {"mac_rx_256_511_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
+ {"mac_rx_512_1023_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
+ {"mac_rx_1024_1518_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
+ {"mac_rx_1519_2047_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
+ {"mac_rx_2048_4095_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
+ {"mac_rx_4096_8191_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
+ {"mac_rx_8192_9216_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
+ {"mac_rx_9217_12287_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
+ {"mac_rx_12288_16383_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
+ {"mac_rx_1519_max_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
+ {"mac_rx_1519_max_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
+
+ {"mac_tx_fragment_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
+ {"mac_tx_undermin_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
+ {"mac_tx_jabber_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
+ {"mac_tx_err_all_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
+ {"mac_tx_from_app_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
+ {"mac_tx_from_app_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
+ {"mac_rx_fragment_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
+ {"mac_rx_undermin_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
+ {"mac_rx_jabber_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
+ {"mac_rx_fcs_err_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
+ {"mac_rx_send_app_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
+ {"mac_rx_send_app_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
+};
+
+static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
+ {
+ .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
+ .ethter_type = cpu_to_le16(ETH_P_LLDP),
+ .mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
+ .i_port_bitmap = 0x1,
+ },
+};
+
+static const struct key_info meta_data_key_info[] = {
+ { PACKET_TYPE_ID, 6 },
+ { IP_FRAGEMENT, 1 },
+ { ROCE_TYPE, 1 },
+ { NEXT_KEY, 5 },
+ { VLAN_NUMBER, 2 },
+ { SRC_VPORT, 12 },
+ { DST_VPORT, 12 },
+ { TUNNEL_PACKET, 1 },
+};
+
+static const struct key_info tuple_key_info[] = {
+ { OUTER_DST_MAC, 48, KEY_OPT_MAC, -1, -1 },
+ { OUTER_SRC_MAC, 48, KEY_OPT_MAC, -1, -1 },
+ { OUTER_VLAN_TAG_FST, 16, KEY_OPT_LE16, -1, -1 },
+ { OUTER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
+ { OUTER_ETH_TYPE, 16, KEY_OPT_LE16, -1, -1 },
+ { OUTER_L2_RSV, 16, KEY_OPT_LE16, -1, -1 },
+ { OUTER_IP_TOS, 8, KEY_OPT_U8, -1, -1 },
+ { OUTER_IP_PROTO, 8, KEY_OPT_U8, -1, -1 },
+ { OUTER_SRC_IP, 32, KEY_OPT_IP, -1, -1 },
+ { OUTER_DST_IP, 32, KEY_OPT_IP, -1, -1 },
+ { OUTER_L3_RSV, 16, KEY_OPT_LE16, -1, -1 },
+ { OUTER_SRC_PORT, 16, KEY_OPT_LE16, -1, -1 },
+ { OUTER_DST_PORT, 16, KEY_OPT_LE16, -1, -1 },
+ { OUTER_L4_RSV, 32, KEY_OPT_LE32, -1, -1 },
+ { OUTER_TUN_VNI, 24, KEY_OPT_VNI, -1, -1 },
+ { OUTER_TUN_FLOW_ID, 8, KEY_OPT_U8, -1, -1 },
+ { INNER_DST_MAC, 48, KEY_OPT_MAC,
+ offsetof(struct hclge_fd_rule, tuples.dst_mac),
+ offsetof(struct hclge_fd_rule, tuples_mask.dst_mac) },
+ { INNER_SRC_MAC, 48, KEY_OPT_MAC,
+ offsetof(struct hclge_fd_rule, tuples.src_mac),
+ offsetof(struct hclge_fd_rule, tuples_mask.src_mac) },
+ { INNER_VLAN_TAG_FST, 16, KEY_OPT_LE16,
+ offsetof(struct hclge_fd_rule, tuples.vlan_tag1),
+ offsetof(struct hclge_fd_rule, tuples_mask.vlan_tag1) },
+ { INNER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
+ { INNER_ETH_TYPE, 16, KEY_OPT_LE16,
+ offsetof(struct hclge_fd_rule, tuples.ether_proto),
+ offsetof(struct hclge_fd_rule, tuples_mask.ether_proto) },
+ { INNER_L2_RSV, 16, KEY_OPT_LE16,
+ offsetof(struct hclge_fd_rule, tuples.l2_user_def),
+ offsetof(struct hclge_fd_rule, tuples_mask.l2_user_def) },
+ { INNER_IP_TOS, 8, KEY_OPT_U8,
+ offsetof(struct hclge_fd_rule, tuples.ip_tos),
+ offsetof(struct hclge_fd_rule, tuples_mask.ip_tos) },
+ { INNER_IP_PROTO, 8, KEY_OPT_U8,
+ offsetof(struct hclge_fd_rule, tuples.ip_proto),
+ offsetof(struct hclge_fd_rule, tuples_mask.ip_proto) },
+ { INNER_SRC_IP, 32, KEY_OPT_IP,
+ offsetof(struct hclge_fd_rule, tuples.src_ip),
+ offsetof(struct hclge_fd_rule, tuples_mask.src_ip) },
+ { INNER_DST_IP, 32, KEY_OPT_IP,
+ offsetof(struct hclge_fd_rule, tuples.dst_ip),
+ offsetof(struct hclge_fd_rule, tuples_mask.dst_ip) },
+ { INNER_L3_RSV, 16, KEY_OPT_LE16,
+ offsetof(struct hclge_fd_rule, tuples.l3_user_def),
+ offsetof(struct hclge_fd_rule, tuples_mask.l3_user_def) },
+ { INNER_SRC_PORT, 16, KEY_OPT_LE16,
+ offsetof(struct hclge_fd_rule, tuples.src_port),
+ offsetof(struct hclge_fd_rule, tuples_mask.src_port) },
+ { INNER_DST_PORT, 16, KEY_OPT_LE16,
+ offsetof(struct hclge_fd_rule, tuples.dst_port),
+ offsetof(struct hclge_fd_rule, tuples_mask.dst_port) },
+ { INNER_L4_RSV, 32, KEY_OPT_LE32,
+ offsetof(struct hclge_fd_rule, tuples.l4_user_def),
+ offsetof(struct hclge_fd_rule, tuples_mask.l4_user_def) },
+};
+
+/**
+ * hclge_cmd_send - send command to command queue
+ * @hw: pointer to the hw struct
+ * @desc: prefilled descriptor for describing the command
+ * @num: the number of descriptors to be sent
+ *
+ * This is the main send command for the command queue; it
+ * sends the queue, cleans the queue, etc.
+ **/
+int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
+{
+ return hclge_comm_cmd_send(&hw->hw, desc, num);
+}
+
+static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
+{
+#define HCLGE_MAC_CMD_NUM 21
+
+ u64 *data = (u64 *)(&hdev->mac_stats);
+ struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
+ __le64 *desc_data;
+ u32 data_size;
+ int ret;
+ u32 i;
+
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
+ ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get MAC pkt stats fail, status = %d.\n", ret);
+
+ return ret;
+ }
+
+ /* The first desc has a 64-bit header, so the data size is reduced by one */
+ data_size = sizeof(desc) / (sizeof(u64)) - 1;
+
+ desc_data = (__le64 *)(&desc[0].data[0]);
+ for (i = 0; i < data_size; i++) {
+ /* data memory is continuous because only the first desc has a
+ * header in this command
+ */
+ *data += le64_to_cpu(*desc_data);
+ data++;
+ desc_data++;
+ }
+
+ return 0;
+}
+
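+/* hclge_mac_update_stats_complete: read the full set of MAC statistics
+ * registers reported by the firmware and accumulate the values into
+ * hdev->mac_stats; the number of descriptors used is derived from the
+ * register count in the device specs.
+ */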
+static int hclge_mac_update_stats_complete(struct hclge_dev *hdev)
+{
+#define HCLGE_REG_NUM_PER_DESC 4
+
+ u32 reg_num = hdev->ae_dev->dev_specs.mac_stats_num;
+ u64 *data = (u64 *)(&hdev->mac_stats);
+ struct hclge_desc *desc;
+ __le64 *desc_data;
+ u32 data_size;
+ u32 desc_num;
+ int ret;
+ u32 i;
+
+ /* The first desc has a 64-bit header, so it must be taken into account */
+ desc_num = reg_num / HCLGE_REG_NUM_PER_DESC + 1;
+
+ /* This may be called inside atomic sections,
+ * so GFP_ATOMIC is more suitable here
+ */
+ desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
+ if (!desc)
+ return -ENOMEM;
+
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
+ ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
+ if (ret) {
+ kfree(desc);
+ return ret;
+ }
+
+ data_size = min_t(u32, sizeof(hdev->mac_stats) / sizeof(u64), reg_num);
+
+ desc_data = (__le64 *)(&desc[0].data[0]);
+ for (i = 0; i < data_size; i++) {
+ /* data memory is continuous because only the first desc has a
+ * header in this command
+ */
+ *data += le64_to_cpu(*desc_data);
+ data++;
+ desc_data++;
+ }
+
+ kfree(desc);
+
+ return 0;
+}
+
+static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *reg_num)
+{
+ struct hclge_desc desc;
+ int ret;
+
+ /* The driver needs the total register number of both valid registers
+ * and reserved registers, but old firmware only returns the number
+ * of valid registers on device V2. To be compatible with these
+ * devices, the driver uses a fixed value.
+ */
+ if (hdev->ae_dev->dev_version == HNAE3_DEVICE_VERSION_V2) {
+ *reg_num = HCLGE_MAC_STATS_MAX_NUM_V1;
+ return 0;
+ }
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to query mac statistic reg number, ret = %d\n",
+ ret);
+ return ret;
+ }
+
+ *reg_num = le32_to_cpu(desc.data[0]);
+ if (*reg_num == 0) {
+ dev_err(&hdev->pdev->dev,
+ "mac statistic reg number is invalid!\n");
+ return -ENODATA;
+ }
+
+ return 0;
+}
+
+int hclge_mac_update_stats(struct hclge_dev *hdev)
+{
+ /* The firmware supports the new statistics acquisition method */
+ if (hdev->ae_dev->dev_specs.mac_stats_num)
+ return hclge_mac_update_stats_complete(hdev);
+ else
+ return hclge_mac_update_stats_defective(hdev);
+}
+
+static int hclge_comm_get_count(struct hclge_dev *hdev,
+ const struct hclge_comm_stats_str strs[],
+ u32 size)
+{
+ int count = 0;
+ u32 i;
+
+ for (i = 0; i < size; i++)
+ if (strs[i].stats_num <= hdev->ae_dev->dev_specs.mac_stats_num)
+ count++;
+
+ return count;
+}
+
+static u64 *hclge_comm_get_stats(struct hclge_dev *hdev,
+ const struct hclge_comm_stats_str strs[],
+ int size, u64 *data)
+{
+ u64 *buf = data;
+ u32 i;
+
+ for (i = 0; i < size; i++) {
+ if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num)
+ continue;
+
+ *buf = HCLGE_STATS_READ(&hdev->mac_stats, strs[i].offset);
+ buf++;
+ }
+
+ return buf;
+}
+
+static u8 *hclge_comm_get_strings(struct hclge_dev *hdev, u32 stringset,
+ const struct hclge_comm_stats_str strs[],
+ int size, u8 *data)
+{
+ char *buff = (char *)data;
+ u32 i;
+
+ if (stringset != ETH_SS_STATS)
+ return buff;
+
+ for (i = 0; i < size; i++) {
+ if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num)
+ continue;
+
+ snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
+ buff = buff + ETH_GSTRING_LEN;
+ }
+
+ return (u8 *)buff;
+}
+
+static void hclge_update_stats_for_all(struct hclge_dev *hdev)
+{
+ struct hnae3_handle *handle;
+ int status;
+
+ handle = &hdev->vport[0].nic;
+ if (handle->client) {
+ status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw);
+ if (status) {
+ dev_err(&hdev->pdev->dev,
+ "Update TQPS stats fail, status = %d.\n",
+ status);
+ }
+ }
+
+ hclge_update_fec_stats(hdev);
+
+ status = hclge_mac_update_stats(hdev);
+ if (status)
+ dev_err(&hdev->pdev->dev,
+ "Update MAC stats fail, status = %d.\n", status);
+}
+
+static void hclge_update_stats(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ int status;
+
+ if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
+ return;
+
+ status = hclge_mac_update_stats(hdev);
+ if (status)
+ dev_err(&hdev->pdev->dev,
+ "Update MAC stats fail, status = %d.\n",
+ status);
+
+ status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw);
+ if (status)
+ dev_err(&hdev->pdev->dev,
+ "Update TQPS stats fail, status = %d.\n",
+ status);
+
+ clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
+}
+
+static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
+{
+#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK | \
+ HNAE3_SUPPORT_PHY_LOOPBACK | \
+ HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK | \
+ HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK | \
+ HNAE3_SUPPORT_EXTERNAL_LOOPBACK)
+
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ int count = 0;
+
+ /* Loopback test support rules:
+ * mac: only supported in GE mode
+ * serdes: supported in all mac modes, including GE/XGE/LGE/CGE
+ * phy: only supported when a phy device exists on the board
+ */
+ if (stringset == ETH_SS_TEST) {
+ /* clear loopback bit flags at first */
+ handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 ||
+ hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
+ hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
+ hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
+ count += 1;
+ handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
+ }
+
+ count += 1;
+ handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
+ count += 1;
+ handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
+ count += 1;
+ handle->flags |= HNAE3_SUPPORT_EXTERNAL_LOOPBACK;
+
+ if ((hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
+ hdev->hw.mac.phydev->drv->set_loopback) ||
+ hnae3_dev_phy_imp_supported(hdev)) {
+ count += 1;
+ handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
+ }
+ } else if (stringset == ETH_SS_STATS) {
+ count = hclge_comm_get_count(hdev, g_mac_stats_string,
+ ARRAY_SIZE(g_mac_stats_string)) +
+ hclge_comm_tqps_get_sset_count(handle);
+ }
+
+ return count;
+}
+
+static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
+ u8 *data)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ u8 *p = (char *)data;
+ int size;
+
+ if (stringset == ETH_SS_STATS) {
+ size = ARRAY_SIZE(g_mac_stats_string);
+ p = hclge_comm_get_strings(hdev, stringset, g_mac_stats_string,
+ size, p);
+ p = hclge_comm_tqps_get_strings(handle, p);
+ } else if (stringset == ETH_SS_TEST) {
+ if (handle->flags & HNAE3_SUPPORT_EXTERNAL_LOOPBACK) {
+ memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_EXTERNAL],
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
+ memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
+ memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
+ memcpy(p,
+ hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
+ memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ }
+}
+
+static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ u64 *p;
+
+ p = hclge_comm_get_stats(hdev, g_mac_stats_string,
+ ARRAY_SIZE(g_mac_stats_string), data);
+ p = hclge_comm_tqps_get_stats(handle, p);
+}
+
+static void hclge_get_mac_stat(struct hnae3_handle *handle,
+ struct hns3_mac_stats *mac_stats)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ hclge_update_stats(handle);
+
+ mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
+ mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
+}
+
+static int hclge_parse_func_status(struct hclge_dev *hdev,
+ struct hclge_func_status_cmd *status)
+{
+#define HCLGE_MAC_ID_MASK 0xF
+
+ if (!(status->pf_state & HCLGE_PF_STATE_DONE))
+ return -EINVAL;
+
+ /* Set the pf to main pf */
+ if (status->pf_state & HCLGE_PF_STATE_MAIN)
+ hdev->flag |= HCLGE_FLAG_MAIN;
+ else
+ hdev->flag &= ~HCLGE_FLAG_MAIN;
+
+ hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
+ return 0;
+}
+
+static int hclge_query_function_status(struct hclge_dev *hdev)
+{
+#define HCLGE_QUERY_MAX_CNT 5
+
+ struct hclge_func_status_cmd *req;
+ struct hclge_desc desc;
+ int timeout = 0;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
+ req = (struct hclge_func_status_cmd *)desc.data;
+
+ do {
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "query function status failed %d.\n", ret);
+ return ret;
+ }
+
+ /* Check pf reset is done */
+ if (req->pf_state)
+ break;
+ usleep_range(1000, 2000);
+ } while (timeout++ < HCLGE_QUERY_MAX_CNT);
+
+ return hclge_parse_func_status(hdev, req);
+}
+
+static int hclge_query_pf_resource(struct hclge_dev *hdev)
+{
+ struct hclge_pf_res_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "query pf resource failed %d.\n", ret);
+ return ret;
+ }
+
+ req = (struct hclge_pf_res_cmd *)desc.data;
+ hdev->num_tqps = le16_to_cpu(req->tqp_num) +
+ le16_to_cpu(req->ext_tqp_num);
+ hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
+
+ if (req->tx_buf_size)
+ hdev->tx_buf_size =
+ le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
+ else
+ hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
+
+ hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
+
+ if (req->dv_buf_size)
+ hdev->dv_buf_size =
+ le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
+ else
+ hdev->dv_buf_size = HCLGE_DEFAULT_DV;
+
+ hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
+
+ hdev->num_nic_msi = le16_to_cpu(req->msixcap_localid_number_nic);
+ if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
+ dev_err(&hdev->pdev->dev,
+ "only %u msi resources available, not enough for pf(min:2).\n",
+ hdev->num_nic_msi);
+ return -EINVAL;
+ }
+
+ if (hnae3_dev_roce_supported(hdev)) {
+ hdev->num_roce_msi =
+ le16_to_cpu(req->pf_intr_vector_number_roce);
+
+ /* The PF should have both NIC vectors and RoCE vectors;
+ * NIC vectors are allocated before RoCE vectors.
+ */
+ hdev->num_msi = hdev->num_nic_msi + hdev->num_roce_msi;
+ } else {
+ hdev->num_msi = hdev->num_nic_msi;
+ }
+
+ return 0;
+}
+
+static int hclge_parse_speed(u8 speed_cmd, u32 *speed)
+{
+ switch (speed_cmd) {
+ case HCLGE_FW_MAC_SPEED_10M:
+ *speed = HCLGE_MAC_SPEED_10M;
+ break;
+ case HCLGE_FW_MAC_SPEED_100M:
+ *speed = HCLGE_MAC_SPEED_100M;
+ break;
+ case HCLGE_FW_MAC_SPEED_1G:
+ *speed = HCLGE_MAC_SPEED_1G;
+ break;
+ case HCLGE_FW_MAC_SPEED_10G:
+ *speed = HCLGE_MAC_SPEED_10G;
+ break;
+ case HCLGE_FW_MAC_SPEED_25G:
+ *speed = HCLGE_MAC_SPEED_25G;
+ break;
+ case HCLGE_FW_MAC_SPEED_40G:
+ *speed = HCLGE_MAC_SPEED_40G;
+ break;
+ case HCLGE_FW_MAC_SPEED_50G:
+ *speed = HCLGE_MAC_SPEED_50G;
+ break;
+ case HCLGE_FW_MAC_SPEED_100G:
+ *speed = HCLGE_MAC_SPEED_100G;
+ break;
+ case HCLGE_FW_MAC_SPEED_200G:
+ *speed = HCLGE_MAC_SPEED_200G;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct hclge_speed_bit_map speed_bit_map[] = {
+ {HCLGE_MAC_SPEED_10M, HCLGE_SUPPORT_10M_BIT},
+ {HCLGE_MAC_SPEED_100M, HCLGE_SUPPORT_100M_BIT},
+ {HCLGE_MAC_SPEED_1G, HCLGE_SUPPORT_1G_BIT},
+ {HCLGE_MAC_SPEED_10G, HCLGE_SUPPORT_10G_BIT},
+ {HCLGE_MAC_SPEED_25G, HCLGE_SUPPORT_25G_BIT},
+ {HCLGE_MAC_SPEED_40G, HCLGE_SUPPORT_40G_BIT},
+ {HCLGE_MAC_SPEED_50G, HCLGE_SUPPORT_50G_BIT},
+ {HCLGE_MAC_SPEED_100G, HCLGE_SUPPORT_100G_BIT},
+ {HCLGE_MAC_SPEED_200G, HCLGE_SUPPORT_200G_BIT},
+};
+
+static int hclge_get_speed_bit(u32 speed, u32 *speed_bit)
+{
+ u16 i;
+
+ for (i = 0; i < ARRAY_SIZE(speed_bit_map); i++) {
+ if (speed == speed_bit_map[i].speed) {
+ *speed_bit = speed_bit_map[i].speed_bit;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ u32 speed_ability = hdev->hw.mac.speed_ability;
+ u32 speed_bit = 0;
+ int ret;
+
+ ret = hclge_get_speed_bit(speed, &speed_bit);
+ if (ret)
+ return ret;
+
+ if (speed_bit & speed_ability)
+ return 0;
+
+ return -EINVAL;
+}
+
+static void hclge_update_fec_support(struct hclge_mac *mac)
+{
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT, mac->supported);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
+
+ if (mac->fec_ability & BIT(HNAE3_FEC_BASER))
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
+ mac->supported);
+ if (mac->fec_ability & BIT(HNAE3_FEC_RS))
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
+ mac->supported);
+ if (mac->fec_ability & BIT(HNAE3_FEC_LLRS))
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
+ mac->supported);
+ if (mac->fec_ability & BIT(HNAE3_FEC_NONE))
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
+ mac->supported);
+}
+
+static void hclge_convert_setting_sr(u16 speed_ability,
+ unsigned long *link_mode)
+{
+ if (speed_ability & HCLGE_SUPPORT_10G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
+ link_mode);
+ if (speed_ability & HCLGE_SUPPORT_25G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
+ link_mode);
+ if (speed_ability & HCLGE_SUPPORT_40G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
+ link_mode);
+ if (speed_ability & HCLGE_SUPPORT_50G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
+ link_mode);
+ if (speed_ability & HCLGE_SUPPORT_100G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
+ link_mode);
+ if (speed_ability & HCLGE_SUPPORT_200G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
+ link_mode);
+}
+
+static void hclge_convert_setting_lr(u16 speed_ability,
+ unsigned long *link_mode)
+{
+ if (speed_ability & HCLGE_SUPPORT_10G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
+ link_mode);
+ if (speed_ability & HCLGE_SUPPORT_25G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
+ link_mode);
+ if (speed_ability & HCLGE_SUPPORT_50G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
+ link_mode);
+ if (speed_ability & HCLGE_SUPPORT_40G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
+ link_mode);
+ if (speed_ability & HCLGE_SUPPORT_100G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
+ link_mode);
+ if (speed_ability & HCLGE_SUPPORT_200G_BIT)
+ linkmode_set_bit(
+ ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
+ link_mode);
+}
+
+static void hclge_convert_setting_cr(u16 speed_ability,
+ unsigned long *link_mode)
+{
+ if (speed_ability & HCLGE_SUPPORT_10G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
+ link_mode);
+ if (speed_ability & HCLGE_SUPPORT_25G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
+ link_mode);
+ if (speed_ability & HCLGE_SUPPORT_40G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
+ link_mode);
+ if (speed_ability & HCLGE_SUPPORT_50G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
+ link_mode);
+ if (speed_ability & HCLGE_SUPPORT_100G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
+ link_mode);
+ if (speed_ability & HCLGE_SUPPORT_200G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
+ link_mode);
+}
+
+static void hclge_convert_setting_kr(u16 speed_ability,
+ unsigned long *link_mode)
+{
+ if (speed_ability & HCLGE_SUPPORT_1G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+ link_mode);
+ if (speed_ability & HCLGE_SUPPORT_10G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+ link_mode);
+ if (speed_ability & HCLGE_SUPPORT_25G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
+ link_mode);
+ if (speed_ability & HCLGE_SUPPORT_40G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
+ link_mode);
+ if (speed_ability & HCLGE_SUPPORT_50G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
+ link_mode);
+ if (speed_ability & HCLGE_SUPPORT_100G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
+ link_mode);
+ if (speed_ability & HCLGE_SUPPORT_200G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
+ link_mode);
+}
+
+static void hclge_convert_setting_fec(struct hclge_mac *mac)
+{
+ /* If firmware has reported fec_ability, there is no need to derive it from the speed */
+ if (mac->fec_ability)
+ goto out;
+
+ switch (mac->speed) {
+ case HCLGE_MAC_SPEED_10G:
+ case HCLGE_MAC_SPEED_40G:
+ mac->fec_ability = BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO) |
+ BIT(HNAE3_FEC_NONE);
+ break;
+ case HCLGE_MAC_SPEED_25G:
+ case HCLGE_MAC_SPEED_50G:
+ mac->fec_ability = BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
+ BIT(HNAE3_FEC_AUTO) | BIT(HNAE3_FEC_NONE);
+ break;
+ case HCLGE_MAC_SPEED_100G:
+ mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO) |
+ BIT(HNAE3_FEC_NONE);
+ break;
+ case HCLGE_MAC_SPEED_200G:
+ mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO) |
+ BIT(HNAE3_FEC_LLRS);
+ break;
+ default:
+ mac->fec_ability = 0;
+ break;
+ }
+
+out:
+ hclge_update_fec_support(mac);
+}
+
+static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
+ u16 speed_ability)
+{
+ struct hclge_mac *mac = &hdev->hw.mac;
+
+ if (speed_ability & HCLGE_SUPPORT_1G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
+ mac->supported);
+
+ hclge_convert_setting_sr(speed_ability, mac->supported);
+ hclge_convert_setting_lr(speed_ability, mac->supported);
+ hclge_convert_setting_cr(speed_ability, mac->supported);
+ if (hnae3_dev_fec_supported(hdev))
+ hclge_convert_setting_fec(mac);
+
+ if (hnae3_dev_pause_supported(hdev))
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
+
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
+}
+
+static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
+ u16 speed_ability)
+{
+ struct hclge_mac *mac = &hdev->hw.mac;
+
+ hclge_convert_setting_kr(speed_ability, mac->supported);
+ if (hnae3_dev_fec_supported(hdev))
+ hclge_convert_setting_fec(mac);
+
+ if (hnae3_dev_pause_supported(hdev))
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
+
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
+}
+
+static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
+ u16 speed_ability)
+{
+ unsigned long *supported = hdev->hw.mac.supported;
+
+ /* default to supporting all speeds for a GE port */
+ if (!speed_ability)
+ speed_ability = HCLGE_SUPPORT_GE;
+
+ if (speed_ability & HCLGE_SUPPORT_1G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ supported);
+
+ if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
+ supported);
+ }
+
+ if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
+ }
+
+ if (hnae3_dev_pause_supported(hdev)) {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
+ }
+
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
+}
+
+static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
+{
+ u8 media_type = hdev->hw.mac.media_type;
+
+ if (media_type == HNAE3_MEDIA_TYPE_FIBER)
+ hclge_parse_fiber_link_mode(hdev, speed_ability);
+ else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
+ hclge_parse_copper_link_mode(hdev, speed_ability);
+ else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
+ hclge_parse_backplane_link_mode(hdev, speed_ability);
+}
+
+static u32 hclge_get_max_speed(u16 speed_ability)
+{
+ if (speed_ability & HCLGE_SUPPORT_200G_BIT)
+ return HCLGE_MAC_SPEED_200G;
+
+ if (speed_ability & HCLGE_SUPPORT_100G_BIT)
+ return HCLGE_MAC_SPEED_100G;
+
+ if (speed_ability & HCLGE_SUPPORT_50G_BIT)
+ return HCLGE_MAC_SPEED_50G;
+
+ if (speed_ability & HCLGE_SUPPORT_40G_BIT)
+ return HCLGE_MAC_SPEED_40G;
+
+ if (speed_ability & HCLGE_SUPPORT_25G_BIT)
+ return HCLGE_MAC_SPEED_25G;
+
+ if (speed_ability & HCLGE_SUPPORT_10G_BIT)
+ return HCLGE_MAC_SPEED_10G;
+
+ if (speed_ability & HCLGE_SUPPORT_1G_BIT)
+ return HCLGE_MAC_SPEED_1G;
+
+ if (speed_ability & HCLGE_SUPPORT_100M_BIT)
+ return HCLGE_MAC_SPEED_100M;
+
+ if (speed_ability & HCLGE_SUPPORT_10M_BIT)
+ return HCLGE_MAC_SPEED_10M;
+
+ return HCLGE_MAC_SPEED_1G;
+}
+
+static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
+{
+#define HCLGE_TX_SPARE_SIZE_UNIT 4096
+#define SPEED_ABILITY_EXT_SHIFT 8
+
+ struct hclge_cfg_param_cmd *req;
+ u64 mac_addr_tmp_high;
+ u16 speed_ability_ext;
+ u64 mac_addr_tmp;
+ unsigned int i;
+
+ req = (struct hclge_cfg_param_cmd *)desc[0].data;
+
+ /* get the configuration */
+ cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
+ HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
+ cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
+ HCLGE_CFG_TQP_DESC_N_M,
+ HCLGE_CFG_TQP_DESC_N_S);
+
+ cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
+ HCLGE_CFG_PHY_ADDR_M,
+ HCLGE_CFG_PHY_ADDR_S);
+ cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
+ HCLGE_CFG_MEDIA_TP_M,
+ HCLGE_CFG_MEDIA_TP_S);
+ cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
+ HCLGE_CFG_RX_BUF_LEN_M,
+ HCLGE_CFG_RX_BUF_LEN_S);
+ /* get mac_address */
+ mac_addr_tmp = __le32_to_cpu(req->param[2]);
+ mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
+ HCLGE_CFG_MAC_ADDR_H_M,
+ HCLGE_CFG_MAC_ADDR_H_S);
+
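+ /* combine the low 32 bits with the high part of the MAC address;
+ * the split shift is equivalent to a single shift by 32 on the
+ * u64 value
+ */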
+ mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
+
+ cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
+ HCLGE_CFG_DEFAULT_SPEED_M,
+ HCLGE_CFG_DEFAULT_SPEED_S);
+ cfg->vf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
+ HCLGE_CFG_RSS_SIZE_M,
+ HCLGE_CFG_RSS_SIZE_S);
+
+ for (i = 0; i < ETH_ALEN; i++)
+ cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
+
+ req = (struct hclge_cfg_param_cmd *)desc[1].data;
+ cfg->numa_node_map = __le32_to_cpu(req->param[0]);
+
+ cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
+ HCLGE_CFG_SPEED_ABILITY_M,
+ HCLGE_CFG_SPEED_ABILITY_S);
+ speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]),
+ HCLGE_CFG_SPEED_ABILITY_EXT_M,
+ HCLGE_CFG_SPEED_ABILITY_EXT_S);
+ cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;
+
+ cfg->vlan_fliter_cap = hnae3_get_field(__le32_to_cpu(req->param[1]),
+ HCLGE_CFG_VLAN_FLTR_CAP_M,
+ HCLGE_CFG_VLAN_FLTR_CAP_S);
+
+ cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
+ HCLGE_CFG_UMV_TBL_SPACE_M,
+ HCLGE_CFG_UMV_TBL_SPACE_S);
+
+ cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]),
+ HCLGE_CFG_PF_RSS_SIZE_M,
+ HCLGE_CFG_PF_RSS_SIZE_S);
+
+ /* HCLGE_CFG_PF_RSS_SIZE_M holds the PF max rss size as a
+ * power-of-2 exponent instead of the size itself, which is more
+ * flexible for future changes and expansions.
+ * A PF field of 0 is not meaningful, so in that case the PF falls
+ * back to the VF max rss size field (HCLGE_CFG_RSS_SIZE_S), i.e.
+ * PF and VF share the same max rss size.
+ */
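+ /* e.g. a field value of 7 yields pf_rss_size_max = 1U << 7 = 128 */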
+ cfg->pf_rss_size_max = cfg->pf_rss_size_max ?
+ 1U << cfg->pf_rss_size_max :
+ cfg->vf_rss_size_max;
+
+ /* The unit of the tx spare buffer size queried from configuration
+ * file is HCLGE_TX_SPARE_SIZE_UNIT(4096) bytes, so a conversion is
+ * needed here.
+ */
+ cfg->tx_spare_buf_size = hnae3_get_field(__le32_to_cpu(req->param[2]),
+ HCLGE_CFG_TX_SPARE_BUF_SIZE_M,
+ HCLGE_CFG_TX_SPARE_BUF_SIZE_S);
+ cfg->tx_spare_buf_size *= HCLGE_TX_SPARE_SIZE_UNIT;
+}
+
+/* hclge_get_cfg: query the static parameters from flash
+ * @hdev: pointer to struct hclge_dev
+ * @hcfg: the config structure to be filled in
+ */
+static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
+{
+ struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
+ struct hclge_cfg_param_cmd *req;
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
+ u32 offset = 0;
+
+ req = (struct hclge_cfg_param_cmd *)desc[i].data;
+ hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
+ true);
+ hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
+ HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
+ /* The length sent to hardware is in units of 4 bytes */
+ hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
+ HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
+ req->offset = cpu_to_le32(offset);
+ }
+
+ ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
+ return ret;
+ }
+
+ hclge_parse_cfg(hcfg, desc);
+
+ return 0;
+}
+
+static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
+{
+#define HCLGE_MAX_NON_TSO_BD_NUM 8U
+
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+
+ ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
+ ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
+ ae_dev->dev_specs.rss_key_size = HCLGE_COMM_RSS_KEY_SIZE;
+ ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
+ ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
+ ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
+ ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM;
+ ae_dev->dev_specs.umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
+ ae_dev->dev_specs.tnl_num = 0;
+}
+
+static void hclge_parse_dev_specs(struct hclge_dev *hdev,
+ struct hclge_desc *desc)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+ struct hclge_dev_specs_0_cmd *req0;
+ struct hclge_dev_specs_1_cmd *req1;
+
+ req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
+ req1 = (struct hclge_dev_specs_1_cmd *)desc[1].data;
+
+ ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
+ ae_dev->dev_specs.rss_ind_tbl_size =
+ le16_to_cpu(req0->rss_ind_tbl_size);
+ ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
+ ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
+ ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
+ ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num);
+ ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
+ ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
+ ae_dev->dev_specs.umv_size = le16_to_cpu(req1->umv_size);
+ ae_dev->dev_specs.mc_mac_size = le16_to_cpu(req1->mc_mac_size);
+ ae_dev->dev_specs.tnl_num = req1->tnl_num;
+}
+
+static void hclge_check_dev_specs(struct hclge_dev *hdev)
+{
+ struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;
+
+ if (!dev_specs->max_non_tso_bd_num)
+ dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
+ if (!dev_specs->rss_ind_tbl_size)
+ dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
+ if (!dev_specs->rss_key_size)
+ dev_specs->rss_key_size = HCLGE_COMM_RSS_KEY_SIZE;
+ if (!dev_specs->max_tm_rate)
+ dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
+ if (!dev_specs->max_qset_num)
+ dev_specs->max_qset_num = HCLGE_MAX_QSET_NUM;
+ if (!dev_specs->max_int_gl)
+ dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
+ if (!dev_specs->max_frm_size)
+ dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME;
+ if (!dev_specs->umv_size)
+ dev_specs->umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
+}
+
+static int hclge_query_mac_stats_num(struct hclge_dev *hdev)
+{
+ u32 reg_num = 0;
+ int ret;
+
+ ret = hclge_mac_query_reg_num(hdev, &reg_num);
+ if (ret && ret != -EOPNOTSUPP)
+ return ret;
+
+ hdev->ae_dev->dev_specs.mac_stats_num = reg_num;
+ return 0;
+}
+
+static int hclge_query_dev_specs(struct hclge_dev *hdev)
+{
+ struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM];
+ int ret;
+ int i;
+
+ ret = hclge_query_mac_stats_num(hdev);
+ if (ret)
+ return ret;
+
+ /* set default specifications as devices lower than version V3 do not
+ * support querying specifications from firmware.
+ */
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
+ hclge_set_default_dev_specs(hdev);
+ return 0;
+ }
+
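+ /* chain the query descriptors: every descriptor except the
+ * last one sets the NEXT flag
+ */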
+ for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
+ hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
+ true);
+ desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ }
+ hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);
+
+ ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM);
+ if (ret)
+ return ret;
+
+ hclge_parse_dev_specs(hdev, desc);
+ hclge_check_dev_specs(hdev);
+
+ return 0;
+}
+
+static int hclge_get_cap(struct hclge_dev *hdev)
+{
+ int ret;
+
+ ret = hclge_query_function_status(hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "query function status error %d.\n", ret);
+ return ret;
+ }
+
+ /* get pf resource */
+ return hclge_query_pf_resource(hdev);
+}
+
+static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
+{
+#define HCLGE_MIN_TX_DESC 64
+#define HCLGE_MIN_RX_DESC 64
+
+ if (!is_kdump_kernel())
+ return;
+
+ dev_info(&hdev->pdev->dev,
+ "Running kdump kernel. Using minimal resources\n");
+
+ /* the minimal number of queue pairs equals the number of vports */
+ hdev->num_tqps = hdev->num_req_vfs + 1;
+ hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
+ hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
+}
+
+static void hclge_init_tc_config(struct hclge_dev *hdev)
+{
+ unsigned int i;
+
+ if (hdev->tc_max > HNAE3_MAX_TC ||
+ hdev->tc_max < 1) {
+ dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
+ hdev->tc_max);
+ hdev->tc_max = 1;
+ }
+
+ /* Dev does not support DCB */
+ if (!hnae3_dev_dcb_supported(hdev)) {
+ hdev->tc_max = 1;
+ hdev->pfc_max = 0;
+ } else {
+ hdev->pfc_max = hdev->tc_max;
+ }
+
+ hdev->tm_info.num_tc = 1;
+
+ /* Non-contiguous TCs are currently not supported */
+ for (i = 0; i < hdev->tm_info.num_tc; i++)
+ hnae3_set_bit(hdev->hw_tc_map, i, 1);
+
+ hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
+}
+
+static int hclge_configure(struct hclge_dev *hdev)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+ struct hclge_cfg cfg;
+ int ret;
+
+ ret = hclge_get_cfg(hdev, &cfg);
+ if (ret)
+ return ret;
+
+ hdev->base_tqp_pid = 0;
+ hdev->vf_rss_size_max = cfg.vf_rss_size_max;
+ hdev->pf_rss_size_max = cfg.pf_rss_size_max;
+ hdev->rx_buf_len = cfg.rx_buf_len;
+ ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
+ hdev->hw.mac.media_type = cfg.media_type;
+ hdev->hw.mac.phy_addr = cfg.phy_addr;
+ hdev->num_tx_desc = cfg.tqp_desc_num;
+ hdev->num_rx_desc = cfg.tqp_desc_num;
+ hdev->tm_info.num_pg = 1;
+ hdev->tc_max = cfg.tc_num;
+ hdev->tm_info.hw_pfc_map = 0;
+ if (cfg.umv_space)
+ hdev->wanted_umv_size = cfg.umv_space;
+ else
+ hdev->wanted_umv_size = hdev->ae_dev->dev_specs.umv_size;
+ hdev->tx_spare_buf_size = cfg.tx_spare_buf_size;
+ hdev->gro_en = true;
+ if (cfg.vlan_fliter_cap == HCLGE_VLAN_FLTR_CAN_MDF)
+ set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);
+
+ if (hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
+ hdev->fd_en = true;
+ hdev->fd_active_type = HCLGE_FD_RULE_NONE;
+ }
+
+ ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
+ cfg.default_speed, ret);
+ return ret;
+ }
+
+ hclge_parse_link_mode(hdev, cfg.speed_ability);
+
+ hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
+
+ hclge_init_tc_config(hdev);
+ hclge_init_kdump_kernel_config(hdev);
+
+ return ret;
+}
+
+static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
+ u16 tso_mss_max)
+{
+ struct hclge_cfg_tso_status_cmd *req;
+ struct hclge_desc desc;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
+
+ req = (struct hclge_cfg_tso_status_cmd *)desc.data;
+ req->tso_mss_min = cpu_to_le16(tso_mss_min);
+ req->tso_mss_max = cpu_to_le16(tso_mss_max);
+
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
+static int hclge_config_gro(struct hclge_dev *hdev)
+{
+ struct hclge_cfg_gro_status_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ if (!hnae3_ae_dev_gro_supported(hdev->ae_dev))
+ return 0;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
+ req = (struct hclge_cfg_gro_status_cmd *)desc.data;
+
+ req->gro_en = hdev->gro_en ? 1 : 0;
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "GRO hardware config cmd failed, ret = %d\n", ret);
+
+ return ret;
+}
+
+static int hclge_alloc_tqps(struct hclge_dev *hdev)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+ struct hclge_comm_tqp *tqp;
+ int i;
+
+ hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
+ sizeof(struct hclge_comm_tqp), GFP_KERNEL);
+ if (!hdev->htqp)
+ return -ENOMEM;
+
+ tqp = hdev->htqp;
+
+ for (i = 0; i < hdev->num_tqps; i++) {
+ tqp->dev = &hdev->pdev->dev;
+ tqp->index = i;
+
+ tqp->q.ae_algo = &ae_algo;
+ tqp->q.buf_size = hdev->rx_buf_len;
+ tqp->q.tx_desc_num = hdev->num_tx_desc;
+ tqp->q.rx_desc_num = hdev->num_rx_desc;
+
+ /* queues with index >= HCLGE_TQP_MAX_SIZE_DEV_V2 need an
+ * extended register offset
+ */
+ if (i < HCLGE_TQP_MAX_SIZE_DEV_V2)
+ tqp->q.io_base = hdev->hw.hw.io_base +
+ HCLGE_TQP_REG_OFFSET +
+ i * HCLGE_TQP_REG_SIZE;
+ else
+ tqp->q.io_base = hdev->hw.hw.io_base +
+ HCLGE_TQP_REG_OFFSET +
+ HCLGE_TQP_EXT_REG_OFFSET +
+ (i - HCLGE_TQP_MAX_SIZE_DEV_V2) *
+ HCLGE_TQP_REG_SIZE;
+
+ /* when the device supports tx push and has device memory,
+ * the queue can run in push mode or doorbell mode via
+ * device memory.
+ */
+ if (test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, ae_dev->caps))
+ tqp->q.mem_base = hdev->hw.hw.mem_base +
+ HCLGE_TQP_MEM_OFFSET(hdev, i);
+
+ tqp++;
+ }
+
+ return 0;
+}
+
+static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
+ u16 tqp_pid, u16 tqp_vid, bool is_pf)
+{
+ struct hclge_tqp_map_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
+
+ req = (struct hclge_tqp_map_cmd *)desc.data;
+ req->tqp_id = cpu_to_le16(tqp_pid);
+ req->tqp_vf = func_id;
+ req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
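+ /* the TYPE bit marks the mapping as a VF queue */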
+ if (!is_pf)
+ req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
+ req->tqp_vid = cpu_to_le16(tqp_vid);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
+
+ return ret;
+}
+
+static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
+{
+ struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
+ struct hclge_dev *hdev = vport->back;
+ int i, alloced;
+
+ for (i = 0, alloced = 0; i < hdev->num_tqps &&
+ alloced < num_tqps; i++) {
+ if (!hdev->htqp[i].alloced) {
+ hdev->htqp[i].q.handle = &vport->nic;
+ hdev->htqp[i].q.tqp_index = alloced;
+ hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
+ hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
+ kinfo->tqp[alloced] = &hdev->htqp[i].q;
+ hdev->htqp[i].alloced = true;
+ alloced++;
+ }
+ }
+ vport->alloc_tqps = alloced;
+ kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max,
+ vport->alloc_tqps / hdev->tm_info.num_tc);
+
+ /* ensure a one-to-one mapping between irq and queue by default */
+ kinfo->rss_size = min_t(u16, kinfo->rss_size,
+ (hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
+
+ return 0;
+}
+
+static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
+ u16 num_tx_desc, u16 num_rx_desc)
+
+{
+ struct hnae3_handle *nic = &vport->nic;
+ struct hnae3_knic_private_info *kinfo = &nic->kinfo;
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+
+ kinfo->num_tx_desc = num_tx_desc;
+ kinfo->num_rx_desc = num_rx_desc;
+
+ kinfo->rx_buf_len = hdev->rx_buf_len;
+ kinfo->tx_spare_buf_size = hdev->tx_spare_buf_size;
+
+ kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
+ sizeof(struct hnae3_queue *), GFP_KERNEL);
+ if (!kinfo->tqp)
+ return -ENOMEM;
+
+ ret = hclge_assign_tqp(vport, num_tqps);
+ if (ret)
+ dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
+
+ return ret;
+}
+
+static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
+ struct hclge_vport *vport)
+{
+ struct hnae3_handle *nic = &vport->nic;
+ struct hnae3_knic_private_info *kinfo;
+ u16 i;
+
+ kinfo = &nic->kinfo;
+ for (i = 0; i < vport->alloc_tqps; i++) {
+ struct hclge_comm_tqp *q =
+ container_of(kinfo->tqp[i], struct hclge_comm_tqp, q);
+ bool is_pf;
+ int ret;
+
+ is_pf = !(vport->vport_id);
+ ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
+ i, is_pf);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hclge_map_tqp(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport = hdev->vport;
+ u16 i, num_vport;
+
+ num_vport = hdev->num_req_vfs + 1;
+ for (i = 0; i < num_vport; i++) {
+ int ret;
+
+ ret = hclge_map_tqp_to_vport(hdev, vport);
+ if (ret)
+ return ret;
+
+ vport++;
+ }
+
+ return 0;
+}
+
+static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
+{
+ struct hnae3_handle *nic = &vport->nic;
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+
+ nic->pdev = hdev->pdev;
+ nic->ae_algo = &ae_algo;
+ nic->numa_node_mask = hdev->numa_node_mask;
+ nic->kinfo.io_base = hdev->hw.hw.io_base;
+
+ ret = hclge_knic_setup(vport, num_tqps,
+ hdev->num_tx_desc, hdev->num_rx_desc);
+ if (ret)
+ dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
+
+ return ret;
+}
+
+static int hclge_alloc_vport(struct hclge_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ struct hclge_vport *vport;
+ u32 tqp_main_vport;
+ u32 tqp_per_vport;
+ int num_vport, i;
+ int ret;
+
+ /* We need to alloc a vport for the main NIC of the PF */
+ num_vport = hdev->num_req_vfs + 1;
+
+ if (hdev->num_tqps < num_vport) {
+ dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
+ hdev->num_tqps, num_vport);
+ return -EINVAL;
+ }
+
+ /* Alloc the same number of TQPs for every vport; the main vport takes any remainder */
+ tqp_per_vport = hdev->num_tqps / num_vport;
+ tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
+
+ vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
+ GFP_KERNEL);
+ if (!vport)
+ return -ENOMEM;
+
+ hdev->vport = vport;
+ hdev->num_alloc_vport = num_vport;
+
+ if (IS_ENABLED(CONFIG_PCI_IOV))
+ hdev->num_alloc_vfs = hdev->num_req_vfs;
+
+ for (i = 0; i < num_vport; i++) {
+ vport->back = hdev;
+ vport->vport_id = i;
+ vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
+ vport->mps = HCLGE_MAC_DEFAULT_FRAME;
+ vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
+ vport->port_base_vlan_cfg.tbl_sta = true;
+ vport->rxvlan_cfg.rx_vlan_offload_en = true;
+ vport->req_vlan_fltr_en = true;
+ INIT_LIST_HEAD(&vport->vlan_list);
+ INIT_LIST_HEAD(&vport->uc_mac_list);
+ INIT_LIST_HEAD(&vport->mc_mac_list);
+ spin_lock_init(&vport->mac_list_lock);
+
+ if (i == 0)
+ ret = hclge_vport_setup(vport, tqp_main_vport);
+ else
+ ret = hclge_vport_setup(vport, tqp_per_vport);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "vport setup failed for vport %d, %d\n",
+ i, ret);
+ return ret;
+ }
+
+ vport++;
+ }
+
+ return 0;
+}
+
+static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
+ struct hclge_pkt_buf_alloc *buf_alloc)
+{
+/* TX buffer size is in units of 128 bytes */
+#define HCLGE_BUF_SIZE_UNIT_SHIFT 7
+#define HCLGE_BUF_SIZE_UPDATE_EN_MSK BIT(15)
+ struct hclge_tx_buff_alloc_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+ u8 i;
+
+ req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
+ for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
+ u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
+
+ req->tx_pkt_buff[i] =
+ cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
+ HCLGE_BUF_SIZE_UPDATE_EN_MSK);
+ }
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
+ ret);
+
+ return ret;
+}
+
+static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
+ struct hclge_pkt_buf_alloc *buf_alloc)
+{
+ int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
+
+ if (ret)
+ dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
+
+ return ret;
+}
+
+static u32 hclge_get_tc_num(struct hclge_dev *hdev)
+{
+ unsigned int i;
+ u32 cnt = 0;
+
+ for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
+ if (hdev->hw_tc_map & BIT(i))
+ cnt++;
+ return cnt;
+}
+
+/* Get the number of pfc enabled TCs that have a private buffer */
+static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
+ struct hclge_pkt_buf_alloc *buf_alloc)
+{
+ struct hclge_priv_buf *priv;
+ unsigned int i;
+ int cnt = 0;
+
+ for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
+ priv = &buf_alloc->priv_buf[i];
+ if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
+ priv->enable)
+ cnt++;
+ }
+
+ return cnt;
+}
+
+/* Get the number of pfc disabled TCs that have a private buffer */
+static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
+ struct hclge_pkt_buf_alloc *buf_alloc)
+{
+ struct hclge_priv_buf *priv;
+ unsigned int i;
+ int cnt = 0;
+
+ for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
+ priv = &buf_alloc->priv_buf[i];
+ if (hdev->hw_tc_map & BIT(i) &&
+ !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
+ priv->enable)
+ cnt++;
+ }
+
+ return cnt;
+}
+
+static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
+{
+ struct hclge_priv_buf *priv;
+ u32 rx_priv = 0;
+ int i;
+
+ for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
+ priv = &buf_alloc->priv_buf[i];
+ if (priv->enable)
+ rx_priv += priv->buf_size;
+ }
+ return rx_priv;
+}
+
+static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
+{
+ u32 i, total_tx_size = 0;
+
+ for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
+ total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
+
+ return total_tx_size;
+}
+
+static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
+ struct hclge_pkt_buf_alloc *buf_alloc,
+ u32 rx_all)
+{
+ u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
+ u32 tc_num = hclge_get_tc_num(hdev);
+ u32 shared_buf, aligned_mps;
+ u32 rx_priv;
+ int i;
+
+ aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
+
+ if (hnae3_dev_dcb_supported(hdev))
+ shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
+ hdev->dv_buf_size;
+ else
+ shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
+ + hdev->dv_buf_size;
+
+ shared_buf_tc = tc_num * aligned_mps + aligned_mps;
+ shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
+ HCLGE_BUF_SIZE_UNIT);
+
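+ /* the already allocated private buffers plus the shared buffer
+ * must fit within the total rx buffer size
+ */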
+ rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
+ if (rx_all < rx_priv + shared_std)
+ return false;
+
+ shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
+ buf_alloc->s_buf.buf_size = shared_buf;
+ if (hnae3_dev_dcb_supported(hdev)) {
+ buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
+ buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
+ - roundup(aligned_mps / HCLGE_BUF_DIV_BY,
+ HCLGE_BUF_SIZE_UNIT);
+ } else {
+ buf_alloc->s_buf.self.high = aligned_mps +
+ HCLGE_NON_DCB_ADDITIONAL_BUF;
+ buf_alloc->s_buf.self.low = aligned_mps;
+ }
+
+ if (hnae3_dev_dcb_supported(hdev)) {
+ hi_thrd = shared_buf - hdev->dv_buf_size;
+
+ if (tc_num <= NEED_RESERVE_TC_NUM)
+ hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
+ / BUF_MAX_PERCENT;
+
+ if (tc_num)
+ hi_thrd = hi_thrd / tc_num;
+
+ hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
+ hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
+ lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
+ } else {
+ hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
+ lo_thrd = aligned_mps;
+ }
+
+ for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
+ buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
+ buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
+ }
+
+ return true;
+}
+
+static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
+ struct hclge_pkt_buf_alloc *buf_alloc)
+{
+ u32 i, total_size;
+
+ total_size = hdev->pkt_buf_size;
+
+ /* alloc tx buffer for all enabled tc */
+ for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
+ struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
+
+ if (hdev->hw_tc_map & BIT(i)) {
+ if (total_size < hdev->tx_buf_size)
+ return -ENOMEM;
+
+ priv->tx_buf_size = hdev->tx_buf_size;
+ } else {
+ priv->tx_buf_size = 0;
+ }
+
+ total_size -= priv->tx_buf_size;
+ }
+
+ return 0;
+}
+
+static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
+ struct hclge_pkt_buf_alloc *buf_alloc)
+{
+ u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
+ u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
+ unsigned int i;
+
+ for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
+ struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
+
+ priv->enable = 0;
+ priv->wl.low = 0;
+ priv->wl.high = 0;
+ priv->buf_size = 0;
+
+ if (!(hdev->hw_tc_map & BIT(i)))
+ continue;
+
+ priv->enable = 1;
+
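+ /* pfc enabled TCs get a non-zero low watermark; other TCs only
+ * get a high watermark
+ */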
+ if (hdev->tm_info.hw_pfc_map & BIT(i)) {
+ priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
+ priv->wl.high = roundup(priv->wl.low + aligned_mps,
+ HCLGE_BUF_SIZE_UNIT);
+ } else {
+ priv->wl.low = 0;
+ priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
+ aligned_mps;
+ }
+
+ priv->buf_size = priv->wl.high + hdev->dv_buf_size;
+ }
+
+ return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
+}
+
+static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
+ struct hclge_pkt_buf_alloc *buf_alloc)
+{
+ u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
+ int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
+ int i;
+
+ /* clear the private buffers starting from the last TC */
+ for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
+ struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
+ unsigned int mask = BIT((unsigned int)i);
+
+ if (hdev->hw_tc_map & mask &&
+ !(hdev->tm_info.hw_pfc_map & mask)) {
+ /* Clear the private buffer of this non-pfc TC */
+ priv->wl.low = 0;
+ priv->wl.high = 0;
+ priv->buf_size = 0;
+ priv->enable = 0;
+ no_pfc_priv_num--;
+ }
+
+ if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
+ no_pfc_priv_num == 0)
+ break;
+ }
+
+ return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
+}
+
+static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
+ struct hclge_pkt_buf_alloc *buf_alloc)
+{
+ u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
+ int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
+ int i;
+
+ /* clear the private buffers starting from the last TC */
+ for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
+ struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
+ unsigned int mask = BIT((unsigned int)i);
+
+ if (hdev->hw_tc_map & mask &&
+ hdev->tm_info.hw_pfc_map & mask) {
+ /* Reduce the number of pfc TCs with a private buffer */
+ priv->wl.low = 0;
+ priv->enable = 0;
+ priv->wl.high = 0;
+ priv->buf_size = 0;
+ pfc_priv_num--;
+ }
+
+ if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
+ pfc_priv_num == 0)
+ break;
+ }
+
+ return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
+}
+
+static bool hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
+ struct hclge_pkt_buf_alloc *buf_alloc)
+{
+#define COMPENSATE_BUFFER 0x3C00
+#define COMPENSATE_HALF_MPS_NUM 5
+#define PRIV_WL_GAP 0x1800
+
+ u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
+ u32 tc_num = hclge_get_tc_num(hdev);
+ u32 half_mps = hdev->mps >> 1;
+ u32 min_rx_priv;
+ unsigned int i;
+
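+ /* split the whole rx buffer evenly among the enabled TCs as
+ * private buffers and leave no shared buffer
+ */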
+ if (tc_num)
+ rx_priv = rx_priv / tc_num;
+
+ if (tc_num <= NEED_RESERVE_TC_NUM)
+ rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
+
+ min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
+ COMPENSATE_HALF_MPS_NUM * half_mps;
+ min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
+ rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
+ if (rx_priv < min_rx_priv)
+ return false;
+
+ for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
+ struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
+
+ priv->enable = 0;
+ priv->wl.low = 0;
+ priv->wl.high = 0;
+ priv->buf_size = 0;
+
+ if (!(hdev->hw_tc_map & BIT(i)))
+ continue;
+
+ priv->enable = 1;
+ priv->buf_size = rx_priv;
+ priv->wl.high = rx_priv - hdev->dv_buf_size;
+ priv->wl.low = priv->wl.high - PRIV_WL_GAP;
+ }
+
+ buf_alloc->s_buf.buf_size = 0;
+
+ return true;
+}
+
+/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
+ * @hdev: pointer to struct hclge_dev
+ * @buf_alloc: pointer to buffer calculation data
+ * @return: 0 on success, a negative error code on failure
+ */
+static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
+ struct hclge_pkt_buf_alloc *buf_alloc)
+{
+ /* When DCB is not supported, rx private buffer is not allocated. */
+ if (!hnae3_dev_dcb_supported(hdev)) {
+ u32 rx_all = hdev->pkt_buf_size;
+
+ rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
+ if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
+ return -ENOMEM;
+
+ return 0;
+ }
+
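+ /* try the strategies in order: private buffers only (no shared
+ * buffer), all TCs with maximum watermarks, all TCs with reduced
+ * watermarks, then drop the private buffers of non-pfc TCs and
+ * finally of pfc TCs, until the buffers fit
+ */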
+ if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
+ return 0;
+
+ if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
+ return 0;
+
+ /* try to decrease the buffer size */
+ if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
+ return 0;
+
+ if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
+ return 0;
+
+ if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
+ return 0;
+
+ return -ENOMEM;
+}
+
+static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
+ struct hclge_pkt_buf_alloc *buf_alloc)
+{
+ struct hclge_rx_priv_buff_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+ int i;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
+ req = (struct hclge_rx_priv_buff_cmd *)desc.data;
+
+ /* Alloc the private buffer for each TC */
+ for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
+ struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
+
+ req->buf_num[i] =
+ cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
+ req->buf_num[i] |=
+ cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
+ }
+
+ req->shared_buf =
+ cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
+ (1 << HCLGE_TC0_PRI_BUF_EN_B));
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "rx private buffer alloc cmd failed %d\n", ret);
+
+ return ret;
+}
+
+static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
+ struct hclge_pkt_buf_alloc *buf_alloc)
+{
+ struct hclge_rx_priv_wl_buf *req;
+ struct hclge_priv_buf *priv;
+ struct hclge_desc desc[2];
+ int i, j;
+ int ret;
+
+ for (i = 0; i < 2; i++) {
+ hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
+ false);
+ req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
+
+ /* The first descriptor sets the NEXT bit to 1 */
+ if (i == 0)
+ desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ else
+ desc[i].flag &= ~cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+
+ for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
+ u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
+
+ priv = &buf_alloc->priv_buf[idx];
+ req->tc_wl[j].high =
+ cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
+ req->tc_wl[j].high |=
+ cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
+ req->tc_wl[j].low =
+ cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
+ req->tc_wl[j].low |=
+ cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
+ }
+ }
+
+ /* Send 2 descriptors at one time */
+ ret = hclge_cmd_send(&hdev->hw, desc, 2);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "rx private waterline config cmd failed %d\n",
+ ret);
+ return ret;
+}
+
+static int hclge_common_thrd_config(struct hclge_dev *hdev,
+ struct hclge_pkt_buf_alloc *buf_alloc)
+{
+ struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
+ struct hclge_rx_com_thrd *req;
+ struct hclge_desc desc[2];
+ struct hclge_tc_thrd *tc;
+ int i, j;
+ int ret;
+
+ for (i = 0; i < 2; i++) {
+ hclge_cmd_setup_basic_desc(&desc[i],
+ HCLGE_OPC_RX_COM_THRD_ALLOC, false);
+ req = (struct hclge_rx_com_thrd *)&desc[i].data;
+
+ /* The first descriptor sets the NEXT bit to 1 */
+ if (i == 0)
+ desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ else
+ desc[i].flag &= ~cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+
+ for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
+ tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
+
+ req->com_thrd[j].high =
+ cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
+ req->com_thrd[j].high |=
+ cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
+ req->com_thrd[j].low =
+ cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
+ req->com_thrd[j].low |=
+ cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
+ }
+ }
+
+ /* Send 2 descriptors at one time */
+ ret = hclge_cmd_send(&hdev->hw, desc, 2);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "common threshold config cmd failed %d\n", ret);
+ return ret;
+}
+
+static int hclge_common_wl_config(struct hclge_dev *hdev,
+ struct hclge_pkt_buf_alloc *buf_alloc)
+{
+ struct hclge_shared_buf *buf = &buf_alloc->s_buf;
+ struct hclge_rx_com_wl *req;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
+
+ req = (struct hclge_rx_com_wl *)desc.data;
+ req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
+ req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
+
+ req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
+ req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "common waterline config cmd failed %d\n", ret);
+
+ return ret;
+}
+
+int hclge_buffer_alloc(struct hclge_dev *hdev)
+{
+ struct hclge_pkt_buf_alloc *pkt_buf;
+ int ret;
+
+ pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
+ if (!pkt_buf)
+ return -ENOMEM;
+
+ ret = hclge_tx_buffer_calc(hdev, pkt_buf);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "could not calc tx buffer size for all TCs %d\n", ret);
+ goto out;
+ }
+
+ ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "could not alloc tx buffers %d\n", ret);
+ goto out;
+ }
+
+ ret = hclge_rx_buffer_calc(hdev, pkt_buf);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "could not calc rx priv buffer size for all TCs %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
+ ret);
+ goto out;
+ }
+
+ if (hnae3_dev_dcb_supported(hdev)) {
+ ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "could not configure rx private waterline %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = hclge_common_thrd_config(hdev, pkt_buf);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "could not configure common threshold %d\n",
+ ret);
+ goto out;
+ }
+ }
+
+ ret = hclge_common_wl_config(hdev, pkt_buf);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "could not configure common waterline %d\n", ret);
+
+out:
+ kfree(pkt_buf);
+ return ret;
+}
+
+static int hclge_init_roce_base_info(struct hclge_vport *vport)
+{
+ struct hnae3_handle *roce = &vport->roce;
+ struct hnae3_handle *nic = &vport->nic;
+ struct hclge_dev *hdev = vport->back;
+
+ roce->rinfo.num_vectors = vport->back->num_roce_msi;
+
+ if (hdev->num_msi < hdev->num_nic_msi + hdev->num_roce_msi)
+ return -EINVAL;
+
+ roce->rinfo.base_vector = hdev->num_nic_msi;
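+ /* RoCE vectors are placed right after the NIC vectors */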
+
+ roce->rinfo.netdev = nic->kinfo.netdev;
+ roce->rinfo.roce_io_base = hdev->hw.hw.io_base;
+ roce->rinfo.roce_mem_base = hdev->hw.hw.mem_base;
+
+ roce->pdev = nic->pdev;
+ roce->ae_algo = nic->ae_algo;
+ roce->numa_node_mask = nic->numa_node_mask;
+
+ return 0;
+}
+
+static int hclge_init_msi(struct hclge_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ int vectors;
+ int i;
+
+ vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
+ hdev->num_msi,
+ PCI_IRQ_MSI | PCI_IRQ_MSIX);
+ if (vectors < 0) {
+ dev_err(&pdev->dev,
+ "failed(%d) to allocate MSI/MSI-X vectors\n",
+ vectors);
+ return vectors;
+ }
+ if (vectors < hdev->num_msi)
+ dev_warn(&hdev->pdev->dev,
+ "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
+ hdev->num_msi, vectors);
+
+ hdev->num_msi = vectors;
+ hdev->num_msi_left = vectors;
+
+ hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
+ sizeof(u16), GFP_KERNEL);
+ if (!hdev->vector_status) {
+ pci_free_irq_vectors(pdev);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < hdev->num_msi; i++)
+ hdev->vector_status[i] = HCLGE_INVALID_VPORT;
+
+ hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
+ sizeof(int), GFP_KERNEL);
+ if (!hdev->vector_irq) {
+ pci_free_irq_vectors(pdev);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static u8 hclge_check_speed_dup(u8 duplex, int speed)
+{
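+ /* only 10M and 100M support half duplex; force full duplex
+ * for all other speeds
+ */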
+ if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
+ duplex = HCLGE_MAC_FULL;
+
+ return duplex;
+}
+
+static struct hclge_mac_speed_map hclge_mac_speed_map_to_fw[] = {
+ {HCLGE_MAC_SPEED_10M, HCLGE_FW_MAC_SPEED_10M},
+ {HCLGE_MAC_SPEED_100M, HCLGE_FW_MAC_SPEED_100M},
+ {HCLGE_MAC_SPEED_1G, HCLGE_FW_MAC_SPEED_1G},
+ {HCLGE_MAC_SPEED_10G, HCLGE_FW_MAC_SPEED_10G},
+ {HCLGE_MAC_SPEED_25G, HCLGE_FW_MAC_SPEED_25G},
+ {HCLGE_MAC_SPEED_40G, HCLGE_FW_MAC_SPEED_40G},
+ {HCLGE_MAC_SPEED_50G, HCLGE_FW_MAC_SPEED_50G},
+ {HCLGE_MAC_SPEED_100G, HCLGE_FW_MAC_SPEED_100G},
+ {HCLGE_MAC_SPEED_200G, HCLGE_FW_MAC_SPEED_200G},
+};
+
+static int hclge_convert_to_fw_speed(u32 speed_drv, u32 *speed_fw)
+{
+ u16 i;
+
+ for (i = 0; i < ARRAY_SIZE(hclge_mac_speed_map_to_fw); i++) {
+ if (hclge_mac_speed_map_to_fw[i].speed_drv == speed_drv) {
+ *speed_fw = hclge_mac_speed_map_to_fw[i].speed_fw;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
+ u8 duplex, u8 lane_num)
+{
+ struct hclge_config_mac_speed_dup_cmd *req;
+ struct hclge_desc desc;
+ u32 speed_fw;
+ int ret;
+
+ req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
+
+ if (duplex)
+ hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
+
+ ret = hclge_convert_to_fw_speed(speed, &speed_fw);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
+ return ret;
+ }
+
+ hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, HCLGE_CFG_SPEED_S,
+ speed_fw);
+ hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
+ 1);
+ req->lane_num = lane_num;
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "mac speed/duplex config cmd failed %d.\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex, u8 lane_num)
+{
+ struct hclge_mac *mac = &hdev->hw.mac;
+ int ret;
+
+ duplex = hclge_check_speed_dup(duplex, speed);
+ if (!mac->support_autoneg && mac->speed == speed &&
+ mac->duplex == duplex && (mac->lane_num == lane_num || lane_num == 0))
+ return 0;
+
+ ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex, lane_num);
+ if (ret)
+ return ret;
+
+ hdev->hw.mac.speed = speed;
+ hdev->hw.mac.duplex = duplex;
+ if (!lane_num)
+ hdev->hw.mac.lane_num = lane_num;
+
+ return 0;
+}
+
+static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
+ u8 duplex, u8 lane_num)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ return hclge_cfg_mac_speed_dup(hdev, speed, duplex, lane_num);
+}
+
+static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
+{
+ struct hclge_config_auto_neg_cmd *req;
+ struct hclge_desc desc;
+ u32 flag = 0;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
+
+ req = (struct hclge_config_auto_neg_cmd *)desc.data;
+ if (enable)
+ hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
+ req->cfg_an_cmd_flag = cpu_to_le32(flag);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
+ ret);
+
+ return ret;
+}
+
+static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ if (!hdev->hw.mac.support_autoneg) {
+ if (enable) {
+ dev_err(&hdev->pdev->dev,
+ "autoneg is not supported by current port\n");
+ return -EOPNOTSUPP;
+ } else {
+ return 0;
+ }
+ }
+
+ return hclge_set_autoneg_en(hdev, enable);
+}
+
+static int hclge_get_autoneg(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct phy_device *phydev = hdev->hw.mac.phydev;
+
+ if (phydev)
+ return phydev->autoneg;
+
+ return hdev->hw.mac.autoneg;
+}
+
+static int hclge_restart_autoneg(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+
+ dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
+
+ ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
+ if (ret)
+ return ret;
+ return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+}
+
+static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
+ return hclge_set_autoneg_en(hdev, !halt);
+
+ return 0;
+}
+
+static void hclge_parse_fec_stats_lanes(struct hclge_dev *hdev,
+ struct hclge_desc *desc, u32 desc_len)
+{
+ u32 lane_size = HCLGE_FEC_STATS_MAX_LANES * 2;
+ u32 desc_index = 0;
+ u32 data_index = 0;
+ u32 i;
+
+ for (i = 0; i < lane_size; i++) {
+ if (data_index >= HCLGE_DESC_DATA_LEN) {
+ desc_index++;
+ data_index = 0;
+ }
+
+ if (desc_index >= desc_len)
+ return;
+
+ hdev->fec_stats.per_lanes[i] +=
+ le32_to_cpu(desc[desc_index].data[data_index]);
+ data_index++;
+ }
+}
+
+static void hclge_parse_fec_stats(struct hclge_dev *hdev,
+ struct hclge_desc *desc, u32 desc_len)
+{
+ struct hclge_query_fec_stats_cmd *req;
+
+ req = (struct hclge_query_fec_stats_cmd *)desc[0].data;
+
+ hdev->fec_stats.base_r_lane_num = req->base_r_lane_num;
+ hdev->fec_stats.rs_corr_blocks +=
+ le32_to_cpu(req->rs_fec_corr_blocks);
+ hdev->fec_stats.rs_uncorr_blocks +=
+ le32_to_cpu(req->rs_fec_uncorr_blocks);
+ hdev->fec_stats.rs_error_blocks +=
+ le32_to_cpu(req->rs_fec_error_blocks);
+ hdev->fec_stats.base_r_corr_blocks +=
+ le32_to_cpu(req->base_r_fec_corr_blocks);
+ hdev->fec_stats.base_r_uncorr_blocks +=
+ le32_to_cpu(req->base_r_fec_uncorr_blocks);
+
+ hclge_parse_fec_stats_lanes(hdev, &desc[1], desc_len - 1);
+}
+
+static int hclge_update_fec_stats_hw(struct hclge_dev *hdev)
+{
+ struct hclge_desc desc[HCLGE_FEC_STATS_CMD_NUM];
+ int ret;
+ u32 i;
+
+ for (i = 0; i < HCLGE_FEC_STATS_CMD_NUM; i++) {
+ hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_FEC_STATS,
+ true);
+ if (i != (HCLGE_FEC_STATS_CMD_NUM - 1))
+ desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ }
+
+ ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_FEC_STATS_CMD_NUM);
+ if (ret)
+ return ret;
+
+ hclge_parse_fec_stats(hdev, desc, HCLGE_FEC_STATS_CMD_NUM);
+
+ return 0;
+}
+
+static void hclge_update_fec_stats(struct hclge_dev *hdev)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+ int ret;
+
+ if (!hnae3_ae_dev_fec_stats_supported(ae_dev) ||
+ test_and_set_bit(HCLGE_STATE_FEC_STATS_UPDATING, &hdev->state))
+ return;
+
+ ret = hclge_update_fec_stats_hw(hdev);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "failed to update fec stats, ret = %d\n", ret);
+
+ clear_bit(HCLGE_STATE_FEC_STATS_UPDATING, &hdev->state);
+}
+
+static void hclge_get_fec_stats_total(struct hclge_dev *hdev,
+ struct ethtool_fec_stats *fec_stats)
+{
+ fec_stats->corrected_blocks.total = hdev->fec_stats.rs_corr_blocks;
+ fec_stats->uncorrectable_blocks.total =
+ hdev->fec_stats.rs_uncorr_blocks;
+}
+
+static void hclge_get_fec_stats_lanes(struct hclge_dev *hdev,
+ struct ethtool_fec_stats *fec_stats)
+{
+ u32 i;
+
+ if (hdev->fec_stats.base_r_lane_num == 0 ||
+ hdev->fec_stats.base_r_lane_num > HCLGE_FEC_STATS_MAX_LANES) {
+ dev_err(&hdev->pdev->dev,
+ "fec stats lane number(%llu) is invalid\n",
+ hdev->fec_stats.base_r_lane_num);
+ return;
+ }
+
+ for (i = 0; i < hdev->fec_stats.base_r_lane_num; i++) {
+ fec_stats->corrected_blocks.lanes[i] =
+ hdev->fec_stats.base_r_corr_per_lanes[i];
+ fec_stats->uncorrectable_blocks.lanes[i] =
+ hdev->fec_stats.base_r_uncorr_per_lanes[i];
+ }
+}
+
+static void hclge_comm_get_fec_stats(struct hclge_dev *hdev,
+ struct ethtool_fec_stats *fec_stats)
+{
+ u32 fec_mode = hdev->hw.mac.fec_mode;
+
+ switch (fec_mode) {
+ case BIT(HNAE3_FEC_RS):
+ case BIT(HNAE3_FEC_LLRS):
+ hclge_get_fec_stats_total(hdev, fec_stats);
+ break;
+ case BIT(HNAE3_FEC_BASER):
+ hclge_get_fec_stats_lanes(hdev, fec_stats);
+ break;
+ default:
+ dev_err(&hdev->pdev->dev,
+ "fec stats is not supported by current fec mode(0x%x)\n",
+ fec_mode);
+ break;
+ }
+}
+
+static void hclge_get_fec_stats(struct hnae3_handle *handle,
+ struct ethtool_fec_stats *fec_stats)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ u32 fec_mode = hdev->hw.mac.fec_mode;
+
+ if (fec_mode == BIT(HNAE3_FEC_NONE) ||
+ fec_mode == BIT(HNAE3_FEC_AUTO) ||
+ fec_mode == BIT(HNAE3_FEC_USER_DEF))
+ return;
+
+ hclge_update_fec_stats(hdev);
+
+ hclge_comm_get_fec_stats(hdev, fec_stats);
+}
+
+static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
+{
+ struct hclge_config_fec_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
+
+ req = (struct hclge_config_fec_cmd *)desc.data;
+ if (fec_mode & BIT(HNAE3_FEC_AUTO))
+ hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
+ if (fec_mode & BIT(HNAE3_FEC_RS))
+ hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
+ HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
+ if (fec_mode & BIT(HNAE3_FEC_LLRS))
+ hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
+ HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_LLRS);
+ if (fec_mode & BIT(HNAE3_FEC_BASER))
+ hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
+ HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
+
+ return ret;
+}
+
+static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_mac *mac = &hdev->hw.mac;
+ int ret;
+
+ if (fec_mode && !(mac->fec_ability & fec_mode)) {
+ dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
+ return -EINVAL;
+ }
+
+ ret = hclge_set_fec_hw(hdev, fec_mode);
+ if (ret)
+ return ret;
+
+ mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
+ return 0;
+}
+
+static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
+ u8 *fec_mode)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_mac *mac = &hdev->hw.mac;
+
+ if (fec_ability)
+ *fec_ability = mac->fec_ability;
+ if (fec_mode)
+ *fec_mode = mac->fec_mode;
+}
+
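+/* Initialize the MAC: configure speed/duplex, autoneg, any user-defined
+ * FEC mode, MTU, default loopback and the packet buffer allocation.
+ */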
+static int hclge_mac_init(struct hclge_dev *hdev)
+{
+ struct hclge_mac *mac = &hdev->hw.mac;
+ int ret;
+
+ hdev->support_sfp_query = true;
+ hdev->hw.mac.duplex = HCLGE_MAC_FULL;
+ ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
+ hdev->hw.mac.duplex, hdev->hw.mac.lane_num);
+ if (ret)
+ return ret;
+
+ if (hdev->hw.mac.support_autoneg) {
+ ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
+ if (ret)
+ return ret;
+ }
+
+ mac->link = 0;
+
+ if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
+ ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
+ if (ret)
+ return ret;
+ }
+
+ ret = hclge_set_mac_mtu(hdev, hdev->mps);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
+ return ret;
+ }
+
+ ret = hclge_set_default_loopback(hdev);
+ if (ret)
+ return ret;
+
+ ret = hclge_buffer_alloc(hdev);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "allocate buffer fail, ret=%d\n", ret);
+
+ return ret;
+}
+
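+/* The helpers below queue the PF service task on hclge_wq. The REMOVING
+ * bit blocks scheduling while the device is being torn down, and the
+ * per-type SCHED bits (where present) avoid queueing the same subtask
+ * twice.
+ */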
+static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
+{
+ if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
+ !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state)) {
+ hdev->last_mbx_scheduled = jiffies;
+ mod_delayed_work(hclge_wq, &hdev->service_task, 0);
+ }
+}
+
+static void hclge_reset_task_schedule(struct hclge_dev *hdev)
+{
+ if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
+ test_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state) &&
+ !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state)) {
+ hdev->last_rst_scheduled = jiffies;
+ mod_delayed_work(hclge_wq, &hdev->service_task, 0);
+ }
+}
+
+static void hclge_errhand_task_schedule(struct hclge_dev *hdev)
+{
+ if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
+ !test_and_set_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
+ mod_delayed_work(hclge_wq, &hdev->service_task, 0);
+}
+
+void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
+{
+ if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
+ !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
+ mod_delayed_work(hclge_wq, &hdev->service_task, delay_time);
+}
+
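+/* Query the MAC link status from firmware and translate it to
+ * HCLGE_LINK_STATUS_UP or HCLGE_LINK_STATUS_DOWN.
+ */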
+static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
+{
+ struct hclge_link_status_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
+ ret);
+ return ret;
+ }
+
+ req = (struct hclge_link_status_cmd *)desc.data;
+ *link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ?
+ HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
+
+ return 0;
+}
+
+static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
+{
+ struct phy_device *phydev = hdev->hw.mac.phydev;
+
+ *link_status = HCLGE_LINK_STATUS_DOWN;
+
+ if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
+ return 0;
+
+ if (phydev && (phydev->state != PHY_RUNNING || !phydev->link))
+ return 0;
+
+ return hclge_get_mac_link_status(hdev, link_status);
+}
+
+static void hclge_push_link_status(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport;
+ int ret;
+ u16 i;
+
+ for (i = 0; i < pci_num_vf(hdev->pdev); i++) {
+ vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM];
+
+ if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state) ||
+ vport->vf_info.link_state != IFLA_VF_LINK_STATE_AUTO)
+ continue;
+
+ ret = hclge_push_vf_link_status(vport);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to push link status to vf%u, ret = %d\n",
+ i, ret);
+ }
+ }
+}
+
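+/* Re-read the MAC/PHY link state and, on a change, notify the NIC and
+ * RoCE clients and push the new state to the VFs. The LINK_UPDATING bit
+ * keeps concurrent callers from racing.
+ */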
+static void hclge_update_link_status(struct hclge_dev *hdev)
+{
+ struct hnae3_handle *rhandle = &hdev->vport[0].roce;
+ struct hnae3_handle *handle = &hdev->vport[0].nic;
+ struct hnae3_client *rclient = hdev->roce_client;
+ struct hnae3_client *client = hdev->nic_client;
+ int state;
+ int ret;
+
+ if (!client)
+ return;
+
+ if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
+ return;
+
+ ret = hclge_get_mac_phy_link(hdev, &state);
+ if (ret) {
+ clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
+ return;
+ }
+
+ if (state != hdev->hw.mac.link) {
+ hdev->hw.mac.link = state;
+ if (state == HCLGE_LINK_STATUS_UP)
+ hclge_update_port_info(hdev);
+
+ client->ops->link_status_change(handle, state);
+ hclge_config_mac_tnl_int(hdev, state);
+ if (rclient && rclient->ops->link_status_change)
+ rclient->ops->link_status_change(rhandle, state);
+
+ hclge_push_link_status(hdev);
+ }
+
+ clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
+}
+
+static void hclge_update_speed_advertising(struct hclge_mac *mac)
+{
+ u32 speed_ability;
+
+ if (hclge_get_speed_bit(mac->speed, &speed_ability))
+ return;
+
+ switch (mac->module_type) {
+ case HNAE3_MODULE_TYPE_FIBRE_LR:
+ hclge_convert_setting_lr(speed_ability, mac->advertising);
+ break;
+ case HNAE3_MODULE_TYPE_FIBRE_SR:
+ case HNAE3_MODULE_TYPE_AOC:
+ hclge_convert_setting_sr(speed_ability, mac->advertising);
+ break;
+ case HNAE3_MODULE_TYPE_CR:
+ hclge_convert_setting_cr(speed_ability, mac->advertising);
+ break;
+ case HNAE3_MODULE_TYPE_KR:
+ hclge_convert_setting_kr(speed_ability, mac->advertising);
+ break;
+ default:
+ break;
+ }
+}
+
+static void hclge_update_fec_advertising(struct hclge_mac *mac)
+{
+ if (mac->fec_mode & BIT(HNAE3_FEC_RS))
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
+ mac->advertising);
+ else if (mac->fec_mode & BIT(HNAE3_FEC_LLRS))
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
+ mac->advertising);
+ else if (mac->fec_mode & BIT(HNAE3_FEC_BASER))
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
+ mac->advertising);
+ else
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
+ mac->advertising);
+}
+
+static void hclge_update_pause_advertising(struct hclge_dev *hdev)
+{
+ struct hclge_mac *mac = &hdev->hw.mac;
+ bool rx_en, tx_en;
+
+ switch (hdev->fc_mode_last_time) {
+ case HCLGE_FC_RX_PAUSE:
+ rx_en = true;
+ tx_en = false;
+ break;
+ case HCLGE_FC_TX_PAUSE:
+ rx_en = false;
+ tx_en = true;
+ break;
+ case HCLGE_FC_FULL:
+ rx_en = true;
+ tx_en = true;
+ break;
+ default:
+ rx_en = false;
+ tx_en = false;
+ break;
+ }
+
+ linkmode_set_pause(mac->advertising, tx_en, rx_en);
+}
+
+static void hclge_update_advertising(struct hclge_dev *hdev)
+{
+ struct hclge_mac *mac = &hdev->hw.mac;
+
+ linkmode_zero(mac->advertising);
+ hclge_update_speed_advertising(mac);
+ hclge_update_fec_advertising(mac);
+ hclge_update_pause_advertising(hdev);
+}
+
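+/* Update the supported/advertised link modes and the module type based
+ * on the media type and autoneg capability reported for the port.
+ */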
+static void hclge_update_port_capability(struct hclge_dev *hdev,
+ struct hclge_mac *mac)
+{
+ if (hnae3_dev_fec_supported(hdev))
+ hclge_convert_setting_fec(mac);
+
+	/* firmware cannot identify the backplane type, so the media type
+	 * read from the configuration is used to determine it
+	 */
+ if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
+ mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
+ mac->module_type = HNAE3_MODULE_TYPE_KR;
+ else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
+ mac->module_type = HNAE3_MODULE_TYPE_TP;
+
+ if (mac->support_autoneg) {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
+ linkmode_copy(mac->advertising, mac->supported);
+ } else {
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ mac->supported);
+ hclge_update_advertising(hdev);
+ }
+}
+
+static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
+{
+ struct hclge_sfp_info_cmd *resp;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
+ resp = (struct hclge_sfp_info_cmd *)desc.data;
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret == -EOPNOTSUPP) {
+ dev_warn(&hdev->pdev->dev,
+ "IMP do not support get SFP speed %d\n", ret);
+ return ret;
+ } else if (ret) {
+ dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
+ return ret;
+ }
+
+ *speed = le32_to_cpu(resp->speed);
+
+ return 0;
+}
+
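+/* Query the active speed, module type, FEC and autoneg capabilities from
+ * the SFP via IMP. Old firmware reports only the speed.
+ */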
+static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
+{
+ struct hclge_sfp_info_cmd *resp;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
+ resp = (struct hclge_sfp_info_cmd *)desc.data;
+
+ resp->query_type = QUERY_ACTIVE_SPEED;
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret == -EOPNOTSUPP) {
+ dev_warn(&hdev->pdev->dev,
+ "IMP does not support get SFP info %d\n", ret);
+ return ret;
+ } else if (ret) {
+ dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
+ return ret;
+ }
+
+	/* In some cases, the mac speed got from IMP may be 0; it should not
+	 * be written to mac->speed.
+	 */
+ if (!le32_to_cpu(resp->speed))
+ return 0;
+
+ mac->speed = le32_to_cpu(resp->speed);
+	/* if resp->speed_ability is 0, the firmware is an old version that
+	 * does not report these params, so do not update them
+	 */
+ if (resp->speed_ability) {
+ mac->module_type = le32_to_cpu(resp->module_type);
+ mac->speed_ability = le32_to_cpu(resp->speed_ability);
+ mac->autoneg = resp->autoneg;
+ mac->support_autoneg = resp->autoneg_ability;
+ mac->speed_type = QUERY_ACTIVE_SPEED;
+ mac->lane_num = resp->lane_num;
+ if (!resp->active_fec)
+ mac->fec_mode = 0;
+ else
+ mac->fec_mode = BIT(resp->active_fec);
+ mac->fec_ability = resp->fec_ability;
+ } else {
+ mac->speed_type = QUERY_SFP_SPEED;
+ }
+
+ return 0;
+}
+
+static int hclge_get_phy_link_ksettings(struct hnae3_handle *handle,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_phy_link_ksetting_0_cmd *req0;
+ struct hclge_phy_link_ksetting_1_cmd *req1;
+ u32 supported, advertising, lp_advertising;
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
+ true);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
+ true);
+
+ ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get phy link ksetting, ret = %d.\n", ret);
+ return ret;
+ }
+
+ req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
+ cmd->base.autoneg = req0->autoneg;
+ cmd->base.speed = le32_to_cpu(req0->speed);
+ cmd->base.duplex = req0->duplex;
+ cmd->base.port = req0->port;
+ cmd->base.transceiver = req0->transceiver;
+ cmd->base.phy_address = req0->phy_address;
+ cmd->base.eth_tp_mdix = req0->eth_tp_mdix;
+ cmd->base.eth_tp_mdix_ctrl = req0->eth_tp_mdix_ctrl;
+ supported = le32_to_cpu(req0->supported);
+ advertising = le32_to_cpu(req0->advertising);
+ lp_advertising = le32_to_cpu(req0->lp_advertising);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
+ lp_advertising);
+
+ req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
+ cmd->base.master_slave_cfg = req1->master_slave_cfg;
+ cmd->base.master_slave_state = req1->master_slave_state;
+
+ return 0;
+}
+
+static int
+hclge_set_phy_link_ksettings(struct hnae3_handle *handle,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_phy_link_ksetting_0_cmd *req0;
+ struct hclge_phy_link_ksetting_1_cmd *req1;
+ struct hclge_dev *hdev = vport->back;
+ u32 advertising;
+ int ret;
+
+ if (cmd->base.autoneg == AUTONEG_DISABLE &&
+ ((cmd->base.speed != SPEED_100 && cmd->base.speed != SPEED_10) ||
+ (cmd->base.duplex != DUPLEX_HALF &&
+ cmd->base.duplex != DUPLEX_FULL)))
+ return -EINVAL;
+
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
+ false);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
+ false);
+
+ req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
+ req0->autoneg = cmd->base.autoneg;
+ req0->speed = cpu_to_le32(cmd->base.speed);
+ req0->duplex = cmd->base.duplex;
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
+ req0->advertising = cpu_to_le32(advertising);
+ req0->eth_tp_mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;
+
+ req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
+ req1->master_slave_cfg = cmd->base.master_slave_cfg;
+
+ ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to set phy link ksettings, ret = %d.\n", ret);
+ return ret;
+ }
+
+ hdev->hw.mac.autoneg = cmd->base.autoneg;
+ hdev->hw.mac.speed = cmd->base.speed;
+ hdev->hw.mac.duplex = cmd->base.duplex;
+ linkmode_copy(hdev->hw.mac.advertising, cmd->link_modes.advertising);
+
+ return 0;
+}
+
+static int hclge_update_tp_port_info(struct hclge_dev *hdev)
+{
+ struct ethtool_link_ksettings cmd;
+ int ret;
+
+ if (!hnae3_dev_phy_imp_supported(hdev))
+ return 0;
+
+ ret = hclge_get_phy_link_ksettings(&hdev->vport->nic, &cmd);
+ if (ret)
+ return ret;
+
+ hdev->hw.mac.autoneg = cmd.base.autoneg;
+ hdev->hw.mac.speed = cmd.base.speed;
+ hdev->hw.mac.duplex = cmd.base.duplex;
+ linkmode_copy(hdev->hw.mac.advertising, cmd.link_modes.advertising);
+
+ return 0;
+}
+
+static int hclge_tp_port_init(struct hclge_dev *hdev)
+{
+ struct ethtool_link_ksettings cmd;
+
+ if (!hnae3_dev_phy_imp_supported(hdev))
+ return 0;
+
+ cmd.base.autoneg = hdev->hw.mac.autoneg;
+ cmd.base.speed = hdev->hw.mac.speed;
+ cmd.base.duplex = hdev->hw.mac.duplex;
+ linkmode_copy(cmd.link_modes.advertising, hdev->hw.mac.advertising);
+
+ return hclge_set_phy_link_ksettings(&hdev->vport->nic, &cmd);
+}
+
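+/* Refresh the port information: copper ports are queried through the PHY
+ * link ksettings, other media through the SFP commands, and the MAC
+ * speed/duplex is reconfigured when necessary.
+ */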
+static int hclge_update_port_info(struct hclge_dev *hdev)
+{
+ struct hclge_mac *mac = &hdev->hw.mac;
+ int speed;
+ int ret;
+
+ /* get the port info from SFP cmd if not copper port */
+ if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
+ return hclge_update_tp_port_info(hdev);
+
+	/* if IMP does not support querying SFP/qSFP info, return directly */
+ if (!hdev->support_sfp_query)
+ return 0;
+
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
+ speed = mac->speed;
+ ret = hclge_get_sfp_info(hdev, mac);
+ } else {
+ speed = HCLGE_MAC_SPEED_UNKNOWN;
+ ret = hclge_get_sfp_speed(hdev, &speed);
+ }
+
+ if (ret == -EOPNOTSUPP) {
+ hdev->support_sfp_query = false;
+ return ret;
+ } else if (ret) {
+ return ret;
+ }
+
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
+ if (mac->speed_type == QUERY_ACTIVE_SPEED) {
+ hclge_update_port_capability(hdev, mac);
+ if (mac->speed != speed)
+ (void)hclge_tm_port_shaper_cfg(hdev);
+ return 0;
+ }
+ return hclge_cfg_mac_speed_dup(hdev, mac->speed,
+ HCLGE_MAC_FULL, mac->lane_num);
+ } else {
+ if (speed == HCLGE_MAC_SPEED_UNKNOWN)
+ return 0; /* do nothing if no SFP */
+
+ /* must config full duplex for SFP */
+ return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL, 0);
+ }
+}
+
+static int hclge_get_status(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ hclge_update_link_status(hdev);
+
+ return hdev->hw.mac.link;
+}
+
+static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
+{
+ if (!pci_num_vf(hdev->pdev)) {
+ dev_err(&hdev->pdev->dev,
+ "SRIOV is disabled, can not get vport(%d) info.\n", vf);
+ return NULL;
+ }
+
+ if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
+ dev_err(&hdev->pdev->dev,
+ "vf id(%d) is out of range(0 <= vfid < %d)\n",
+ vf, pci_num_vf(hdev->pdev));
+ return NULL;
+ }
+
+	/* VF vports start at HCLGE_VF_VPORT_START_NUM; vport 0 is the PF */
+ vf += HCLGE_VF_VPORT_START_NUM;
+ return &hdev->vport[vf];
+}
+
+static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
+ struct ifla_vf_info *ivf)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ vport = hclge_get_vf_vport(hdev, vf);
+ if (!vport)
+ return -EINVAL;
+
+ ivf->vf = vf;
+ ivf->linkstate = vport->vf_info.link_state;
+ ivf->spoofchk = vport->vf_info.spoofchk;
+ ivf->trusted = vport->vf_info.trusted;
+ ivf->min_tx_rate = 0;
+ ivf->max_tx_rate = vport->vf_info.max_tx_rate;
+ ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
+ ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
+ ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
+ ether_addr_copy(ivf->mac, vport->vf_info.mac);
+
+ return 0;
+}
+
+static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
+ int link_state)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ int link_state_old;
+ int ret;
+
+ vport = hclge_get_vf_vport(hdev, vf);
+ if (!vport)
+ return -EINVAL;
+
+ link_state_old = vport->vf_info.link_state;
+ vport->vf_info.link_state = link_state;
+
+	/* return success directly if the VF is not alive; the VF will
+	 * query the link state itself when it starts working.
+	 */
+ if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
+ return 0;
+
+ ret = hclge_push_vf_link_status(vport);
+ if (ret) {
+ vport->vf_info.link_state = link_state_old;
+ dev_err(&hdev->pdev->dev,
+ "failed to push vf%d link status, ret = %d\n", vf, ret);
+ }
+
+ return ret;
+}
+
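+/* Decode the vector0 (misc) interrupt sources and return the event type
+ * to handle; *clearval receives the bits to clear for that event.
+ */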
+static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
+{
+ u32 cmdq_src_reg, msix_src_reg, hw_err_src_reg;
+
+ /* fetch the events from their corresponding regs */
+ cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
+ msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
+ hw_err_src_reg = hclge_read_dev(&hdev->hw,
+ HCLGE_RAS_PF_OTHER_INT_STS_REG);
+
+	/* Assumption: if reset and mailbox events are reported together,
+	 * only the reset event is processed in this pass and the mailbox
+	 * events are deferred. Since the RX CMDQ event has not been
+	 * cleared this time, the hardware will raise another interrupt
+	 * just for the mailbox.
+	 *
+	 * check for vector0 reset event sources
+	 */
+ if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
+ dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
+ set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
+ set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
+ *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
+ hdev->rst_stats.imp_rst_cnt++;
+ return HCLGE_VECTOR0_EVENT_RST;
+ }
+
+ if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
+ dev_info(&hdev->pdev->dev, "global reset interrupt\n");
+ set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
+ set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
+ *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
+ hdev->rst_stats.global_rst_cnt++;
+ return HCLGE_VECTOR0_EVENT_RST;
+ }
+
+ /* check for vector0 msix event and hardware error event source */
+ if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK ||
+ hw_err_src_reg & HCLGE_RAS_REG_ERR_MASK)
+ return HCLGE_VECTOR0_EVENT_ERR;
+
+ /* check for vector0 ptp event source */
+ if (BIT(HCLGE_VECTOR0_REG_PTP_INT_B) & msix_src_reg) {
+ *clearval = msix_src_reg;
+ return HCLGE_VECTOR0_EVENT_PTP;
+ }
+
+ /* check for vector0 mailbox(=CMDQ RX) event source */
+ if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
+ cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
+ *clearval = cmdq_src_reg;
+ return HCLGE_VECTOR0_EVENT_MBX;
+ }
+
+ /* print other vector0 event source */
+ dev_info(&hdev->pdev->dev,
+ "INT status: CMDQ(%#x) HW errors(%#x) other(%#x)\n",
+ cmdq_src_reg, hw_err_src_reg, msix_src_reg);
+
+ return HCLGE_VECTOR0_EVENT_OTHER;
+}
+
+static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
+ u32 regclr)
+{
+#define HCLGE_IMP_RESET_DELAY 5
+
+ switch (event_type) {
+ case HCLGE_VECTOR0_EVENT_PTP:
+ case HCLGE_VECTOR0_EVENT_RST:
+ if (regclr == BIT(HCLGE_VECTOR0_IMPRESET_INT_B))
+ mdelay(HCLGE_IMP_RESET_DELAY);
+
+ hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
+ break;
+ case HCLGE_VECTOR0_EVENT_MBX:
+ hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
+ break;
+ default:
+ break;
+ }
+}
+
+static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
+{
+ hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
+ BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
+ BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
+ BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
+ hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
+}
+
+static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
+{
+ writel(enable ? 1 : 0, vector->addr);
+}
+
+static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
+{
+ struct hclge_dev *hdev = data;
+ unsigned long flags;
+ u32 clearval = 0;
+ u32 event_cause;
+
+ hclge_enable_vector(&hdev->misc_vector, false);
+ event_cause = hclge_check_event_cause(hdev, &clearval);
+
+ /* vector 0 interrupt is shared with reset and mailbox source events. */
+ switch (event_cause) {
+ case HCLGE_VECTOR0_EVENT_ERR:
+ hclge_errhand_task_schedule(hdev);
+ break;
+ case HCLGE_VECTOR0_EVENT_RST:
+ hclge_reset_task_schedule(hdev);
+ break;
+ case HCLGE_VECTOR0_EVENT_PTP:
+ spin_lock_irqsave(&hdev->ptp->lock, flags);
+ hclge_ptp_clean_tx_hwts(hdev);
+ spin_unlock_irqrestore(&hdev->ptp->lock, flags);
+ break;
+ case HCLGE_VECTOR0_EVENT_MBX:
+		/* If we are here, then either:
+		 * 1. no mailbox task is being handled and none is scheduled,
+		 * or
+		 * 2. a mailbox task is being handled but nothing more is
+		 *    scheduled.
+		 * In both cases, schedule the mailbox task, since this
+		 * interrupt indicates more mailbox messages are pending.
+		 */
+ hclge_mbx_task_schedule(hdev);
+ break;
+ default:
+ dev_warn(&hdev->pdev->dev,
+ "received unknown or unhandled event of vector0\n");
+ break;
+ }
+
+ hclge_clear_event_cause(hdev, event_cause, clearval);
+
+ /* Enable interrupt if it is not caused by reset event or error event */
+ if (event_cause == HCLGE_VECTOR0_EVENT_PTP ||
+ event_cause == HCLGE_VECTOR0_EVENT_MBX ||
+ event_cause == HCLGE_VECTOR0_EVENT_OTHER)
+ hclge_enable_vector(&hdev->misc_vector, true);
+
+ return IRQ_HANDLED;
+}
+
+static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
+{
+ if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
+ dev_warn(&hdev->pdev->dev,
+ "vector(vector_id %d) has been freed.\n", vector_id);
+ return;
+ }
+
+ hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
+ hdev->num_msi_left += 1;
+ hdev->num_msi_used -= 1;
+}
+
+static void hclge_get_misc_vector(struct hclge_dev *hdev)
+{
+ struct hclge_misc_vector *vector = &hdev->misc_vector;
+
+ vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
+
+ vector->addr = hdev->hw.hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
+ hdev->vector_status[0] = 0;
+
+ hdev->num_msi_left -= 1;
+ hdev->num_msi_used += 1;
+}
+
+static int hclge_misc_irq_init(struct hclge_dev *hdev)
+{
+ int ret;
+
+ hclge_get_misc_vector(hdev);
+
+	/* this IRQ is freed explicitly in hclge_misc_irq_uninit() */
+ snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
+ HCLGE_NAME, pci_name(hdev->pdev));
+ ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
+ 0, hdev->misc_vector.name, hdev);
+ if (ret) {
+ hclge_free_vector(hdev, 0);
+ dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
+ hdev->misc_vector.vector_irq);
+ }
+
+ return ret;
+}
+
+static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
+{
+ free_irq(hdev->misc_vector.vector_irq, hdev);
+ hclge_free_vector(hdev, 0);
+}
+
+int hclge_notify_client(struct hclge_dev *hdev,
+ enum hnae3_reset_notify_type type)
+{
+ struct hnae3_handle *handle = &hdev->vport[0].nic;
+ struct hnae3_client *client = hdev->nic_client;
+ int ret;
+
+ if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
+ return 0;
+
+ if (!client->ops->reset_notify)
+ return -EOPNOTSUPP;
+
+ ret = client->ops->reset_notify(handle, type);
+ if (ret)
+ dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
+ type, ret);
+
+ return ret;
+}
+
+static int hclge_notify_roce_client(struct hclge_dev *hdev,
+ enum hnae3_reset_notify_type type)
+{
+ struct hnae3_handle *handle = &hdev->vport[0].roce;
+ struct hnae3_client *client = hdev->roce_client;
+ int ret;
+
+ if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
+ return 0;
+
+ if (!client->ops->reset_notify)
+ return -EOPNOTSUPP;
+
+ ret = client->ops->reset_notify(handle, type);
+ if (ret)
+ dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)",
+ type, ret);
+
+ return ret;
+}
+
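+/* Poll the reset status register until the hardware reports that the
+ * current reset type has completed, or time out after 350 * 100 ms.
+ */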
+static int hclge_reset_wait(struct hclge_dev *hdev)
+{
+#define HCLGE_RESET_WAIT_MS	100
+#define HCLGE_RESET_WAIT_CNT 350
+
+ u32 val, reg, reg_bit;
+ u32 cnt = 0;
+
+ switch (hdev->reset_type) {
+ case HNAE3_IMP_RESET:
+ reg = HCLGE_GLOBAL_RESET_REG;
+ reg_bit = HCLGE_IMP_RESET_BIT;
+ break;
+ case HNAE3_GLOBAL_RESET:
+ reg = HCLGE_GLOBAL_RESET_REG;
+ reg_bit = HCLGE_GLOBAL_RESET_BIT;
+ break;
+ case HNAE3_FUNC_RESET:
+ reg = HCLGE_FUN_RST_ING;
+ reg_bit = HCLGE_FUN_RST_ING_B;
+ break;
+ default:
+ dev_err(&hdev->pdev->dev,
+ "Wait for unsupported reset type: %d\n",
+ hdev->reset_type);
+ return -EINVAL;
+ }
+
+ val = hclge_read_dev(&hdev->hw, reg);
+ while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
+		msleep(HCLGE_RESET_WAIT_MS);
+ val = hclge_read_dev(&hdev->hw, reg);
+ cnt++;
+ }
+
+ if (cnt >= HCLGE_RESET_WAIT_CNT) {
+ dev_warn(&hdev->pdev->dev,
+ "Wait for reset timeout: %d\n", hdev->reset_type);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
+{
+ struct hclge_vf_rst_cmd *req;
+ struct hclge_desc desc;
+
+ req = (struct hclge_vf_rst_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
+ req->dest_vfid = func_id;
+
+ if (reset)
+ req->vf_rst = 0x1;
+
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
+static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
+{
+ int i;
+
+ for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++) {
+ struct hclge_vport *vport = &hdev->vport[i];
+ int ret;
+
+ /* Send cmd to set/clear VF's FUNC_RST_ING */
+ ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "set vf(%u) rst failed %d!\n",
+ vport->vport_id - HCLGE_VF_VPORT_START_NUM,
+ ret);
+ return ret;
+ }
+
+ if (!reset ||
+ !test_bit(HCLGE_VPORT_STATE_INITED, &vport->state))
+ continue;
+
+ if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state) &&
+ hdev->reset_type == HNAE3_FUNC_RESET) {
+ set_bit(HCLGE_VPORT_NEED_NOTIFY_RESET,
+ &vport->need_notify);
+ continue;
+ }
+
+ /* Inform VF to process the reset.
+ * hclge_inform_reset_assert_to_vf may fail if VF
+ * driver is not loaded.
+ */
+ ret = hclge_inform_reset_assert_to_vf(vport);
+ if (ret)
+ dev_warn(&hdev->pdev->dev,
+ "inform reset to vf(%u) failed %d!\n",
+ vport->vport_id - HCLGE_VF_VPORT_START_NUM,
+ ret);
+ }
+
+ return 0;
+}
+
+static void hclge_mailbox_service_task(struct hclge_dev *hdev)
+{
+ if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
+ test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state) ||
+ test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
+ return;
+
+ if (time_is_before_jiffies(hdev->last_mbx_scheduled +
+ HCLGE_MBX_SCHED_TIMEOUT))
+ dev_warn(&hdev->pdev->dev,
+ "mbx service task is scheduled after %ums on cpu%u!\n",
+ jiffies_to_msecs(jiffies - hdev->last_mbx_scheduled),
+ smp_processor_id());
+
+ hclge_mbx_handler(hdev);
+
+ clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
+}
+
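+/* Before a function reset, wait until all VFs report they are ready
+ * (have stopped IO), or fall back to a fixed delay on old firmware.
+ */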
+static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
+{
+ struct hclge_pf_rst_sync_cmd *req;
+ struct hclge_desc desc;
+ int cnt = 0;
+ int ret;
+
+ req = (struct hclge_pf_rst_sync_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
+
+ do {
+		/* VFs down their netdev by mailbox during PF or FLR reset */
+ hclge_mailbox_service_task(hdev);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+		/* for compatibility with old firmware, wait
+		 * 100 ms for the VF to stop IO
+		 */
+ if (ret == -EOPNOTSUPP) {
+ msleep(HCLGE_RESET_SYNC_TIME);
+ return;
+ } else if (ret) {
+ dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
+ ret);
+ return;
+ } else if (req->all_vf_ready) {
+ return;
+ }
+ msleep(HCLGE_PF_RESET_SYNC_TIME);
+ hclge_comm_cmd_reuse_desc(&desc, true);
+ } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
+
+ dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
+}
+
+void hclge_report_hw_error(struct hclge_dev *hdev,
+ enum hnae3_hw_error_type type)
+{
+ struct hnae3_client *client = hdev->nic_client;
+
+ if (!client || !client->ops->process_hw_error ||
+ !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
+ return;
+
+ client->ops->process_hw_error(&hdev->vport[0].nic, type);
+}
+
+static void hclge_handle_imp_error(struct hclge_dev *hdev)
+{
+ u32 reg_val;
+
+ reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
+ if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
+ hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
+ reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
+ hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
+ }
+
+ if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
+ hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
+ reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
+ hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
+ }
+}
+
+int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
+{
+ struct hclge_desc desc;
+ struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
+ hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
+ req->fun_reset_vfid = func_id;
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "send function reset cmd fail, status =%d\n", ret);
+
+ return ret;
+}
+
+static void hclge_do_reset(struct hclge_dev *hdev)
+{
+ struct hnae3_handle *handle = &hdev->vport[0].nic;
+ struct pci_dev *pdev = hdev->pdev;
+ u32 val;
+
+ if (hclge_get_hw_reset_stat(handle)) {
+ dev_info(&pdev->dev, "hardware reset not finish\n");
+ dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
+ hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
+ hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
+ return;
+ }
+
+ switch (hdev->reset_type) {
+ case HNAE3_IMP_RESET:
+ dev_info(&pdev->dev, "IMP reset requested\n");
+ val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
+ hnae3_set_bit(val, HCLGE_TRIGGER_IMP_RESET_B, 1);
+ hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, val);
+ break;
+ case HNAE3_GLOBAL_RESET:
+ dev_info(&pdev->dev, "global reset requested\n");
+ val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
+ hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
+ hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
+ break;
+ case HNAE3_FUNC_RESET:
+ dev_info(&pdev->dev, "PF reset requested\n");
+ /* schedule again to check later */
+ set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
+ hclge_reset_task_schedule(hdev);
+ break;
+ default:
+ dev_warn(&pdev->dev,
+ "unsupported reset type: %d\n", hdev->reset_type);
+ break;
+ }
+}
+
+static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
+ unsigned long *addr)
+{
+ enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
+ struct hclge_dev *hdev = ae_dev->priv;
+
+ /* return the highest priority reset level amongst all */
+ if (test_bit(HNAE3_IMP_RESET, addr)) {
+ rst_level = HNAE3_IMP_RESET;
+ clear_bit(HNAE3_IMP_RESET, addr);
+ clear_bit(HNAE3_GLOBAL_RESET, addr);
+ clear_bit(HNAE3_FUNC_RESET, addr);
+ } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
+ rst_level = HNAE3_GLOBAL_RESET;
+ clear_bit(HNAE3_GLOBAL_RESET, addr);
+ clear_bit(HNAE3_FUNC_RESET, addr);
+ } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
+ rst_level = HNAE3_FUNC_RESET;
+ clear_bit(HNAE3_FUNC_RESET, addr);
+ } else if (test_bit(HNAE3_FLR_RESET, addr)) {
+ rst_level = HNAE3_FLR_RESET;
+ clear_bit(HNAE3_FLR_RESET, addr);
+ }
+
+ if (hdev->reset_type != HNAE3_NONE_RESET &&
+ rst_level < hdev->reset_type)
+ return HNAE3_NONE_RESET;
+
+ return rst_level;
+}
+
+static void hclge_clear_reset_cause(struct hclge_dev *hdev)
+{
+ u32 clearval = 0;
+
+ switch (hdev->reset_type) {
+ case HNAE3_IMP_RESET:
+ clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
+ break;
+ case HNAE3_GLOBAL_RESET:
+ clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
+ break;
+ default:
+ break;
+ }
+
+ if (!clearval)
+ return;
+
+	/* For revision 0x20, the reset interrupt source
+	 * can only be cleared after the hardware reset is done
+	 */
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
+ hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
+ clearval);
+
+ hclge_enable_vector(&hdev->misc_vector, true);
+}
+
+static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
+{
+ u32 reg_val;
+
+ reg_val = hclge_read_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG);
+ if (enable)
+ reg_val |= HCLGE_COMM_NIC_SW_RST_RDY;
+ else
+ reg_val &= ~HCLGE_COMM_NIC_SW_RST_RDY;
+
+ hclge_write_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG, reg_val);
+}
+
+static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
+{
+ int ret;
+
+ ret = hclge_set_all_vf_rst(hdev, true);
+ if (ret)
+ return ret;
+
+ hclge_func_reset_sync_vf(hdev);
+
+ return 0;
+}
+
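+/* Per-reset-type preparation: notify and sync the VFs, assert the
+ * function or IMP reset, then perform the reset handshake with hardware.
+ */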
+static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
+{
+ u32 reg_val;
+ int ret = 0;
+
+ switch (hdev->reset_type) {
+ case HNAE3_FUNC_RESET:
+ ret = hclge_func_reset_notify_vf(hdev);
+ if (ret)
+ return ret;
+
+ ret = hclge_func_reset_cmd(hdev, 0);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "asserting function reset fail %d!\n", ret);
+ return ret;
+ }
+
+		/* After performing a PF reset, it is not necessary to do any
+		 * mailbox handling or send any command to firmware, because
+		 * any mailbox handling or command to firmware is only valid
+		 * after hclge_comm_cmd_init is called.
+		 */
+ set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
+ hdev->rst_stats.pf_rst_cnt++;
+ break;
+ case HNAE3_FLR_RESET:
+ ret = hclge_func_reset_notify_vf(hdev);
+ if (ret)
+ return ret;
+ break;
+ case HNAE3_IMP_RESET:
+ hclge_handle_imp_error(hdev);
+ reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
+ hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
+ BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
+ break;
+ default:
+ break;
+ }
+
+ /* inform hardware that preparatory work is done */
+ msleep(HCLGE_RESET_SYNC_TIME);
+ hclge_reset_handshake(hdev, true);
+ dev_info(&hdev->pdev->dev, "prepare wait ok\n");
+
+ return ret;
+}
+
+static void hclge_show_rst_info(struct hclge_dev *hdev)
+{
+ char *buf;
+
+ buf = kzalloc(HCLGE_DBG_RESET_INFO_LEN, GFP_KERNEL);
+ if (!buf)
+ return;
+
+ hclge_dbg_dump_rst_info(hdev, buf, HCLGE_DBG_RESET_INFO_LEN);
+
+ dev_info(&hdev->pdev->dev, "dump reset info:\n%s", buf);
+
+ kfree(buf);
+}
+
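+/* Handle a failed reset attempt. Returns true if the reset task should
+ * be rescheduled, false if a newly reported reset takes over or the
+ * driver gives up and sets HCLGE_STATE_RST_FAIL.
+ */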
+static bool hclge_reset_err_handle(struct hclge_dev *hdev)
+{
+#define MAX_RESET_FAIL_CNT 5
+
+ if (hdev->reset_pending) {
+ dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
+ hdev->reset_pending);
+ return true;
+ } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
+ HCLGE_RESET_INT_M) {
+ dev_info(&hdev->pdev->dev,
+ "reset failed because new reset interrupt\n");
+ hclge_clear_reset_cause(hdev);
+ return false;
+ } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
+ hdev->rst_stats.reset_fail_cnt++;
+ set_bit(hdev->reset_type, &hdev->reset_pending);
+ dev_info(&hdev->pdev->dev,
+ "re-schedule reset task(%u)\n",
+ hdev->rst_stats.reset_fail_cnt);
+ return true;
+ }
+
+ hclge_clear_reset_cause(hdev);
+
+	/* recover the handshake status when the reset fails */
+ hclge_reset_handshake(hdev, true);
+
+ dev_err(&hdev->pdev->dev, "Reset fail!\n");
+
+ hclge_show_rst_info(hdev);
+
+ set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
+
+ return false;
+}
+
+static void hclge_update_reset_level(struct hclge_dev *hdev)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+ enum hnae3_reset_type reset_level;
+
+	/* reset requests will not be set during a reset, so clear any
+	 * pending reset request to avoid an unnecessary reset caused
+	 * by the same reason.
+	 */
+ hclge_get_reset_level(ae_dev, &hdev->reset_request);
+
+	/* if default_reset_request has a higher level reset request,
+	 * it should be handled as soon as possible, since some errors
+	 * need this kind of reset to be fixed.
+	 */
+ reset_level = hclge_get_reset_level(ae_dev,
+ &hdev->default_reset_request);
+ if (reset_level != HNAE3_NONE_RESET)
+ set_bit(reset_level, &hdev->reset_request);
+}
+
+static int hclge_set_rst_done(struct hclge_dev *hdev)
+{
+ struct hclge_pf_rst_done_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ req = (struct hclge_pf_rst_done_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
+ req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ /* To be compatible with the old firmware, which does not support
+ * command HCLGE_OPC_PF_RST_DONE, just print a warning and
+ * return success
+ */
+ if (ret == -EOPNOTSUPP) {
+ dev_warn(&hdev->pdev->dev,
+ "current firmware does not support command(0x%x)!\n",
+ HCLGE_OPC_PF_RST_DONE);
+ return 0;
+ } else if (ret) {
+ dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
+ ret);
+ }
+
+ return ret;
+}
+
+static int hclge_reset_prepare_up(struct hclge_dev *hdev)
+{
+ int ret = 0;
+
+ switch (hdev->reset_type) {
+ case HNAE3_FUNC_RESET:
+ case HNAE3_FLR_RESET:
+ ret = hclge_set_all_vf_rst(hdev, false);
+ break;
+ case HNAE3_GLOBAL_RESET:
+ case HNAE3_IMP_RESET:
+ ret = hclge_set_rst_done(hdev);
+ break;
+ default:
+ break;
+ }
+
+	/* clear the handshake status after re-initialization is done */
+ hclge_reset_handshake(hdev, false);
+
+ return ret;
+}
+
+static int hclge_reset_stack(struct hclge_dev *hdev)
+{
+ int ret;
+
+ ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
+ if (ret)
+ return ret;
+
+ ret = hclge_reset_ae_dev(hdev->ae_dev);
+ if (ret)
+ return ret;
+
+ return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
+}
+
+static int hclge_reset_prepare(struct hclge_dev *hdev)
+{
+ int ret;
+
+ hdev->rst_stats.reset_cnt++;
+ /* perform reset of the stack & ae device for a client */
+ ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
+ if (ret)
+ return ret;
+
+ rtnl_lock();
+ ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
+ rtnl_unlock();
+ if (ret)
+ return ret;
+
+ return hclge_reset_prepare_wait(hdev);
+}
+
+static int hclge_reset_rebuild(struct hclge_dev *hdev)
+{
+ int ret;
+
+ hdev->rst_stats.hw_reset_done_cnt++;
+
+ ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
+ if (ret)
+ return ret;
+
+ rtnl_lock();
+ ret = hclge_reset_stack(hdev);
+ rtnl_unlock();
+ if (ret)
+ return ret;
+
+ hclge_clear_reset_cause(hdev);
+
+ ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
+	/* ignore the RoCE notify error only after it has already failed
+	 * HCLGE_RESET_MAX_FAIL_CNT - 1 times
+	 */
+ if (ret &&
+ hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
+ return ret;
+
+ ret = hclge_reset_prepare_up(hdev);
+ if (ret)
+ return ret;
+
+ rtnl_lock();
+ ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+ rtnl_unlock();
+ if (ret)
+ return ret;
+
+ ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
+ if (ret)
+ return ret;
+
+ hdev->last_reset_time = jiffies;
+ hdev->rst_stats.reset_fail_cnt = 0;
+ hdev->rst_stats.reset_done_cnt++;
+ clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
+
+ hclge_update_reset_level(hdev);
+
+ return 0;
+}
+
+static void hclge_reset(struct hclge_dev *hdev)
+{
+ if (hclge_reset_prepare(hdev))
+ goto err_reset;
+
+ if (hclge_reset_wait(hdev))
+ goto err_reset;
+
+ if (hclge_reset_rebuild(hdev))
+ goto err_reset;
+
+ return;
+
+err_reset:
+ if (hclge_reset_err_handle(hdev))
+ hclge_reset_task_schedule(hdev);
+}
+
+static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
+ struct hclge_dev *hdev = ae_dev->priv;
+
+	/* We might end up getting called here in two cases:
+	 * 1. A recoverable error was conveyed through APEI and the only way
+	 *    to restore normal operation is a reset.
+	 * 2. A new reset request from the stack due to a timeout.
+	 *
+	 * Check whether this is a new reset request rather than the watchdog
+	 * hitting us again because the last reset attempt did not succeed.
+	 * We know it is new if the last reset request did not occur very
+	 * recently (watchdog timer = 5*HZ, so check after a sufficiently
+	 * large time, say 4*5*HZ). For a new request, reset the "reset
+	 * level" to PF reset. If it repeats the most recent request, the
+	 * request is throttled and not allowed again before
+	 * HCLGE_RESET_INTERVAL has passed.
+	 */
+
+ if (time_before(jiffies, (hdev->last_reset_time +
+ HCLGE_RESET_INTERVAL))) {
+ mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
+ return;
+ }
+
+ if (hdev->default_reset_request) {
+ hdev->reset_level =
+ hclge_get_reset_level(ae_dev,
+ &hdev->default_reset_request);
+ } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
+ hdev->reset_level = HNAE3_FUNC_RESET;
+ }
+
+ dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
+ hdev->reset_level);
+
+ /* request reset & schedule reset task */
+ set_bit(hdev->reset_level, &hdev->reset_request);
+ hclge_reset_task_schedule(hdev);
+
+ if (hdev->reset_level < HNAE3_GLOBAL_RESET)
+ hdev->reset_level++;
+}
+
+static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
+ enum hnae3_reset_type rst_type)
+{
+ struct hclge_dev *hdev = ae_dev->priv;
+
+ set_bit(rst_type, &hdev->default_reset_request);
+}
+
+static void hclge_reset_timer(struct timer_list *t)
+{
+ struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
+
+	/* if default_reset_request has no value, it means that this reset
+	 * request has already been handled, so just return here
+	 */
+ if (!hdev->default_reset_request)
+ return;
+
+ dev_info(&hdev->pdev->dev,
+ "triggering reset in reset timer\n");
+ hclge_reset_event(hdev->pdev, NULL);
+}
+
+static void hclge_reset_subtask(struct hclge_dev *hdev)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+
+	/* check if there is any ongoing reset in the hardware. This status
+	 * can be checked from reset_pending. If there is, we need to wait
+	 * for the hardware to complete the reset.
+	 * a. If we are able to figure out in a reasonable time that the
+	 *    hardware has fully reset, we can proceed with the driver and
+	 *    client reset.
+	 * b. Otherwise, come back later to check this status, so reschedule
+	 *    now.
+	 */
+ hdev->last_reset_time = jiffies;
+ hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
+ if (hdev->reset_type != HNAE3_NONE_RESET)
+ hclge_reset(hdev);
+
+ /* check if we got any *new* reset requests to be honored */
+ hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
+ if (hdev->reset_type != HNAE3_NONE_RESET)
+ hclge_do_reset(hdev);
+
+ hdev->reset_type = HNAE3_NONE_RESET;
+}
+
+static void hclge_handle_err_reset_request(struct hclge_dev *hdev)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+ enum hnae3_reset_type reset_type;
+
+ if (ae_dev->hw_err_reset_req) {
+ reset_type = hclge_get_reset_level(ae_dev,
+ &ae_dev->hw_err_reset_req);
+ hclge_set_def_reset_request(ae_dev, reset_type);
+ }
+
+ if (hdev->default_reset_request && ae_dev->ops->reset_event)
+ ae_dev->ops->reset_event(hdev->pdev, NULL);
+
+ /* enable interrupt after error handling complete */
+ hclge_enable_vector(&hdev->misc_vector, true);
+}
+
+static void hclge_handle_err_recovery(struct hclge_dev *hdev)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+
+ ae_dev->hw_err_reset_req = 0;
+
+ if (hclge_find_error_source(hdev)) {
+ hclge_handle_error_info_log(ae_dev);
+ hclge_handle_mac_tnl(hdev);
+ }
+
+ hclge_handle_err_reset_request(hdev);
+}
+
+static void hclge_misc_err_recovery(struct hclge_dev *hdev)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+ struct device *dev = &hdev->pdev->dev;
+ u32 msix_sts_reg;
+
+ msix_sts_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
+ if (msix_sts_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
+ if (hclge_handle_hw_msix_error
+ (hdev, &hdev->default_reset_request))
+ dev_info(dev, "received msix interrupt 0x%x\n",
+ msix_sts_reg);
+ }
+
+ hclge_handle_hw_ras_error(ae_dev);
+
+ hclge_handle_err_reset_request(hdev);
+}
+
+static void hclge_errhand_service_task(struct hclge_dev *hdev)
+{
+ if (!test_and_clear_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
+ return;
+
+ if (hnae3_dev_ras_imp_supported(hdev))
+ hclge_handle_err_recovery(hdev);
+ else
+ hclge_misc_err_recovery(hdev);
+}
+
+static void hclge_reset_service_task(struct hclge_dev *hdev)
+{
+ if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
+ return;
+
+ if (time_is_before_jiffies(hdev->last_rst_scheduled +
+ HCLGE_RESET_SCHED_TIMEOUT))
+ dev_warn(&hdev->pdev->dev,
+ "reset service task is scheduled after %ums on cpu%u!\n",
+ jiffies_to_msecs(jiffies - hdev->last_rst_scheduled),
+ smp_processor_id());
+
+ down(&hdev->reset_sem);
+ set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
+
+ hclge_reset_subtask(hdev);
+
+ clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
+ up(&hdev->reset_sem);
+}
+
+static void hclge_update_vport_alive(struct hclge_dev *hdev)
+{
+#define HCLGE_ALIVE_SECONDS_NORMAL 8
+
+ unsigned long alive_time = HCLGE_ALIVE_SECONDS_NORMAL * HZ;
+ int i;
+
+	/* start from vport 1 since the PF (vport 0) is always alive */
+ for (i = 1; i < hdev->num_alloc_vport; i++) {
+ struct hclge_vport *vport = &hdev->vport[i];
+
+ if (!test_bit(HCLGE_VPORT_STATE_INITED, &vport->state) ||
+ !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
+ continue;
+ if (time_after(jiffies, vport->last_active_jiffies +
+ alive_time)) {
+ clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
+ dev_warn(&hdev->pdev->dev,
+ "VF %u heartbeat timeout\n",
+ i - HCLGE_VF_VPORT_START_NUM);
+ }
+ }
+}
+
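+/* Periodic service work: link, MAC table, promiscuous mode and FD sync
+ * run every pass; the remaining work (stats, port info, VLAN sync, VF
+ * keep-alive) is rate limited to roughly once per second.
+ */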
+static void hclge_periodic_service_task(struct hclge_dev *hdev)
+{
+ unsigned long delta = round_jiffies_relative(HZ);
+
+ if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
+ return;
+
+	/* Always handle link updating to make sure the link state is
+	 * updated when it is triggered by mailbox.
+	 */
+ hclge_update_link_status(hdev);
+ hclge_sync_mac_table(hdev);
+ hclge_sync_promisc_mode(hdev);
+ hclge_sync_fd_table(hdev);
+
+ if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
+ delta = jiffies - hdev->last_serv_processed;
+
+ if (delta < round_jiffies_relative(HZ)) {
+ delta = round_jiffies_relative(HZ) - delta;
+ goto out;
+ }
+ }
+
+ hdev->serv_processed_cnt++;
+ hclge_update_vport_alive(hdev);
+
+ if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
+ hdev->last_serv_processed = jiffies;
+ goto out;
+ }
+
+ if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
+ hclge_update_stats_for_all(hdev);
+
+ hclge_update_port_info(hdev);
+ hclge_sync_vlan_filter(hdev);
+
+ if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
+ hclge_rfs_filter_expire(hdev);
+
+ hdev->last_serv_processed = jiffies;
+
+out:
+ hclge_task_schedule(hdev, delta);
+}
+
+static void hclge_ptp_service_task(struct hclge_dev *hdev)
+{
+ unsigned long flags;
+
+ if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state) ||
+ !test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state) ||
+ !time_is_before_jiffies(hdev->ptp->tx_start + HZ))
+ return;
+
+	/* to avoid racing with the irq handler */
+ spin_lock_irqsave(&hdev->ptp->lock, flags);
+
+ /* check HCLGE_STATE_PTP_TX_HANDLING here again, since the irq
+ * handler may handle it just before spin_lock_irqsave().
+ */
+ if (test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state))
+ hclge_ptp_clean_tx_hwts(hdev);
+
+ spin_unlock_irqrestore(&hdev->ptp->lock, flags);
+}
+
+static void hclge_service_task(struct work_struct *work)
+{
+ struct hclge_dev *hdev =
+ container_of(work, struct hclge_dev, service_task.work);
+
+ hclge_errhand_service_task(hdev);
+ hclge_reset_service_task(hdev);
+ hclge_ptp_service_task(hdev);
+ hclge_mailbox_service_task(hdev);
+ hclge_periodic_service_task(hdev);
+
+ /* Handle error recovery, reset and mbx again in case periodical task
+ * delays the handling by calling hclge_task_schedule() in
+ * hclge_periodic_service_task().
+ */
+ hclge_errhand_service_task(hdev);
+ hclge_reset_service_task(hdev);
+ hclge_mailbox_service_task(hdev);
+}
+
+struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
+{
+ /* VF handle has no client */
+ if (!handle->client)
+ return container_of(handle, struct hclge_vport, nic);
+ else if (handle->client->type == HNAE3_CLIENT_ROCE)
+ return container_of(handle, struct hclge_vport, roce);
+ else
+ return container_of(handle, struct hclge_vport, nic);
+}
+
+static void hclge_get_vector_info(struct hclge_dev *hdev, u16 idx,
+ struct hnae3_vector_info *vector_info)
+{
+#define HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 64
+
+ vector_info->vector = pci_irq_vector(hdev->pdev, idx);
+
+	/* need an extended offset to configure vectors >= 64 */
+ if (idx - 1 < HCLGE_PF_MAX_VECTOR_NUM_DEV_V2)
+ vector_info->io_addr = hdev->hw.hw.io_base +
+ HCLGE_VECTOR_REG_BASE +
+ (idx - 1) * HCLGE_VECTOR_REG_OFFSET;
+ else
+ vector_info->io_addr = hdev->hw.hw.io_base +
+ HCLGE_VECTOR_EXT_REG_BASE +
+ (idx - 1) / HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
+ HCLGE_VECTOR_REG_OFFSET_H +
+ (idx - 1) % HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
+ HCLGE_VECTOR_REG_OFFSET;
+
+ hdev->vector_status[idx] = hdev->vport[0].vport_id;
+ hdev->vector_irq[idx] = vector_info->vector;
+}
+
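+/* Allocate up to vector_num unused MSI-X vectors for the requesting
+ * vport, filling in their irq numbers and IO addresses. Returns the
+ * number of vectors actually allocated.
+ */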
+static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
+ struct hnae3_vector_info *vector_info)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hnae3_vector_info *vector = vector_info;
+ struct hclge_dev *hdev = vport->back;
+ int alloc = 0;
+ u16 i = 0;
+ u16 j;
+
+ vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
+ vector_num = min(hdev->num_msi_left, vector_num);
+
+ for (j = 0; j < vector_num; j++) {
+ while (++i < hdev->num_nic_msi) {
+ if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
+ hclge_get_vector_info(hdev, i, vector);
+ vector++;
+ alloc++;
+
+ break;
+ }
+ }
+ }
+ hdev->num_msi_left -= alloc;
+ hdev->num_msi_used += alloc;
+
+ return alloc;
+}
+
+static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
+{
+ int i;
+
+ for (i = 0; i < hdev->num_msi; i++)
+ if (vector == hdev->vector_irq[i])
+ return i;
+
+ return -EINVAL;
+}
+
+static int hclge_put_vector(struct hnae3_handle *handle, int vector)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ int vector_id;
+
+ vector_id = hclge_get_vector_index(hdev, vector);
+ if (vector_id < 0) {
+ dev_err(&hdev->pdev->dev,
+ "Get vector index fail. vector = %d\n", vector);
+ return vector_id;
+ }
+
+ hclge_free_vector(hdev, vector_id);
+
+ return 0;
+}
+
+static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
+ u8 *key, u8 *hfunc)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_comm_rss_cfg *rss_cfg = &vport->back->rss_cfg;
+
+ hclge_comm_get_rss_hash_info(rss_cfg, key, hfunc);
+
+ hclge_comm_get_rss_indir_tbl(rss_cfg, indir,
+ ae_dev->dev_specs.rss_ind_tbl_size);
+
+ return 0;
+}
+
+static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
+ int ret, i;
+
+ ret = hclge_comm_set_rss_hash_key(rss_cfg, &hdev->hw.hw, key, hfunc);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "invalid hfunc type %u\n", hfunc);
+ return ret;
+ }
+
+ /* Update the shadow RSS table with user specified qids */
+ for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
+ rss_cfg->rss_indirection_tbl[i] = indir[i];
+
+ /* Update the hardware */
+ return hclge_comm_set_rss_indir_table(ae_dev, &hdev->hw.hw,
+ rss_cfg->rss_indirection_tbl);
+}
+
+static int hclge_set_rss_tuple(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *nfc)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+
+ ret = hclge_comm_set_rss_tuple(hdev->ae_dev, &hdev->hw.hw,
+ &hdev->rss_cfg, nfc);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to set rss tuple, ret = %d.\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hclge_get_rss_tuple(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *nfc)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ u8 tuple_sets;
+ int ret;
+
+ nfc->data = 0;
+
+ ret = hclge_comm_get_rss_tuple(&vport->back->rss_cfg, nfc->flow_type,
+ &tuple_sets);
+ if (ret || !tuple_sets)
+ return ret;
+
+ nfc->data = hclge_comm_convert_rss_tuple(tuple_sets);
+
+ return 0;
+}
+
+static int hclge_get_tc_size(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ return hdev->pf_rss_size_max;
+}
+
+static int hclge_init_rss_tc_mode(struct hclge_dev *hdev)
+{
+ struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
+ struct hclge_vport *vport = hdev->vport;
+ u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
+ u16 tc_valid[HCLGE_MAX_TC_NUM] = {0};
+ u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
+ struct hnae3_tc_info *tc_info;
+ u16 roundup_size;
+ u16 rss_size;
+ int i;
+
+ tc_info = &vport->nic.kinfo.tc_info;
+ for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
+ rss_size = tc_info->tqp_count[i];
+ tc_valid[i] = 0;
+
+ if (!(hdev->hw_tc_map & BIT(i)))
+ continue;
+
+		/* tc_size set to hardware is the log2 of the roundup power of
+		 * two of rss_size; the actual queue size is limited by the
+		 * indirection table.
+		 */
+ if (rss_size > ae_dev->dev_specs.rss_ind_tbl_size ||
+ rss_size == 0) {
+ dev_err(&hdev->pdev->dev,
+ "Configure rss tc size failed, invalid TC_SIZE = %u\n",
+ rss_size);
+ return -EINVAL;
+ }
+
+ roundup_size = roundup_pow_of_two(rss_size);
+ roundup_size = ilog2(roundup_size);
+
+ tc_valid[i] = 1;
+ tc_size[i] = roundup_size;
+ tc_offset[i] = tc_info->tqp_offset[i];
+ }
+
+ return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset, tc_valid,
+ tc_size);
+}
+
+int hclge_rss_init_hw(struct hclge_dev *hdev)
+{
+ u16 *rss_indir = hdev->rss_cfg.rss_indirection_tbl;
+ u8 *key = hdev->rss_cfg.rss_hash_key;
+ u8 hfunc = hdev->rss_cfg.rss_algo;
+ int ret;
+
+ ret = hclge_comm_set_rss_indir_table(hdev->ae_dev, &hdev->hw.hw,
+ rss_indir);
+ if (ret)
+ return ret;
+
+ ret = hclge_comm_set_rss_algo_key(&hdev->hw.hw, hfunc, key);
+ if (ret)
+ return ret;
+
+ ret = hclge_comm_set_rss_input_tuple(&hdev->hw.hw, &hdev->rss_cfg);
+ if (ret)
+ return ret;
+
+ return hclge_init_rss_tc_mode(hdev);
+}
+
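+/* Map (or unmap, when en is false) the TQP rings in ring_chain to the
+ * given interrupt vector, batching up to HCLGE_VECTOR_ELEMENTS_PER_CMD
+ * rings per command.
+ */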
+int hclge_bind_ring_with_vector(struct hclge_vport *vport,
+ int vector_id, bool en,
+ struct hnae3_ring_chain_node *ring_chain)
+{
+ struct hclge_dev *hdev = vport->back;
+ struct hnae3_ring_chain_node *node;
+ struct hclge_desc desc;
+ struct hclge_ctrl_vector_chain_cmd *req =
+ (struct hclge_ctrl_vector_chain_cmd *)desc.data;
+ enum hclge_comm_cmd_status status;
+ enum hclge_opcode_type op;
+ u16 tqp_type_and_id;
+ int i;
+
+ op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
+ hclge_cmd_setup_basic_desc(&desc, op, false);
+ req->int_vector_id_l = hnae3_get_field(vector_id,
+ HCLGE_VECTOR_ID_L_M,
+ HCLGE_VECTOR_ID_L_S);
+ req->int_vector_id_h = hnae3_get_field(vector_id,
+ HCLGE_VECTOR_ID_H_M,
+ HCLGE_VECTOR_ID_H_S);
+
+ i = 0;
+ for (node = ring_chain; node; node = node->next) {
+ tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
+ hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
+ HCLGE_INT_TYPE_S,
+ hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
+ hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
+ HCLGE_TQP_ID_S, node->tqp_index);
+ hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
+ HCLGE_INT_GL_IDX_S,
+ hnae3_get_field(node->int_gl_idx,
+ HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S));
+ req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
+ if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
+ req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
+ req->vfid = vport->vport_id;
+
+ status = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (status) {
+ dev_err(&hdev->pdev->dev,
+ "Map TQP fail, status is %d.\n",
+ status);
+ return -EIO;
+ }
+ i = 0;
+
+ hclge_cmd_setup_basic_desc(&desc,
+ op,
+ false);
+ req->int_vector_id_l =
+ hnae3_get_field(vector_id,
+ HCLGE_VECTOR_ID_L_M,
+ HCLGE_VECTOR_ID_L_S);
+ req->int_vector_id_h =
+ hnae3_get_field(vector_id,
+ HCLGE_VECTOR_ID_H_M,
+ HCLGE_VECTOR_ID_H_S);
+ }
+ }
+
+ if (i > 0) {
+ req->int_cause_num = i;
+ req->vfid = vport->vport_id;
+ status = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (status) {
+ dev_err(&hdev->pdev->dev,
+ "Map TQP fail, status is %d.\n", status);
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
+ struct hnae3_ring_chain_node *ring_chain)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ int vector_id;
+
+ vector_id = hclge_get_vector_index(hdev, vector);
+ if (vector_id < 0) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get vector index. vector=%d\n", vector);
+ return vector_id;
+ }
+
+ return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
+}
+
+static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
+ struct hnae3_ring_chain_node *ring_chain)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ int vector_id, ret;
+
+ if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
+ return 0;
+
+ vector_id = hclge_get_vector_index(hdev, vector);
+ if (vector_id < 0) {
+ dev_err(&handle->pdev->dev,
+ "Get vector index fail. ret =%d\n", vector_id);
+ return vector_id;
+ }
+
+ ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
+ if (ret)
+ dev_err(&handle->pdev->dev,
+ "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
+ vector_id, ret);
+
+ return ret;
+}
+
+static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, u8 vf_id,
+ bool en_uc, bool en_mc, bool en_bc)
+{
+ struct hclge_vport *vport = &hdev->vport[vf_id];
+ struct hnae3_handle *handle = &vport->nic;
+ struct hclge_promisc_cfg_cmd *req;
+ struct hclge_desc desc;
+ bool uc_tx_en = en_uc;
+ u8 promisc_cfg = 0;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
+
+ req = (struct hclge_promisc_cfg_cmd *)desc.data;
+ req->vf_id = vf_id;
+
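+ /* When the limit-promisc private flag is set, keep unicast TX
+ * promisc disabled even if unicast promisc is requested.
+ */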
+ if (test_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags))
+ uc_tx_en = false;
+
+ hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_RX_EN, en_uc ? 1 : 0);
+ hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_RX_EN, en_mc ? 1 : 0);
+ hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_RX_EN, en_bc ? 1 : 0);
+ hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_TX_EN, uc_tx_en ? 1 : 0);
+ hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_TX_EN, en_mc ? 1 : 0);
+ hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_TX_EN, en_bc ? 1 : 0);
+ req->extend_promisc = promisc_cfg;
+
+ /* to be compatible with DEVICE_VERSION_V1/2 */
+ promisc_cfg = 0;
+ hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_UC, en_uc ? 1 : 0);
+ hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_MC, en_mc ? 1 : 0);
+ hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_BC, en_bc ? 1 : 0);
+ hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_TX_EN, 1);
+ hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_RX_EN, 1);
+ req->promisc = promisc_cfg;
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "failed to set vport %u promisc mode, ret = %d.\n",
+ vf_id, ret);
+
+ return ret;
+}
+
+int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
+ bool en_mc_pmc, bool en_bc_pmc)
+{
+ return hclge_cmd_set_promisc_mode(vport->back, vport->vport_id,
+ en_uc_pmc, en_mc_pmc, en_bc_pmc);
+}
+
+static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
+ bool en_mc_pmc)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ bool en_bc_pmc = true;
+
+ /* For devices whose version is below V2, the vlan filter is always
+ * bypassed when broadcast promisc is enabled. So broadcast promisc
+ * should be disabled until the user enables promisc mode.
+ */
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
+ en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
+
+ return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
+ en_bc_pmc);
+}
+
+static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+
+ set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
+}
+
+static void hclge_sync_fd_state(struct hclge_dev *hdev)
+{
+ if (hlist_empty(&hdev->fd_rule_list))
+ hdev->fd_active_type = HCLGE_FD_RULE_NONE;
+}
+
+static void hclge_fd_inc_rule_cnt(struct hclge_dev *hdev, u16 location)
+{
+ if (!test_bit(location, hdev->fd_bmap)) {
+ set_bit(location, hdev->fd_bmap);
+ hdev->hclge_fd_rule_num++;
+ }
+}
+
+static void hclge_fd_dec_rule_cnt(struct hclge_dev *hdev, u16 location)
+{
+ if (test_bit(location, hdev->fd_bmap)) {
+ clear_bit(location, hdev->fd_bmap);
+ hdev->hclge_fd_rule_num--;
+ }
+}
+
+static void hclge_fd_free_node(struct hclge_dev *hdev,
+ struct hclge_fd_rule *rule)
+{
+ hlist_del(&rule->rule_node);
+ kfree(rule);
+ hclge_sync_fd_state(hdev);
+}
+
+static void hclge_update_fd_rule_node(struct hclge_dev *hdev,
+ struct hclge_fd_rule *old_rule,
+ struct hclge_fd_rule *new_rule,
+ enum HCLGE_FD_NODE_STATE state)
+{
+ switch (state) {
+ case HCLGE_FD_TO_ADD:
+ case HCLGE_FD_ACTIVE:
+ /* 1) if the new state is TO_ADD, just replace the old rule
+ * with the same location, no matter its state, because the
+ * new rule will be configured to the hardware.
+ * 2) if the new state is ACTIVE, it means the new rule
+ * has been configured to the hardware, so just replace
+ * the old rule node with the same location.
+ * 3) in both cases no new node is added to the list, so it's
+ * unnecessary to update the rule number and fd_bmap.
+ */
+ new_rule->rule_node.next = old_rule->rule_node.next;
+ new_rule->rule_node.pprev = old_rule->rule_node.pprev;
+ memcpy(old_rule, new_rule, sizeof(*old_rule));
+ kfree(new_rule);
+ break;
+ case HCLGE_FD_DELETED:
+ hclge_fd_dec_rule_cnt(hdev, old_rule->location);
+ hclge_fd_free_node(hdev, old_rule);
+ break;
+ case HCLGE_FD_TO_DEL:
+ /* if the new request is TO_DEL and the old rule exists:
+ * 1) if the state of the old rule is TO_DEL, do nothing,
+ * because rules are deleted by location and the rest of the
+ * rule content is unnecessary.
+ * 2) if the state of the old rule is ACTIVE, change its state
+ * to TO_DEL, so the rule will be deleted when the periodic
+ * task is scheduled.
+ * 3) if the state of the old rule is TO_ADD, the rule hasn't
+ * been added to hardware yet, so just delete the rule node
+ * from fd_rule_list directly.
+ */
+ if (old_rule->state == HCLGE_FD_TO_ADD) {
+ hclge_fd_dec_rule_cnt(hdev, old_rule->location);
+ hclge_fd_free_node(hdev, old_rule);
+ return;
+ }
+ old_rule->state = HCLGE_FD_TO_DEL;
+ break;
+ }
+}
+
+static struct hclge_fd_rule *hclge_find_fd_rule(struct hlist_head *hlist,
+ u16 location,
+ struct hclge_fd_rule **parent)
+{
+ struct hclge_fd_rule *rule;
+ struct hlist_node *node;
+
+ hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
+ if (rule->location == location)
+ return rule;
+ else if (rule->location > location)
+ return NULL;
+ /* record the parent node, used to keep the nodes in fd_rule_list
+ * in ascending order.
+ */
+ *parent = rule;
+ }
+
+ return NULL;
+}
+
+/* insert fd rule node in ascending order according to rule->location */
+static void hclge_fd_insert_rule_node(struct hlist_head *hlist,
+ struct hclge_fd_rule *rule,
+ struct hclge_fd_rule *parent)
+{
+ INIT_HLIST_NODE(&rule->rule_node);
+
+ if (parent)
+ hlist_add_behind(&rule->rule_node, &parent->rule_node);
+ else
+ hlist_add_head(&rule->rule_node, hlist);
+}
+
+static int hclge_fd_set_user_def_cmd(struct hclge_dev *hdev,
+ struct hclge_fd_user_def_cfg *cfg)
+{
+ struct hclge_fd_user_def_cfg_cmd *req;
+ struct hclge_desc desc;
+ u16 data = 0;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_USER_DEF_OP, false);
+
+ req = (struct hclge_fd_user_def_cfg_cmd *)desc.data;
+
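+ /* cfg[0], cfg[1] and cfg[2] hold the user-def offsets for the
+ * L2, L3 and L4 layers, programmed into the OL2/OL3/OL4 fields.
+ */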
+ hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[0].ref_cnt > 0);
+ hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
+ HCLGE_FD_USER_DEF_OFT_S, cfg[0].offset);
+ req->ol2_cfg = cpu_to_le16(data);
+
+ data = 0;
+ hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[1].ref_cnt > 0);
+ hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
+ HCLGE_FD_USER_DEF_OFT_S, cfg[1].offset);
+ req->ol3_cfg = cpu_to_le16(data);
+
+ data = 0;
+ hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[2].ref_cnt > 0);
+ hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
+ HCLGE_FD_USER_DEF_OFT_S, cfg[2].offset);
+ req->ol4_cfg = cpu_to_le16(data);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "failed to set fd user def data, ret= %d\n", ret);
+ return ret;
+}
+
+static void hclge_sync_fd_user_def_cfg(struct hclge_dev *hdev, bool locked)
+{
+ int ret;
+
+ if (!test_and_clear_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state))
+ return;
+
+ if (!locked)
+ spin_lock_bh(&hdev->fd_rule_lock);
+
+ ret = hclge_fd_set_user_def_cmd(hdev, hdev->fd_cfg.user_def_cfg);
+ if (ret)
+ set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
+
+ if (!locked)
+ spin_unlock_bh(&hdev->fd_rule_lock);
+}
+
+static int hclge_fd_check_user_def_refcnt(struct hclge_dev *hdev,
+ struct hclge_fd_rule *rule)
+{
+ struct hlist_head *hlist = &hdev->fd_rule_list;
+ struct hclge_fd_rule *fd_rule, *parent = NULL;
+ struct hclge_fd_user_def_info *info, *old_info;
+ struct hclge_fd_user_def_cfg *cfg;
+
+ if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
+ rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
+ return 0;
+
+ /* valid layers start from 1, so subtract 1 to index the cfg array */
+ cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
+ info = &rule->ep.user_def;
+
+ if (!cfg->ref_cnt || cfg->offset == info->offset)
+ return 0;
+
+ if (cfg->ref_cnt > 1)
+ goto error;
+
+ fd_rule = hclge_find_fd_rule(hlist, rule->location, &parent);
+ if (fd_rule) {
+ old_info = &fd_rule->ep.user_def;
+ if (info->layer == old_info->layer)
+ return 0;
+ }
+
+error:
+ dev_err(&hdev->pdev->dev,
+ "No available offset for layer%d fd rule, each layer only support one user def offset.\n",
+ info->layer + 1);
+ return -ENOSPC;
+}
+
+static void hclge_fd_inc_user_def_refcnt(struct hclge_dev *hdev,
+ struct hclge_fd_rule *rule)
+{
+ struct hclge_fd_user_def_cfg *cfg;
+
+ if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
+ rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
+ return;
+
+ cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
+ if (!cfg->ref_cnt) {
+ cfg->offset = rule->ep.user_def.offset;
+ set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
+ }
+ cfg->ref_cnt++;
+}
+
+static void hclge_fd_dec_user_def_refcnt(struct hclge_dev *hdev,
+ struct hclge_fd_rule *rule)
+{
+ struct hclge_fd_user_def_cfg *cfg;
+
+ if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
+ rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
+ return;
+
+ cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
+ if (!cfg->ref_cnt)
+ return;
+
+ cfg->ref_cnt--;
+ if (!cfg->ref_cnt) {
+ cfg->offset = 0;
+ set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
+ }
+}
+
+static void hclge_update_fd_list(struct hclge_dev *hdev,
+ enum HCLGE_FD_NODE_STATE state, u16 location,
+ struct hclge_fd_rule *new_rule)
+{
+ struct hlist_head *hlist = &hdev->fd_rule_list;
+ struct hclge_fd_rule *fd_rule, *parent = NULL;
+
+ fd_rule = hclge_find_fd_rule(hlist, location, &parent);
+ if (fd_rule) {
+ hclge_fd_dec_user_def_refcnt(hdev, fd_rule);
+ if (state == HCLGE_FD_ACTIVE)
+ hclge_fd_inc_user_def_refcnt(hdev, new_rule);
+ hclge_sync_fd_user_def_cfg(hdev, true);
+
+ hclge_update_fd_rule_node(hdev, fd_rule, new_rule, state);
+ return;
+ }
+
+ /* it's unlikely to fail here, because we have checked that the
+ * rule exists before.
+ */
+ if (unlikely(state == HCLGE_FD_TO_DEL || state == HCLGE_FD_DELETED)) {
+ dev_warn(&hdev->pdev->dev,
+ "failed to delete fd rule %u, it's inexistent\n",
+ location);
+ return;
+ }
+
+ hclge_fd_inc_user_def_refcnt(hdev, new_rule);
+ hclge_sync_fd_user_def_cfg(hdev, true);
+
+ hclge_fd_insert_rule_node(hlist, new_rule, parent);
+ hclge_fd_inc_rule_cnt(hdev, new_rule->location);
+
+ if (state == HCLGE_FD_TO_ADD) {
+ set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
+ hclge_task_schedule(hdev, 0);
+ }
+}
+
+static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
+{
+ struct hclge_get_fd_mode_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
+
+ req = (struct hclge_get_fd_mode_cmd *)desc.data;
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
+ return ret;
+ }
+
+ *fd_mode = req->mode;
+
+ return ret;
+}
+
+static int hclge_get_fd_allocation(struct hclge_dev *hdev,
+ u32 *stage1_entry_num,
+ u32 *stage2_entry_num,
+ u16 *stage1_counter_num,
+ u16 *stage2_counter_num)
+{
+ struct hclge_get_fd_allocation_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
+
+ req = (struct hclge_get_fd_allocation_cmd *)desc.data;
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
+ ret);
+ return ret;
+ }
+
+ *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
+ *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
+ *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
+ *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
+
+ return ret;
+}
+
+static int hclge_set_fd_key_config(struct hclge_dev *hdev,
+ enum HCLGE_FD_STAGE stage_num)
+{
+ struct hclge_set_fd_key_config_cmd *req;
+ struct hclge_fd_key_cfg *stage;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
+
+ req = (struct hclge_set_fd_key_config_cmd *)desc.data;
+ stage = &hdev->fd_cfg.key_cfg[stage_num];
+ req->stage = stage_num;
+ req->key_select = stage->key_sel;
+ req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
+ req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
+ req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
+ req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
+ req->tuple_mask = cpu_to_le32(~stage->tuple_active);
+ req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
+
+ return ret;
+}
+
+static void hclge_fd_disable_user_def(struct hclge_dev *hdev)
+{
+ struct hclge_fd_user_def_cfg *cfg = hdev->fd_cfg.user_def_cfg;
+
+ spin_lock_bh(&hdev->fd_rule_lock);
+ memset(cfg, 0, sizeof(hdev->fd_cfg.user_def_cfg));
+ spin_unlock_bh(&hdev->fd_rule_lock);
+
+ hclge_fd_set_user_def_cmd(hdev, cfg);
+}
+
+static int hclge_init_fd_config(struct hclge_dev *hdev)
+{
+#define LOW_2_WORDS 0x03
+ struct hclge_fd_key_cfg *key_cfg;
+ int ret;
+
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
+ return 0;
+
+ ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
+ if (ret)
+ return ret;
+
+ switch (hdev->fd_cfg.fd_mode) {
+ case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
+ hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
+ break;
+ case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
+ hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
+ break;
+ default:
+ dev_err(&hdev->pdev->dev,
+ "Unsupported flow director mode %u\n",
+ hdev->fd_cfg.fd_mode);
+ return -EOPNOTSUPP;
+ }
+
+ key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
+ key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
+ key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
+ key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
+ key_cfg->outer_sipv6_word_en = 0;
+ key_cfg->outer_dipv6_word_en = 0;
+
+ key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
+ BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
+ BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
+ BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
+
+ /* If the max 400-bit key is used, the MAC address tuples can also be supported */
+ if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
+ key_cfg->tuple_active |=
+ BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
+ key_cfg->tuple_active |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
+ }
+
+ /* roce_type is used to filter roce frames
+ * dst_vport is used to specify the rule
+ */
+ key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
+
+ ret = hclge_get_fd_allocation(hdev,
+ &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
+ &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
+ &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
+ &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
+ if (ret)
+ return ret;
+
+ return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
+}
+
+static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
+ int loc, u8 *key, bool is_add)
+{
+ struct hclge_fd_tcam_config_1_cmd *req1;
+ struct hclge_fd_tcam_config_2_cmd *req2;
+ struct hclge_fd_tcam_config_3_cmd *req3;
+ struct hclge_desc desc[3];
+ int ret;
+
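+ /* The TCAM key is split across three descriptors chained with the
+ * NEXT flag; sel_x selects whether the x or y half of the key pair
+ * is written.
+ */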
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
+ desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
+
+ req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
+ req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
+ req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
+
+ req1->stage = stage;
+ req1->xy_sel = sel_x ? 1 : 0;
+ hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
+ req1->index = cpu_to_le32(loc);
+ req1->entry_vld = sel_x ? is_add : 0;
+
+ if (key) {
+ memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
+ memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
+ sizeof(req2->tcam_data));
+ memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
+ sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
+ }
+
+ ret = hclge_cmd_send(&hdev->hw, desc, 3);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "config tcam key fail, ret=%d\n",
+ ret);
+
+ return ret;
+}
+
+static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
+ struct hclge_fd_ad_data *action)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+ struct hclge_fd_ad_config_cmd *req;
+ struct hclge_desc desc;
+ u64 ad_data = 0;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
+
+ req = (struct hclge_fd_ad_config_cmd *)desc.data;
+ req->index = cpu_to_le32(loc);
+ req->stage = stage;
+
+ hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
+ action->write_rule_id_to_bd);
+ hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
+ action->rule_id);
+ if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) {
+ hnae3_set_bit(ad_data, HCLGE_FD_AD_TC_OVRD_B,
+ action->override_tc);
+ hnae3_set_field(ad_data, HCLGE_FD_AD_TC_SIZE_M,
+ HCLGE_FD_AD_TC_SIZE_S, (u32)action->tc_size);
+ }
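+ /* The fields above occupy the upper 32 bits of ad_data, so shift
+ * them up before filling in the lower 32 bits.
+ */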
+ ad_data <<= 32;
+ hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
+ hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
+ action->forward_to_direct_queue);
+ hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
+ action->queue_id);
+ hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
+ hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
+ HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
+ hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
+ hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
+ action->counter_id);
+
+ req->ad_data = cpu_to_le64(ad_data);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
+
+ return ret;
+}
+
+static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
+ struct hclge_fd_rule *rule)
+{
+ int offset, moffset, ip_offset;
+ enum HCLGE_FD_KEY_OPT key_opt;
+ u16 tmp_x_s, tmp_y_s;
+ u32 tmp_x_l, tmp_y_l;
+ u8 *p = (u8 *)rule;
+ int i;
+
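+ /* Convert the (value, mask) pair of each enabled tuple into the
+ * TCAM x/y key format; tuples marked unused are skipped.
+ */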
+ if (rule->unused_tuple & BIT(tuple_bit))
+ return true;
+
+ key_opt = tuple_key_info[tuple_bit].key_opt;
+ offset = tuple_key_info[tuple_bit].offset;
+ moffset = tuple_key_info[tuple_bit].moffset;
+
+ switch (key_opt) {
+ case KEY_OPT_U8:
+ calc_x(*key_x, p[offset], p[moffset]);
+ calc_y(*key_y, p[offset], p[moffset]);
+
+ return true;
+ case KEY_OPT_LE16:
+ calc_x(tmp_x_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
+ calc_y(tmp_y_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
+ *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
+ *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
+
+ return true;
+ case KEY_OPT_LE32:
+ calc_x(tmp_x_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
+ calc_y(tmp_y_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
+ *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
+ *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
+
+ return true;
+ case KEY_OPT_MAC:
+ for (i = 0; i < ETH_ALEN; i++) {
+ calc_x(key_x[ETH_ALEN - 1 - i], p[offset + i],
+ p[moffset + i]);
+ calc_y(key_y[ETH_ALEN - 1 - i], p[offset + i],
+ p[moffset + i]);
+ }
+
+ return true;
+ case KEY_OPT_IP:
+ ip_offset = IPV4_INDEX * sizeof(u32);
+ calc_x(tmp_x_l, *(u32 *)(&p[offset + ip_offset]),
+ *(u32 *)(&p[moffset + ip_offset]));
+ calc_y(tmp_y_l, *(u32 *)(&p[offset + ip_offset]),
+ *(u32 *)(&p[moffset + ip_offset]));
+ *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
+ *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
+
+ return true;
+ default:
+ return false;
+ }
+}
+
+static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
+ u8 vf_id, u8 network_port_id)
+{
+ u32 port_number = 0;
+
+ if (port_type == HOST_PORT) {
+ hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
+ pf_id);
+ hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
+ vf_id);
+ hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
+ } else {
+ hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
+ HCLGE_NETWORK_PORT_ID_S, network_port_id);
+ hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
+ }
+
+ return port_number;
+}
+
+static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
+ __le32 *key_x, __le32 *key_y,
+ struct hclge_fd_rule *rule)
+{
+ u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
+ u8 cur_pos = 0, tuple_size, shift_bits;
+ unsigned int i;
+
+ for (i = 0; i < MAX_META_DATA; i++) {
+ tuple_size = meta_data_key_info[i].key_length;
+ tuple_bit = key_cfg->meta_data_active & BIT(i);
+
+ switch (tuple_bit) {
+ case BIT(ROCE_TYPE):
+ hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
+ cur_pos += tuple_size;
+ break;
+ case BIT(DST_VPORT):
+ port_number = hclge_get_port_number(HOST_PORT, 0,
+ rule->vf_id, 0);
+ hnae3_set_field(meta_data,
+ GENMASK(cur_pos + tuple_size, cur_pos),
+ cur_pos, port_number);
+ cur_pos += tuple_size;
+ break;
+ default:
+ break;
+ }
+ }
+
+ calc_x(tmp_x, meta_data, 0xFFFFFFFF);
+ calc_y(tmp_y, meta_data, 0xFFFFFFFF);
+ shift_bits = sizeof(meta_data) * 8 - cur_pos;
+
+ *key_x = cpu_to_le32(tmp_x << shift_bits);
+ *key_y = cpu_to_le32(tmp_y << shift_bits);
+}
+
+/* A complete key consists of a meta data key and a tuple key.
+ * The meta data key is stored in the MSB region and the tuple key in
+ * the LSB region; unused bits are filled with 0.
+ */
+static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
+ struct hclge_fd_rule *rule)
+{
+ struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
+ u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
+ u8 *cur_key_x, *cur_key_y;
+ u8 meta_data_region;
+ u8 tuple_size;
+ int ret;
+ u32 i;
+
+ memset(key_x, 0, sizeof(key_x));
+ memset(key_y, 0, sizeof(key_y));
+ cur_key_x = key_x;
+ cur_key_y = key_y;
+
+ for (i = 0; i < MAX_TUPLE; i++) {
+ bool tuple_valid;
+
+ tuple_size = tuple_key_info[i].key_length / 8;
+ if (!(key_cfg->tuple_active & BIT(i)))
+ continue;
+
+ tuple_valid = hclge_fd_convert_tuple(i, cur_key_x,
+ cur_key_y, rule);
+ if (tuple_valid) {
+ cur_key_x += tuple_size;
+ cur_key_y += tuple_size;
+ }
+ }
+
+ meta_data_region = hdev->fd_cfg.max_key_length / 8 -
+ MAX_META_DATA_LENGTH / 8;
+
+ hclge_fd_convert_meta_data(key_cfg,
+ (__le32 *)(key_x + meta_data_region),
+ (__le32 *)(key_y + meta_data_region),
+ rule);
+
+ ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
+ true);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "fd key_y config fail, loc=%u, ret=%d\n",
+ rule->location, ret);
+ return ret;
+ }
+
+ ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
+ true);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "fd key_x config fail, loc=%u, ret=%d\n",
+ rule->location, ret);
+ return ret;
+}
+
+static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
+ struct hclge_fd_rule *rule)
+{
+ struct hclge_vport *vport = hdev->vport;
+ struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
+ struct hclge_fd_ad_data ad_data;
+
+ memset(&ad_data, 0, sizeof(struct hclge_fd_ad_data));
+ ad_data.ad_id = rule->location;
+
+ if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
+ ad_data.drop_packet = true;
+ } else if (rule->action == HCLGE_FD_ACTION_SELECT_TC) {
+ ad_data.override_tc = true;
+ ad_data.queue_id =
+ kinfo->tc_info.tqp_offset[rule->cls_flower.tc];
+ ad_data.tc_size =
+ ilog2(kinfo->tc_info.tqp_count[rule->cls_flower.tc]);
+ } else {
+ ad_data.forward_to_direct_queue = true;
+ ad_data.queue_id = rule->queue_id;
+ }
+
+ if (hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1]) {
+ ad_data.use_counter = true;
+ ad_data.counter_id = rule->vf_id %
+ hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1];
+ } else {
+ ad_data.use_counter = false;
+ ad_data.counter_id = 0;
+ }
+
+ ad_data.use_next_stage = false;
+ ad_data.next_input_key = 0;
+
+ ad_data.write_rule_id_to_bd = true;
+ ad_data.rule_id = rule->location;
+
+ return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
+}
+
+static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
+ u32 *unused_tuple)
+{
+ if (!spec || !unused_tuple)
+ return -EINVAL;
+
+ *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
+
+ if (!spec->ip4src)
+ *unused_tuple |= BIT(INNER_SRC_IP);
+
+ if (!spec->ip4dst)
+ *unused_tuple |= BIT(INNER_DST_IP);
+
+ if (!spec->psrc)
+ *unused_tuple |= BIT(INNER_SRC_PORT);
+
+ if (!spec->pdst)
+ *unused_tuple |= BIT(INNER_DST_PORT);
+
+ if (!spec->tos)
+ *unused_tuple |= BIT(INNER_IP_TOS);
+
+ return 0;
+}
+
+static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
+ u32 *unused_tuple)
+{
+ if (!spec || !unused_tuple)
+ return -EINVAL;
+
+ *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
+ BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
+
+ if (!spec->ip4src)
+ *unused_tuple |= BIT(INNER_SRC_IP);
+
+ if (!spec->ip4dst)
+ *unused_tuple |= BIT(INNER_DST_IP);
+
+ if (!spec->tos)
+ *unused_tuple |= BIT(INNER_IP_TOS);
+
+ if (!spec->proto)
+ *unused_tuple |= BIT(INNER_IP_PROTO);
+
+ if (spec->l4_4_bytes)
+ return -EOPNOTSUPP;
+
+ if (spec->ip_ver != ETH_RX_NFC_IP4)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
+ u32 *unused_tuple)
+{
+ if (!spec || !unused_tuple)
+ return -EINVAL;
+
+ *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
+
+ /* check whether the src/dst ip addresses are used */
+ if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
+ *unused_tuple |= BIT(INNER_SRC_IP);
+
+ if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
+ *unused_tuple |= BIT(INNER_DST_IP);
+
+ if (!spec->psrc)
+ *unused_tuple |= BIT(INNER_SRC_PORT);
+
+ if (!spec->pdst)
+ *unused_tuple |= BIT(INNER_DST_PORT);
+
+ if (!spec->tclass)
+ *unused_tuple |= BIT(INNER_IP_TOS);
+
+ return 0;
+}
+
+static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
+ u32 *unused_tuple)
+{
+ if (!spec || !unused_tuple)
+ return -EINVAL;
+
+ *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
+ BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
+
+ /* check whether the src/dst ip addresses are used */
+ if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
+ *unused_tuple |= BIT(INNER_SRC_IP);
+
+ if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
+ *unused_tuple |= BIT(INNER_DST_IP);
+
+ if (!spec->l4_proto)
+ *unused_tuple |= BIT(INNER_IP_PROTO);
+
+ if (!spec->tclass)
+ *unused_tuple |= BIT(INNER_IP_TOS);
+
+ if (spec->l4_4_bytes)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
+{
+ if (!spec || !unused_tuple)
+ return -EINVAL;
+
+ *unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
+ BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
+ BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
+
+ if (is_zero_ether_addr(spec->h_source))
+ *unused_tuple |= BIT(INNER_SRC_MAC);
+
+ if (is_zero_ether_addr(spec->h_dest))
+ *unused_tuple |= BIT(INNER_DST_MAC);
+
+ if (!spec->h_proto)
+ *unused_tuple |= BIT(INNER_ETH_TYPE);
+
+ return 0;
+}
+
+static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
+ struct ethtool_rx_flow_spec *fs,
+ u32 *unused_tuple)
+{
+ if (fs->flow_type & FLOW_EXT) {
+ if (fs->h_ext.vlan_etype) {
+ dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (!fs->h_ext.vlan_tci)
+ *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
+
+ if (fs->m_ext.vlan_tci &&
+ be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
+ dev_err(&hdev->pdev->dev,
+ "failed to config vlan_tci, invalid vlan_tci: %u, max is %d.\n",
+ ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
+ return -EINVAL;
+ }
+ } else {
+ *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
+ }
+
+ if (fs->flow_type & FLOW_MAC_EXT) {
+ if (hdev->fd_cfg.fd_mode !=
+ HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
+ dev_err(&hdev->pdev->dev,
+ "FLOW_MAC_EXT is not supported in current fd mode!\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (is_zero_ether_addr(fs->h_ext.h_dest))
+ *unused_tuple |= BIT(INNER_DST_MAC);
+ else
+ *unused_tuple &= ~BIT(INNER_DST_MAC);
+ }
+
+ return 0;
+}
+
+static int hclge_fd_get_user_def_layer(u32 flow_type, u32 *unused_tuple,
+ struct hclge_fd_user_def_info *info)
+{
+ switch (flow_type) {
+ case ETHER_FLOW:
+ info->layer = HCLGE_FD_USER_DEF_L2;
+ *unused_tuple &= ~BIT(INNER_L2_RSV);
+ break;
+ case IP_USER_FLOW:
+ case IPV6_USER_FLOW:
+ info->layer = HCLGE_FD_USER_DEF_L3;
+ *unused_tuple &= ~BIT(INNER_L3_RSV);
+ break;
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ info->layer = HCLGE_FD_USER_DEF_L4;
+ *unused_tuple &= ~BIT(INNER_L4_RSV);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static bool hclge_fd_is_user_def_all_masked(struct ethtool_rx_flow_spec *fs)
+{
+ return be32_to_cpu(fs->m_ext.data[1] | fs->m_ext.data[0]) == 0;
+}
+
+static int hclge_fd_parse_user_def_field(struct hclge_dev *hdev,
+ struct ethtool_rx_flow_spec *fs,
+ u32 *unused_tuple,
+ struct hclge_fd_user_def_info *info)
+{
+ u32 tuple_active = hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1].tuple_active;
+ u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
+ u16 data, offset, data_mask, offset_mask;
+ int ret;
+
+ info->layer = HCLGE_FD_USER_DEF_NONE;
+ *unused_tuple |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
+
+ if (!(fs->flow_type & FLOW_EXT) || hclge_fd_is_user_def_all_masked(fs))
+ return 0;
+
+ /* the user-def data from ethtool is a 64-bit value; bits 0~15 are
+ * used for data, and bits 32~47 are used for offset.
+ */
+ data = be32_to_cpu(fs->h_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
+ data_mask = be32_to_cpu(fs->m_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
+ offset = be32_to_cpu(fs->h_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
+ offset_mask = be32_to_cpu(fs->m_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
+
+ if (!(tuple_active & HCLGE_FD_TUPLE_USER_DEF_TUPLES)) {
+ dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (offset > HCLGE_FD_MAX_USER_DEF_OFFSET) {
+ dev_err(&hdev->pdev->dev,
+ "user-def offset[%u] should be no more than %u\n",
+ offset, HCLGE_FD_MAX_USER_DEF_OFFSET);
+ return -EINVAL;
+ }
+
+ if (offset_mask != HCLGE_FD_USER_DEF_OFFSET_UNMASK) {
+ dev_err(&hdev->pdev->dev, "user-def offset can't be masked\n");
+ return -EINVAL;
+ }
+
+ ret = hclge_fd_get_user_def_layer(flow_type, unused_tuple, info);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "unsupported flow type for user-def bytes, ret = %d\n",
+ ret);
+ return ret;
+ }
+
+ info->data = data;
+ info->data_mask = data_mask;
+ info->offset = offset;
+
+ return 0;
+}
+
+static int hclge_fd_check_spec(struct hclge_dev *hdev,
+ struct ethtool_rx_flow_spec *fs,
+ u32 *unused_tuple,
+ struct hclge_fd_user_def_info *info)
+{
+ u32 flow_type;
+ int ret;
+
+ if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
+ dev_err(&hdev->pdev->dev,
+ "failed to config fd rules, invalid rule location: %u, max is %u\n.",
+ fs->location,
+ hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
+ return -EINVAL;
+ }
+
+ ret = hclge_fd_parse_user_def_field(hdev, fs, unused_tuple, info);
+ if (ret)
+ return ret;
+
+ flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
+ switch (flow_type) {
+ case SCTP_V4_FLOW:
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
+ unused_tuple);
+ break;
+ case IP_USER_FLOW:
+ ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
+ unused_tuple);
+ break;
+ case SCTP_V6_FLOW:
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
+ unused_tuple);
+ break;
+ case IPV6_USER_FLOW:
+ ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
+ unused_tuple);
+ break;
+ case ETHER_FLOW:
+ if (hdev->fd_cfg.fd_mode !=
+ HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
+ dev_err(&hdev->pdev->dev,
+ "ETHER_FLOW is not supported in current fd mode!\n");
+ return -EOPNOTSUPP;
+ }
+
+ ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
+ unused_tuple);
+ break;
+ default:
+ dev_err(&hdev->pdev->dev,
+ "unsupported protocol type, protocol type = %#x\n",
+ flow_type);
+ return -EOPNOTSUPP;
+ }
+
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to check flow union tuple, ret = %d\n",
+ ret);
+ return ret;
+ }
+
+ return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
+}
+
+static void hclge_fd_get_tcpip4_tuple(struct ethtool_rx_flow_spec *fs,
+ struct hclge_fd_rule *rule, u8 ip_proto)
+{
+ rule->tuples.src_ip[IPV4_INDEX] =
+ be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
+ rule->tuples_mask.src_ip[IPV4_INDEX] =
+ be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
+
+ rule->tuples.dst_ip[IPV4_INDEX] =
+ be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
+ rule->tuples_mask.dst_ip[IPV4_INDEX] =
+ be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
+
+ rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
+ rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
+
+ rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
+ rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
+
+ rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
+ rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
+
+ rule->tuples.ether_proto = ETH_P_IP;
+ rule->tuples_mask.ether_proto = 0xFFFF;
+
+ rule->tuples.ip_proto = ip_proto;
+ rule->tuples_mask.ip_proto = 0xFF;
+}
+
+static void hclge_fd_get_ip4_tuple(struct ethtool_rx_flow_spec *fs,
+ struct hclge_fd_rule *rule)
+{
+ rule->tuples.src_ip[IPV4_INDEX] =
+ be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
+ rule->tuples_mask.src_ip[IPV4_INDEX] =
+ be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
+
+ rule->tuples.dst_ip[IPV4_INDEX] =
+ be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
+ rule->tuples_mask.dst_ip[IPV4_INDEX] =
+ be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
+
+ rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
+ rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
+
+ rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
+ rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
+
+ rule->tuples.ether_proto = ETH_P_IP;
+ rule->tuples_mask.ether_proto = 0xFFFF;
+}
+
+static void hclge_fd_get_tcpip6_tuple(struct ethtool_rx_flow_spec *fs,
+ struct hclge_fd_rule *rule, u8 ip_proto)
+{
+ be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.tcp_ip6_spec.ip6src,
+ IPV6_SIZE);
+ be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.tcp_ip6_spec.ip6src,
+ IPV6_SIZE);
+
+ be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.tcp_ip6_spec.ip6dst,
+ IPV6_SIZE);
+ be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.tcp_ip6_spec.ip6dst,
+ IPV6_SIZE);
+
+ rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
+ rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
+
+ rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
+ rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
+
+ rule->tuples.ether_proto = ETH_P_IPV6;
+ rule->tuples_mask.ether_proto = 0xFFFF;
+
+ rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass;
+ rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass;
+
+ rule->tuples.ip_proto = ip_proto;
+ rule->tuples_mask.ip_proto = 0xFF;
+}
+
+static void hclge_fd_get_ip6_tuple(struct ethtool_rx_flow_spec *fs,
+ struct hclge_fd_rule *rule)
+{
+ be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.usr_ip6_spec.ip6src,
+ IPV6_SIZE);
+ be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.usr_ip6_spec.ip6src,
+ IPV6_SIZE);
+
+ be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.usr_ip6_spec.ip6dst,
+ IPV6_SIZE);
+ be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.usr_ip6_spec.ip6dst,
+ IPV6_SIZE);
+
+ rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
+ rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
+
+ rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass;
+ rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass;
+
+ rule->tuples.ether_proto = ETH_P_IPV6;
+ rule->tuples_mask.ether_proto = 0xFFFF;
+}
+
+static void hclge_fd_get_ether_tuple(struct ethtool_rx_flow_spec *fs,
+ struct hclge_fd_rule *rule)
+{
+ ether_addr_copy(rule->tuples.src_mac, fs->h_u.ether_spec.h_source);
+ ether_addr_copy(rule->tuples_mask.src_mac, fs->m_u.ether_spec.h_source);
+
+ ether_addr_copy(rule->tuples.dst_mac, fs->h_u.ether_spec.h_dest);
+ ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_u.ether_spec.h_dest);
+
+ rule->tuples.ether_proto = be16_to_cpu(fs->h_u.ether_spec.h_proto);
+ rule->tuples_mask.ether_proto = be16_to_cpu(fs->m_u.ether_spec.h_proto);
+}
+
+static void hclge_fd_get_user_def_tuple(struct hclge_fd_user_def_info *info,
+ struct hclge_fd_rule *rule)
+{
+ switch (info->layer) {
+ case HCLGE_FD_USER_DEF_L2:
+ rule->tuples.l2_user_def = info->data;
+ rule->tuples_mask.l2_user_def = info->data_mask;
+ break;
+ case HCLGE_FD_USER_DEF_L3:
+ rule->tuples.l3_user_def = info->data;
+ rule->tuples_mask.l3_user_def = info->data_mask;
+ break;
+ case HCLGE_FD_USER_DEF_L4:
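+ /* for L4, the user-def data occupies the high 16 bits of the
+ * 32-bit tuple
+ */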
+ rule->tuples.l4_user_def = (u32)info->data << 16;
+ rule->tuples_mask.l4_user_def = (u32)info->data_mask << 16;
+ break;
+ default:
+ break;
+ }
+
+ rule->ep.user_def = *info;
+}
+
+static int hclge_fd_get_tuple(struct ethtool_rx_flow_spec *fs,
+ struct hclge_fd_rule *rule,
+ struct hclge_fd_user_def_info *info)
+{
+ u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
+
+ switch (flow_type) {
+ case SCTP_V4_FLOW:
+ hclge_fd_get_tcpip4_tuple(fs, rule, IPPROTO_SCTP);
+ break;
+ case TCP_V4_FLOW:
+ hclge_fd_get_tcpip4_tuple(fs, rule, IPPROTO_TCP);
+ break;
+ case UDP_V4_FLOW:
+ hclge_fd_get_tcpip4_tuple(fs, rule, IPPROTO_UDP);
+ break;
+ case IP_USER_FLOW:
+ hclge_fd_get_ip4_tuple(fs, rule);
+ break;
+ case SCTP_V6_FLOW:
+ hclge_fd_get_tcpip6_tuple(fs, rule, IPPROTO_SCTP);
+ break;
+ case TCP_V6_FLOW:
+ hclge_fd_get_tcpip6_tuple(fs, rule, IPPROTO_TCP);
+ break;
+ case UDP_V6_FLOW:
+ hclge_fd_get_tcpip6_tuple(fs, rule, IPPROTO_UDP);
+ break;
+ case IPV6_USER_FLOW:
+ hclge_fd_get_ip6_tuple(fs, rule);
+ break;
+ case ETHER_FLOW:
+ hclge_fd_get_ether_tuple(fs, rule);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (fs->flow_type & FLOW_EXT) {
+ rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
+ rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
+ hclge_fd_get_user_def_tuple(info, rule);
+ }
+
+ if (fs->flow_type & FLOW_MAC_EXT) {
+ ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
+ ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
+ }
+
+ return 0;
+}
+
+static int hclge_fd_config_rule(struct hclge_dev *hdev,
+ struct hclge_fd_rule *rule)
+{
+ int ret;
+
+ ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
+ if (ret)
+ return ret;
+
+ return hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
+}
+
+static int hclge_add_fd_entry_common(struct hclge_dev *hdev,
+ struct hclge_fd_rule *rule)
+{
+ int ret;
+
+ spin_lock_bh(&hdev->fd_rule_lock);
+
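+ /* ethtool flow-director rules and tc flower rules cannot coexist,
+ * so reject the new rule if a different rule type is already active.
+ */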
+ if (hdev->fd_active_type != rule->rule_type &&
+ (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
+ hdev->fd_active_type == HCLGE_FD_EP_ACTIVE)) {
+ dev_err(&hdev->pdev->dev,
+ "mode conflict(new type %d, active type %d), please delete existent rules first\n",
+ rule->rule_type, hdev->fd_active_type);
+ spin_unlock_bh(&hdev->fd_rule_lock);
+ return -EINVAL;
+ }
+
+ ret = hclge_fd_check_user_def_refcnt(hdev, rule);
+ if (ret)
+ goto out;
+
+ ret = hclge_clear_arfs_rules(hdev);
+ if (ret)
+ goto out;
+
+ ret = hclge_fd_config_rule(hdev, rule);
+ if (ret)
+ goto out;
+
+ rule->state = HCLGE_FD_ACTIVE;
+ hdev->fd_active_type = rule->rule_type;
+ hclge_update_fd_list(hdev, rule->state, rule->location, rule);
+
+out:
+ spin_unlock_bh(&hdev->fd_rule_lock);
+ return ret;
+}
+
+static bool hclge_is_cls_flower_active(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE;
+}
+
+static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie,
+ u16 *vport_id, u8 *action, u16 *queue_id)
+{
+ struct hclge_vport *vport = hdev->vport;
+
+ if (ring_cookie == RX_CLS_FLOW_DISC) {
+ *action = HCLGE_FD_ACTION_DROP_PACKET;
+ } else {
+ u32 ring = ethtool_get_flow_spec_ring(ring_cookie);
+ u8 vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
+ u16 tqps;
+
+ /* To stay consistent with the user's configuration, subtract 1
+ * when printing 'vf', because the vf id from ethtool is the vf
+ * index plus 1.
+ */
+ if (vf > hdev->num_req_vfs) {
+ dev_err(&hdev->pdev->dev,
+ "Error: vf id (%u) should be less than %u\n",
+ vf - 1U, hdev->num_req_vfs);
+ return -EINVAL;
+ }
+
+ *vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
+ tqps = hdev->vport[vf].nic.kinfo.num_tqps;
+
+ if (ring >= tqps) {
+ dev_err(&hdev->pdev->dev,
+ "Error: queue id (%u) > max tqp num (%u)\n",
+ ring, tqps - 1U);
+ return -EINVAL;
+ }
+
+ *action = HCLGE_FD_ACTION_SELECT_QUEUE;
+ *queue_id = ring;
+ }
+
+ return 0;
+}
+
+static int hclge_add_fd_entry(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *cmd)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_fd_user_def_info info;
+ u16 dst_vport_id = 0, q_index = 0;
+ struct ethtool_rx_flow_spec *fs;
+ struct hclge_fd_rule *rule;
+ u32 unused = 0;
+ u8 action;
+ int ret;
+
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
+ dev_err(&hdev->pdev->dev,
+ "flow table director is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (!hdev->fd_en) {
+ dev_err(&hdev->pdev->dev,
+ "please enable flow director first\n");
+ return -EOPNOTSUPP;
+ }
+
+ fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
+
+ ret = hclge_fd_check_spec(hdev, fs, &unused, &info);
+ if (ret)
+ return ret;
+
+ ret = hclge_fd_parse_ring_cookie(hdev, fs->ring_cookie, &dst_vport_id,
+ &action, &q_index);
+ if (ret)
+ return ret;
+
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule)
+ return -ENOMEM;
+
+ ret = hclge_fd_get_tuple(fs, rule, &info);
+ if (ret) {
+ kfree(rule);
+ return ret;
+ }
+
+ rule->flow_type = fs->flow_type;
+ rule->location = fs->location;
+ rule->unused_tuple = unused;
+ rule->vf_id = dst_vport_id;
+ rule->queue_id = q_index;
+ rule->action = action;
+ rule->rule_type = HCLGE_FD_EP_ACTIVE;
+
+ ret = hclge_add_fd_entry_common(hdev, rule);
+ if (ret)
+ kfree(rule);
+
+ return ret;
+}
+
+static int hclge_del_fd_entry(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *cmd)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct ethtool_rx_flow_spec *fs;
+ int ret;
+
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
+ return -EOPNOTSUPP;
+
+ fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
+
+ if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
+ return -EINVAL;
+
+ spin_lock_bh(&hdev->fd_rule_lock);
+ if (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
+ !test_bit(fs->location, hdev->fd_bmap)) {
+ dev_err(&hdev->pdev->dev,
+ "Delete fail, rule %u is inexistent\n", fs->location);
+ spin_unlock_bh(&hdev->fd_rule_lock);
+ return -ENOENT;
+ }
+
+ ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
+ NULL, false);
+ if (ret)
+ goto out;
+
+ hclge_update_fd_list(hdev, HCLGE_FD_DELETED, fs->location, NULL);
+
+out:
+ spin_unlock_bh(&hdev->fd_rule_lock);
+ return ret;
+}
+
+static void hclge_clear_fd_rules_in_list(struct hclge_dev *hdev,
+ bool clear_list)
+{
+ struct hclge_fd_rule *rule;
+ struct hlist_node *node;
+ u16 location;
+
+ spin_lock_bh(&hdev->fd_rule_lock);
+
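+ /* invalidate every hardware TCAM entry marked in fd_bmap */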
+ for_each_set_bit(location, hdev->fd_bmap,
+ hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
+ hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
+ NULL, false);
+
+ if (clear_list) {
+ hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
+ rule_node) {
+ hlist_del(&rule->rule_node);
+ kfree(rule);
+ }
+ hdev->fd_active_type = HCLGE_FD_RULE_NONE;
+ hdev->hclge_fd_rule_num = 0;
+ bitmap_zero(hdev->fd_bmap,
+ hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
+ }
+
+ spin_unlock_bh(&hdev->fd_rule_lock);
+}
+
+static void hclge_del_all_fd_entries(struct hclge_dev *hdev)
+{
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
+ return;
+
+ hclge_clear_fd_rules_in_list(hdev, true);
+ hclge_fd_disable_user_def(hdev);
+}
+
+static int hclge_restore_fd_entries(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_fd_rule *rule;
+ struct hlist_node *node;
+
+ /* Return ok here, because reset error handling will check this
+ * return value. If error is returned here, the reset process will
+ * fail.
+ */
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
+ return 0;
+
+ /* if fd is disabled, should not restore it when reset */
+ if (!hdev->fd_en)
+ return 0;
+
+ spin_lock_bh(&hdev->fd_rule_lock);
+ hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
+ if (rule->state == HCLGE_FD_ACTIVE)
+ rule->state = HCLGE_FD_TO_ADD;
+ }
+ spin_unlock_bh(&hdev->fd_rule_lock);
+ set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
+
+ return 0;
+}
+
+static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *cmd)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev) || hclge_is_cls_flower_active(handle))
+ return -EOPNOTSUPP;
+
+ cmd->rule_cnt = hdev->hclge_fd_rule_num;
+ cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
+
+ return 0;
+}
+
+static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
+ struct ethtool_tcpip4_spec *spec,
+ struct ethtool_tcpip4_spec *spec_mask)
+{
+ spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
+ spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
+ 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
+
+ spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
+ spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
+ 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
+
+ spec->psrc = cpu_to_be16(rule->tuples.src_port);
+ spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
+ 0 : cpu_to_be16(rule->tuples_mask.src_port);
+
+ spec->pdst = cpu_to_be16(rule->tuples.dst_port);
+ spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
+ 0 : cpu_to_be16(rule->tuples_mask.dst_port);
+
+ spec->tos = rule->tuples.ip_tos;
+ spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
+ 0 : rule->tuples_mask.ip_tos;
+}
+
+static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
+ struct ethtool_usrip4_spec *spec,
+ struct ethtool_usrip4_spec *spec_mask)
+{
+ spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
+ spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
+ 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
+
+ spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
+ spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
+ 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
+
+ spec->tos = rule->tuples.ip_tos;
+ spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
+ 0 : rule->tuples_mask.ip_tos;
+
+ spec->proto = rule->tuples.ip_proto;
+ spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
+ 0 : rule->tuples_mask.ip_proto;
+
+ spec->ip_ver = ETH_RX_NFC_IP4;
+}
+
+static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
+ struct ethtool_tcpip6_spec *spec,
+ struct ethtool_tcpip6_spec *spec_mask)
+{
+ cpu_to_be32_array(spec->ip6src,
+ rule->tuples.src_ip, IPV6_SIZE);
+ cpu_to_be32_array(spec->ip6dst,
+ rule->tuples.dst_ip, IPV6_SIZE);
+ if (rule->unused_tuple & BIT(INNER_SRC_IP))
+ memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
+ else
+ cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
+ IPV6_SIZE);
+
+ if (rule->unused_tuple & BIT(INNER_DST_IP))
+ memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
+ else
+ cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
+ IPV6_SIZE);
+
+ spec->tclass = rule->tuples.ip_tos;
+ spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
+ 0 : rule->tuples_mask.ip_tos;
+
+ spec->psrc = cpu_to_be16(rule->tuples.src_port);
+ spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
+ 0 : cpu_to_be16(rule->tuples_mask.src_port);
+
+ spec->pdst = cpu_to_be16(rule->tuples.dst_port);
+ spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
+ 0 : cpu_to_be16(rule->tuples_mask.dst_port);
+}
+
+static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
+ struct ethtool_usrip6_spec *spec,
+ struct ethtool_usrip6_spec *spec_mask)
+{
+ cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
+ cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
+ if (rule->unused_tuple & BIT(INNER_SRC_IP))
+ memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
+ else
+ cpu_to_be32_array(spec_mask->ip6src,
+ rule->tuples_mask.src_ip, IPV6_SIZE);
+
+ if (rule->unused_tuple & BIT(INNER_DST_IP))
+ memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
+ else
+ cpu_to_be32_array(spec_mask->ip6dst,
+ rule->tuples_mask.dst_ip, IPV6_SIZE);
+
+ spec->tclass = rule->tuples.ip_tos;
+ spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
+ 0 : rule->tuples_mask.ip_tos;
+
+ spec->l4_proto = rule->tuples.ip_proto;
+ spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
+ 0 : rule->tuples_mask.ip_proto;
+}
+
+static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
+ struct ethhdr *spec,
+ struct ethhdr *spec_mask)
+{
+ ether_addr_copy(spec->h_source, rule->tuples.src_mac);
+ ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
+
+ if (rule->unused_tuple & BIT(INNER_SRC_MAC))
+ eth_zero_addr(spec_mask->h_source);
+ else
+ ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
+
+ if (rule->unused_tuple & BIT(INNER_DST_MAC))
+ eth_zero_addr(spec_mask->h_dest);
+ else
+ ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
+
+ spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
+ spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
+ 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
+}
+
+static void hclge_fd_get_user_def_info(struct ethtool_rx_flow_spec *fs,
+ struct hclge_fd_rule *rule)
+{
+ if ((rule->unused_tuple & HCLGE_FD_TUPLE_USER_DEF_TUPLES) ==
+ HCLGE_FD_TUPLE_USER_DEF_TUPLES) {
+ fs->h_ext.data[0] = 0;
+ fs->h_ext.data[1] = 0;
+ fs->m_ext.data[0] = 0;
+ fs->m_ext.data[1] = 0;
+ } else {
+ fs->h_ext.data[0] = cpu_to_be32(rule->ep.user_def.offset);
+ fs->h_ext.data[1] = cpu_to_be32(rule->ep.user_def.data);
+ fs->m_ext.data[0] =
+ cpu_to_be32(HCLGE_FD_USER_DEF_OFFSET_UNMASK);
+ fs->m_ext.data[1] = cpu_to_be32(rule->ep.user_def.data_mask);
+ }
+}
+
+static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
+ struct hclge_fd_rule *rule)
+{
+ if (fs->flow_type & FLOW_EXT) {
+ fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
+ fs->m_ext.vlan_tci =
+ rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
+ 0 : cpu_to_be16(rule->tuples_mask.vlan_tag1);
+
+ hclge_fd_get_user_def_info(fs, rule);
+ }
+
+ if (fs->flow_type & FLOW_MAC_EXT) {
+ ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
+ if (rule->unused_tuple & BIT(INNER_DST_MAC))
+ eth_zero_addr(fs->m_u.ether_spec.h_dest);
+ else
+ ether_addr_copy(fs->m_u.ether_spec.h_dest,
+ rule->tuples_mask.dst_mac);
+ }
+}
+
+static struct hclge_fd_rule *hclge_get_fd_rule(struct hclge_dev *hdev,
+ u16 location)
+{
+ struct hclge_fd_rule *rule = NULL;
+ struct hlist_node *node2;
+
+ hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
+ if (rule->location == location)
+ return rule;
+ else if (rule->location > location)
+ return NULL;
+ }
+
+ return NULL;
+}
+
+static void hclge_fd_get_ring_cookie(struct ethtool_rx_flow_spec *fs,
+ struct hclge_fd_rule *rule)
+{
+ if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
+ fs->ring_cookie = RX_CLS_FLOW_DISC;
+ } else {
+ u64 vf_id;
+
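+ /* encode the vf id into the upper bits of the ring cookie, as
+ * expected by ethtool
+ */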
+ fs->ring_cookie = rule->queue_id;
+ vf_id = rule->vf_id;
+ vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
+ fs->ring_cookie |= vf_id;
+ }
+}
+
+static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *cmd)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_fd_rule *rule = NULL;
+ struct hclge_dev *hdev = vport->back;
+ struct ethtool_rx_flow_spec *fs;
+
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
+ return -EOPNOTSUPP;
+
+ fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
+
+ spin_lock_bh(&hdev->fd_rule_lock);
+
+ rule = hclge_get_fd_rule(hdev, fs->location);
+ if (!rule) {
+ spin_unlock_bh(&hdev->fd_rule_lock);
+ return -ENOENT;
+ }
+
+ fs->flow_type = rule->flow_type;
+ switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
+ case SCTP_V4_FLOW:
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
+ &fs->m_u.tcp_ip4_spec);
+ break;
+ case IP_USER_FLOW:
+ hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
+ &fs->m_u.usr_ip4_spec);
+ break;
+ case SCTP_V6_FLOW:
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
+ &fs->m_u.tcp_ip6_spec);
+ break;
+ case IPV6_USER_FLOW:
+ hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
+ &fs->m_u.usr_ip6_spec);
+ break;
+ /* The flow type of the fd rule has been checked before it is added
+ * to the rule list. As all other flow types have been handled, the
+ * default case must be ETHER_FLOW.
+ */
+ default:
+ hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
+ &fs->m_u.ether_spec);
+ break;
+ }
+
+ hclge_fd_get_ext_info(fs, rule);
+
+ hclge_fd_get_ring_cookie(fs, rule);
+
+ spin_unlock_bh(&hdev->fd_rule_lock);
+
+ return 0;
+}
+
+static int hclge_get_all_rules(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *cmd, u32 *rule_locs)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_fd_rule *rule;
+ struct hlist_node *node2;
+ int cnt = 0;
+
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
+ return -EOPNOTSUPP;
+
+ cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
+
+ spin_lock_bh(&hdev->fd_rule_lock);
+ hlist_for_each_entry_safe(rule, node2,
+ &hdev->fd_rule_list, rule_node) {
+ if (cnt == cmd->rule_cnt) {
+ spin_unlock_bh(&hdev->fd_rule_lock);
+ return -EMSGSIZE;
+ }
+
+ if (rule->state == HCLGE_FD_TO_DEL)
+ continue;
+
+ rule_locs[cnt] = rule->location;
+ cnt++;
+ }
+
+ spin_unlock_bh(&hdev->fd_rule_lock);
+
+ cmd->rule_cnt = cnt;
+
+ return 0;
+}
+
+static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
+ struct hclge_fd_rule_tuples *tuples)
+{
+#define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
+#define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
+
+ tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
+ tuples->ip_proto = fkeys->basic.ip_proto;
+ tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
+
+ if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
+ tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
+ tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
+ } else {
+ int i;
+
+ for (i = 0; i < IPV6_SIZE; i++) {
+ tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
+ tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
+ }
+ }
+}
+
+/* traverse all rules, checking whether an existing rule has the same tuples */
+static struct hclge_fd_rule *
+hclge_fd_search_flow_keys(struct hclge_dev *hdev,
+ const struct hclge_fd_rule_tuples *tuples)
+{
+ struct hclge_fd_rule *rule = NULL;
+ struct hlist_node *node;
+
+ hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
+ if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
+ return rule;
+ }
+
+ return NULL;
+}
+
+static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
+ struct hclge_fd_rule *rule)
+{
+ rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
+ BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
+ BIT(INNER_SRC_PORT);
+ rule->action = 0;
+ rule->vf_id = 0;
+ rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
+ rule->state = HCLGE_FD_TO_ADD;
+ if (tuples->ether_proto == ETH_P_IP) {
+ if (tuples->ip_proto == IPPROTO_TCP)
+ rule->flow_type = TCP_V4_FLOW;
+ else
+ rule->flow_type = UDP_V4_FLOW;
+ } else {
+ if (tuples->ip_proto == IPPROTO_TCP)
+ rule->flow_type = TCP_V6_FLOW;
+ else
+ rule->flow_type = UDP_V6_FLOW;
+ }
+ memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
+ memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
+}
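+
+/* Note added for clarity: aRFS rules match the remaining tuples exactly,
+ * which is why tuples_mask is filled with 0xFF above; the fields that do
+ * not matter for aRFS (MAC addresses, VLAN tag, TOS and source port) are
+ * excluded via unused_tuple rather than via the mask.
+ */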
+
+static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
+ u16 flow_id, struct flow_keys *fkeys)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_fd_rule_tuples new_tuples = {};
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_fd_rule *rule;
+ u16 bit_id;
+
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
+ return -EOPNOTSUPP;
+
+	/* when a flow director rule added by the user already exists,
+	 * arfs should not take effect
+	 */
+ spin_lock_bh(&hdev->fd_rule_lock);
+ if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE &&
+ hdev->fd_active_type != HCLGE_FD_RULE_NONE) {
+ spin_unlock_bh(&hdev->fd_rule_lock);
+ return -EOPNOTSUPP;
+ }
+
+ hclge_fd_get_flow_tuples(fkeys, &new_tuples);
+
+	/* check whether a flow director filter already exists for this flow:
+	 * if not, create a new filter for it;
+	 * if a filter exists with a different queue id, modify the filter;
+	 * if a filter exists with the same queue id, do nothing
+	 */
+ rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
+ if (!rule) {
+ bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
+ if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
+ spin_unlock_bh(&hdev->fd_rule_lock);
+ return -ENOSPC;
+ }
+
+ rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
+ if (!rule) {
+ spin_unlock_bh(&hdev->fd_rule_lock);
+ return -ENOMEM;
+ }
+
+ rule->location = bit_id;
+ rule->arfs.flow_id = flow_id;
+ rule->queue_id = queue_id;
+ hclge_fd_build_arfs_rule(&new_tuples, rule);
+ hclge_update_fd_list(hdev, rule->state, rule->location, rule);
+ hdev->fd_active_type = HCLGE_FD_ARFS_ACTIVE;
+ } else if (rule->queue_id != queue_id) {
+ rule->queue_id = queue_id;
+ rule->state = HCLGE_FD_TO_ADD;
+ set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
+ hclge_task_schedule(hdev, 0);
+ }
+ spin_unlock_bh(&hdev->fd_rule_lock);
+ return rule->location;
+}
+
+static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
+{
+#ifdef CONFIG_RFS_ACCEL
+ struct hnae3_handle *handle = &hdev->vport[0].nic;
+ struct hclge_fd_rule *rule;
+ struct hlist_node *node;
+
+ spin_lock_bh(&hdev->fd_rule_lock);
+ if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
+ spin_unlock_bh(&hdev->fd_rule_lock);
+ return;
+ }
+ hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
+ if (rule->state != HCLGE_FD_ACTIVE)
+ continue;
+ if (rps_may_expire_flow(handle->netdev, rule->queue_id,
+ rule->arfs.flow_id, rule->location)) {
+ rule->state = HCLGE_FD_TO_DEL;
+ set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
+ }
+ }
+ spin_unlock_bh(&hdev->fd_rule_lock);
+#endif
+}
+
+/* must be called with fd_rule_lock held */
+static int hclge_clear_arfs_rules(struct hclge_dev *hdev)
+{
+#ifdef CONFIG_RFS_ACCEL
+ struct hclge_fd_rule *rule;
+ struct hlist_node *node;
+ int ret;
+
+ if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE)
+ return 0;
+
+ hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
+ switch (rule->state) {
+ case HCLGE_FD_TO_DEL:
+ case HCLGE_FD_ACTIVE:
+ ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
+ rule->location, NULL, false);
+ if (ret)
+ return ret;
+ fallthrough;
+ case HCLGE_FD_TO_ADD:
+ hclge_fd_dec_rule_cnt(hdev, rule->location);
+ hlist_del(&rule->rule_node);
+ kfree(rule);
+ break;
+ default:
+ break;
+ }
+ }
+ hclge_sync_fd_state(hdev);
+
+#endif
+ return 0;
+}
+
+static void hclge_get_cls_key_basic(const struct flow_rule *flow,
+ struct hclge_fd_rule *rule)
+{
+ if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic match;
+ u16 ethtype_key, ethtype_mask;
+
+ flow_rule_match_basic(flow, &match);
+ ethtype_key = ntohs(match.key->n_proto);
+ ethtype_mask = ntohs(match.mask->n_proto);
+
+ if (ethtype_key == ETH_P_ALL) {
+ ethtype_key = 0;
+ ethtype_mask = 0;
+ }
+ rule->tuples.ether_proto = ethtype_key;
+ rule->tuples_mask.ether_proto = ethtype_mask;
+ rule->tuples.ip_proto = match.key->ip_proto;
+ rule->tuples_mask.ip_proto = match.mask->ip_proto;
+ } else {
+ rule->unused_tuple |= BIT(INNER_IP_PROTO);
+ rule->unused_tuple |= BIT(INNER_ETH_TYPE);
+ }
+}
+
+static void hclge_get_cls_key_mac(const struct flow_rule *flow,
+ struct hclge_fd_rule *rule)
+{
+ if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_match_eth_addrs match;
+
+ flow_rule_match_eth_addrs(flow, &match);
+ ether_addr_copy(rule->tuples.dst_mac, match.key->dst);
+ ether_addr_copy(rule->tuples_mask.dst_mac, match.mask->dst);
+ ether_addr_copy(rule->tuples.src_mac, match.key->src);
+ ether_addr_copy(rule->tuples_mask.src_mac, match.mask->src);
+ } else {
+ rule->unused_tuple |= BIT(INNER_DST_MAC);
+ rule->unused_tuple |= BIT(INNER_SRC_MAC);
+ }
+}
+
+static void hclge_get_cls_key_vlan(const struct flow_rule *flow,
+ struct hclge_fd_rule *rule)
+{
+ if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_match_vlan match;
+
+ flow_rule_match_vlan(flow, &match);
+ rule->tuples.vlan_tag1 = match.key->vlan_id |
+ (match.key->vlan_priority << VLAN_PRIO_SHIFT);
+ rule->tuples_mask.vlan_tag1 = match.mask->vlan_id |
+ (match.mask->vlan_priority << VLAN_PRIO_SHIFT);
+ } else {
+ rule->unused_tuple |= BIT(INNER_VLAN_TAG_FST);
+ }
+}
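+
+/* Worked example added for clarity: vlan_tag1 holds the 16-bit TCI, so a
+ * match on VLAN ID 100 with priority 3 becomes
+ * 100 | (3 << VLAN_PRIO_SHIFT) = 0x64 | 0x6000 = 0x6064.
+ */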
+
+static void hclge_get_cls_key_ip(const struct flow_rule *flow,
+ struct hclge_fd_rule *rule)
+{
+ u16 addr_type = 0;
+
+ if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_CONTROL)) {
+ struct flow_match_control match;
+
+ flow_rule_match_control(flow, &match);
+ addr_type = match.key->addr_type;
+ }
+
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+ struct flow_match_ipv4_addrs match;
+
+ flow_rule_match_ipv4_addrs(flow, &match);
+ rule->tuples.src_ip[IPV4_INDEX] = be32_to_cpu(match.key->src);
+ rule->tuples_mask.src_ip[IPV4_INDEX] =
+ be32_to_cpu(match.mask->src);
+ rule->tuples.dst_ip[IPV4_INDEX] = be32_to_cpu(match.key->dst);
+ rule->tuples_mask.dst_ip[IPV4_INDEX] =
+ be32_to_cpu(match.mask->dst);
+ } else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
+ struct flow_match_ipv6_addrs match;
+
+ flow_rule_match_ipv6_addrs(flow, &match);
+ be32_to_cpu_array(rule->tuples.src_ip, match.key->src.s6_addr32,
+ IPV6_SIZE);
+ be32_to_cpu_array(rule->tuples_mask.src_ip,
+ match.mask->src.s6_addr32, IPV6_SIZE);
+ be32_to_cpu_array(rule->tuples.dst_ip, match.key->dst.s6_addr32,
+ IPV6_SIZE);
+ be32_to_cpu_array(rule->tuples_mask.dst_ip,
+ match.mask->dst.s6_addr32, IPV6_SIZE);
+ } else {
+ rule->unused_tuple |= BIT(INNER_SRC_IP);
+ rule->unused_tuple |= BIT(INNER_DST_IP);
+ }
+}
+
+static void hclge_get_cls_key_port(const struct flow_rule *flow,
+ struct hclge_fd_rule *rule)
+{
+ if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) {
+ struct flow_match_ports match;
+
+ flow_rule_match_ports(flow, &match);
+
+ rule->tuples.src_port = be16_to_cpu(match.key->src);
+ rule->tuples_mask.src_port = be16_to_cpu(match.mask->src);
+ rule->tuples.dst_port = be16_to_cpu(match.key->dst);
+ rule->tuples_mask.dst_port = be16_to_cpu(match.mask->dst);
+ } else {
+ rule->unused_tuple |= BIT(INNER_SRC_PORT);
+ rule->unused_tuple |= BIT(INNER_DST_PORT);
+ }
+}
+
+static int hclge_parse_cls_flower(struct hclge_dev *hdev,
+ struct flow_cls_offload *cls_flower,
+ struct hclge_fd_rule *rule)
+{
+ struct flow_rule *flow = flow_cls_offload_flow_rule(cls_flower);
+ struct flow_dissector *dissector = flow->match.dissector;
+
+ if (dissector->used_keys &
+ ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_PORTS))) {
+ dev_err(&hdev->pdev->dev, "unsupported key set: %#llx\n",
+ dissector->used_keys);
+ return -EOPNOTSUPP;
+ }
+
+ hclge_get_cls_key_basic(flow, rule);
+ hclge_get_cls_key_mac(flow, rule);
+ hclge_get_cls_key_vlan(flow, rule);
+ hclge_get_cls_key_ip(flow, rule);
+ hclge_get_cls_key_port(flow, rule);
+
+ return 0;
+}
+
+static int hclge_check_cls_flower(struct hclge_dev *hdev,
+ struct flow_cls_offload *cls_flower, int tc)
+{
+ u32 prio = cls_flower->common.prio;
+
+ if (tc < 0 || tc > hdev->tc_max) {
+ dev_err(&hdev->pdev->dev, "invalid traffic class\n");
+ return -EINVAL;
+ }
+
+ if (prio == 0 ||
+ prio > hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
+ dev_err(&hdev->pdev->dev,
+ "prio %u should be in range[1, %u]\n",
+ prio, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
+ return -EINVAL;
+ }
+
+ if (test_bit(prio - 1, hdev->fd_bmap)) {
+ dev_err(&hdev->pdev->dev, "prio %u is already used\n", prio);
+ return -EINVAL;
+ }
+ return 0;
+}
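+
+/* Note added for clarity: tc-flower priority N is mapped to flow director
+ * location N - 1 below, so cls_flower rules share the same stage-1 rule
+ * space (and fd_bmap accounting) as ethtool ntuple rules.
+ */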
+
+static int hclge_add_cls_flower(struct hnae3_handle *handle,
+ struct flow_cls_offload *cls_flower,
+ int tc)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_fd_rule *rule;
+ int ret;
+
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
+ dev_err(&hdev->pdev->dev,
+ "cls flower is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ ret = hclge_check_cls_flower(hdev, cls_flower, tc);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to check cls flower params, ret = %d\n", ret);
+ return ret;
+ }
+
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule)
+ return -ENOMEM;
+
+ ret = hclge_parse_cls_flower(hdev, cls_flower, rule);
+ if (ret) {
+ kfree(rule);
+ return ret;
+ }
+
+ rule->action = HCLGE_FD_ACTION_SELECT_TC;
+ rule->cls_flower.tc = tc;
+ rule->location = cls_flower->common.prio - 1;
+ rule->vf_id = 0;
+ rule->cls_flower.cookie = cls_flower->cookie;
+ rule->rule_type = HCLGE_FD_TC_FLOWER_ACTIVE;
+
+ ret = hclge_add_fd_entry_common(hdev, rule);
+ if (ret)
+ kfree(rule);
+
+ return ret;
+}
+
+static struct hclge_fd_rule *hclge_find_cls_flower(struct hclge_dev *hdev,
+ unsigned long cookie)
+{
+ struct hclge_fd_rule *rule;
+ struct hlist_node *node;
+
+ hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
+ if (rule->cls_flower.cookie == cookie)
+ return rule;
+ }
+
+ return NULL;
+}
+
+static int hclge_del_cls_flower(struct hnae3_handle *handle,
+ struct flow_cls_offload *cls_flower)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_fd_rule *rule;
+ int ret;
+
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
+ return -EOPNOTSUPP;
+
+ spin_lock_bh(&hdev->fd_rule_lock);
+
+ rule = hclge_find_cls_flower(hdev, cls_flower->cookie);
+ if (!rule) {
+ spin_unlock_bh(&hdev->fd_rule_lock);
+ return -EINVAL;
+ }
+
+ ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location,
+ NULL, false);
+ if (ret) {
+		/* if the tcam config fails, set the rule state to TO_DEL,
+		 * so the rule will be deleted when the periodic
+		 * task is scheduled.
+		 */
+ hclge_update_fd_list(hdev, HCLGE_FD_TO_DEL, rule->location, NULL);
+ set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
+ spin_unlock_bh(&hdev->fd_rule_lock);
+ return ret;
+ }
+
+ hclge_update_fd_list(hdev, HCLGE_FD_DELETED, rule->location, NULL);
+ spin_unlock_bh(&hdev->fd_rule_lock);
+
+ return 0;
+}
+
+static void hclge_sync_fd_list(struct hclge_dev *hdev, struct hlist_head *hlist)
+{
+ struct hclge_fd_rule *rule;
+ struct hlist_node *node;
+ int ret = 0;
+
+ if (!test_and_clear_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state))
+ return;
+
+ spin_lock_bh(&hdev->fd_rule_lock);
+
+ hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
+ switch (rule->state) {
+ case HCLGE_FD_TO_ADD:
+ ret = hclge_fd_config_rule(hdev, rule);
+ if (ret)
+ goto out;
+ rule->state = HCLGE_FD_ACTIVE;
+ break;
+ case HCLGE_FD_TO_DEL:
+ ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
+ rule->location, NULL, false);
+ if (ret)
+ goto out;
+ hclge_fd_dec_rule_cnt(hdev, rule->location);
+ hclge_fd_free_node(hdev, rule);
+ break;
+ default:
+ break;
+ }
+ }
+
+out:
+ if (ret)
+ set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
+
+ spin_unlock_bh(&hdev->fd_rule_lock);
+}
+
+static void hclge_sync_fd_table(struct hclge_dev *hdev)
+{
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
+ return;
+
+ if (test_and_clear_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state)) {
+ bool clear_list = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
+
+ hclge_clear_fd_rules_in_list(hdev, clear_list);
+ }
+
+ hclge_sync_fd_user_def_cfg(hdev, false);
+
+ hclge_sync_fd_list(hdev, &hdev->fd_rule_list);
+}
+
+static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
+ hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
+}
+
+static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ return test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
+}
+
+static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
+}
+
+static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ return hdev->rst_stats.hw_reset_done_cnt;
+}
+
+static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ hdev->fd_en = enable;
+
+ if (!enable)
+ set_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state);
+ else
+ hclge_restore_fd_entries(handle);
+
+ hclge_task_schedule(hdev, 0);
+}
+
+static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
+{
+#define HCLGE_LINK_STATUS_WAIT_CNT 3
+
+ struct hclge_desc desc;
+ struct hclge_config_mac_mode_cmd *req =
+ (struct hclge_config_mac_mode_cmd *)desc.data;
+ u32 loop_en = 0;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
+
+ if (enable) {
+ hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
+ hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
+ hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
+ hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
+ hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
+ hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
+ hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
+ hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
+ hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
+ hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
+ }
+
+ req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "mac enable fail, ret =%d.\n", ret);
+ return;
+ }
+
+ if (!enable)
+ hclge_mac_link_status_wait(hdev, HCLGE_LINK_STATUS_DOWN,
+ HCLGE_LINK_STATUS_WAIT_CNT);
+}
+
+static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
+ u8 switch_param, u8 param_mask)
+{
+ struct hclge_mac_vlan_switch_cmd *req;
+ struct hclge_desc desc;
+ u32 func_id;
+ int ret;
+
+ func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
+ req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
+
+ /* read current config parameter */
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
+ true);
+ req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
+ req->func_id = cpu_to_le32(func_id);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "read mac vlan switch parameter fail, ret = %d\n", ret);
+ return ret;
+ }
+
+ /* modify and write new config parameter */
+ hclge_comm_cmd_reuse_desc(&desc, false);
+ req->switch_param = (req->switch_param & param_mask) | switch_param;
+ req->param_mask = param_mask;
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "set mac vlan switch parameter fail, ret = %d\n", ret);
+ return ret;
+}
+
+static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
+ int link_ret)
+{
+#define HCLGE_PHY_LINK_STATUS_NUM 200
+
+ struct phy_device *phydev = hdev->hw.mac.phydev;
+ int i = 0;
+ int ret;
+
+ do {
+ ret = phy_read_status(phydev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "phy update link status fail, ret = %d\n", ret);
+ return;
+ }
+
+ if (phydev->link == link_ret)
+ break;
+
+ msleep(HCLGE_LINK_STATUS_MS);
+ } while (++i < HCLGE_PHY_LINK_STATUS_NUM);
+}
+
+static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret,
+ int wait_cnt)
+{
+ int link_status;
+ int i = 0;
+ int ret;
+
+ do {
+ ret = hclge_get_mac_link_status(hdev, &link_status);
+ if (ret)
+ return ret;
+ if (link_status == link_ret)
+ return 0;
+
+ msleep(HCLGE_LINK_STATUS_MS);
+ } while (++i < wait_cnt);
+ return -EBUSY;
+}
+
+static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
+ bool is_phy)
+{
+#define HCLGE_MAC_LINK_STATUS_NUM 100
+
+ int link_ret;
+
+ link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
+
+ if (is_phy)
+ hclge_phy_link_status_wait(hdev, link_ret);
+
+ return hclge_mac_link_status_wait(hdev, link_ret,
+ HCLGE_MAC_LINK_STATUS_NUM);
+}
+
+static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
+{
+ struct hclge_config_mac_mode_cmd *req;
+ struct hclge_desc desc;
+ u32 loop_en;
+ int ret;
+
+ req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
+ /* 1 Read out the MAC mode config at first */
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "mac loopback get fail, ret =%d.\n", ret);
+ return ret;
+ }
+
+ /* 2 Then setup the loopback flag */
+ loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
+ hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
+
+ req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
+
+	/* 3 Config mac work mode with the loopback flag
+	 * and its original configuration parameters
+	 */
+ hclge_comm_cmd_reuse_desc(&desc, false);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "mac loopback set fail, ret =%d.\n", ret);
+ return ret;
+}
+
+static int hclge_cfg_common_loopback_cmd_send(struct hclge_dev *hdev, bool en,
+ enum hnae3_loop loop_mode)
+{
+ struct hclge_common_lb_cmd *req;
+ struct hclge_desc desc;
+ u8 loop_mode_b;
+ int ret;
+
+ req = (struct hclge_common_lb_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, false);
+
+ switch (loop_mode) {
+ case HNAE3_LOOP_SERIAL_SERDES:
+ loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
+ break;
+ case HNAE3_LOOP_PARALLEL_SERDES:
+ loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
+ break;
+ case HNAE3_LOOP_PHY:
+ loop_mode_b = HCLGE_CMD_GE_PHY_INNER_LOOP_B;
+ break;
+ default:
+ dev_err(&hdev->pdev->dev,
+ "unsupported loopback mode %d\n", loop_mode);
+ return -ENOTSUPP;
+ }
+
+ req->mask = loop_mode_b;
+ if (en)
+ req->enable = loop_mode_b;
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "failed to send loopback cmd, loop_mode = %d, ret = %d\n",
+ loop_mode, ret);
+
+ return ret;
+}
+
+static int hclge_cfg_common_loopback_wait(struct hclge_dev *hdev)
+{
+#define HCLGE_COMMON_LB_RETRY_MS 10
+#define HCLGE_COMMON_LB_RETRY_NUM 100
+
+ struct hclge_common_lb_cmd *req;
+ struct hclge_desc desc;
+ u32 i = 0;
+ int ret;
+
+ req = (struct hclge_common_lb_cmd *)desc.data;
+
+ do {
+ msleep(HCLGE_COMMON_LB_RETRY_MS);
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK,
+ true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get loopback done status, ret = %d\n",
+ ret);
+ return ret;
+ }
+ } while (++i < HCLGE_COMMON_LB_RETRY_NUM &&
+ !(req->result & HCLGE_CMD_COMMON_LB_DONE_B));
+
+ if (!(req->result & HCLGE_CMD_COMMON_LB_DONE_B)) {
+ dev_err(&hdev->pdev->dev, "wait loopback timeout\n");
+ return -EBUSY;
+ } else if (!(req->result & HCLGE_CMD_COMMON_LB_SUCCESS_B)) {
+ dev_err(&hdev->pdev->dev, "failed to do loopback test\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en,
+ enum hnae3_loop loop_mode)
+{
+ int ret;
+
+ ret = hclge_cfg_common_loopback_cmd_send(hdev, en, loop_mode);
+ if (ret)
+ return ret;
+
+ return hclge_cfg_common_loopback_wait(hdev);
+}
+
+static int hclge_set_common_loopback(struct hclge_dev *hdev, bool en,
+ enum hnae3_loop loop_mode)
+{
+ int ret;
+
+ ret = hclge_cfg_common_loopback(hdev, en, loop_mode);
+ if (ret)
+ return ret;
+
+ hclge_cfg_mac_mode(hdev, en);
+
+ ret = hclge_mac_phy_link_status_wait(hdev, en, false);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "serdes loopback config mac mode timeout\n");
+
+ return ret;
+}
+
+static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
+ struct phy_device *phydev)
+{
+ int ret;
+
+ if (!phydev->suspended) {
+ ret = phy_suspend(phydev);
+ if (ret)
+ return ret;
+ }
+
+ ret = phy_resume(phydev);
+ if (ret)
+ return ret;
+
+ return phy_loopback(phydev, true);
+}
+
+static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
+ struct phy_device *phydev)
+{
+ int ret;
+
+ ret = phy_loopback(phydev, false);
+ if (ret)
+ return ret;
+
+ return phy_suspend(phydev);
+}
+
+static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
+{
+ struct phy_device *phydev = hdev->hw.mac.phydev;
+ int ret;
+
+ if (!phydev) {
+ if (hnae3_dev_phy_imp_supported(hdev))
+ return hclge_set_common_loopback(hdev, en,
+ HNAE3_LOOP_PHY);
+ return -ENOTSUPP;
+ }
+
+ if (en)
+ ret = hclge_enable_phy_loopback(hdev, phydev);
+ else
+ ret = hclge_disable_phy_loopback(hdev, phydev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "set phy loopback fail, ret = %d\n", ret);
+ return ret;
+ }
+
+ hclge_cfg_mac_mode(hdev, en);
+
+ ret = hclge_mac_phy_link_status_wait(hdev, en, true);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "phy loopback config mac mode timeout\n");
+
+ return ret;
+}
+
+static int hclge_tqp_enable_cmd_send(struct hclge_dev *hdev, u16 tqp_id,
+ u16 stream_id, bool enable)
+{
+ struct hclge_desc desc;
+ struct hclge_cfg_com_tqp_queue_cmd *req =
+ (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
+ req->tqp_id = cpu_to_le16(tqp_id);
+ req->stream_id = cpu_to_le16(stream_id);
+ if (enable)
+ req->enable |= 1U << HCLGE_TQP_ENABLE_B;
+
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
+static int hclge_tqp_enable(struct hnae3_handle *handle, bool enable)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+ u16 i;
+
+ for (i = 0; i < handle->kinfo.num_tqps; i++) {
+ ret = hclge_tqp_enable_cmd_send(hdev, i, 0, enable);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+static int hclge_set_loopback(struct hnae3_handle *handle,
+ enum hnae3_loop loop_mode, bool en)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ int ret = 0;
+
+	/* Loopback can be enabled in three places: SSU, MAC, and serdes. By
+	 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
+	 * the same, the packets are looped back in the SSU. If SSU loopback
+	 * is disabled, packets can reach the MAC even if the SMAC is the same
+	 * as the DMAC.
+	 */
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
+ u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
+
+ ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
+ HCLGE_SWITCH_ALW_LPBK_MASK);
+ if (ret)
+ return ret;
+ }
+
+ switch (loop_mode) {
+ case HNAE3_LOOP_APP:
+ ret = hclge_set_app_loopback(hdev, en);
+ break;
+ case HNAE3_LOOP_SERIAL_SERDES:
+ case HNAE3_LOOP_PARALLEL_SERDES:
+ ret = hclge_set_common_loopback(hdev, en, loop_mode);
+ break;
+ case HNAE3_LOOP_PHY:
+ ret = hclge_set_phy_loopback(hdev, en);
+ break;
+ case HNAE3_LOOP_EXTERNAL:
+ break;
+ default:
+ ret = -ENOTSUPP;
+ dev_err(&hdev->pdev->dev,
+ "loop_mode %d is not supported\n", loop_mode);
+ break;
+ }
+
+ if (ret)
+ return ret;
+
+ ret = hclge_tqp_enable(handle, en);
+ if (ret)
+ dev_err(&hdev->pdev->dev, "failed to %s tqp in loopback, ret = %d\n",
+ en ? "enable" : "disable", ret);
+
+ return ret;
+}
+
+static int hclge_set_default_loopback(struct hclge_dev *hdev)
+{
+ int ret;
+
+ ret = hclge_set_app_loopback(hdev, false);
+ if (ret)
+ return ret;
+
+ ret = hclge_cfg_common_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
+ if (ret)
+ return ret;
+
+ return hclge_cfg_common_loopback(hdev, false,
+ HNAE3_LOOP_PARALLEL_SERDES);
+}
+
+static void hclge_flush_link_update(struct hclge_dev *hdev)
+{
+#define HCLGE_FLUSH_LINK_TIMEOUT 100000
+
+ unsigned long last = hdev->serv_processed_cnt;
+ int i = 0;
+
+ while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
+ i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
+ last == hdev->serv_processed_cnt)
+ usleep_range(1, 1);
+}
+
+static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ if (enable) {
+ hclge_task_schedule(hdev, 0);
+ } else {
+ /* Set the DOWN flag here to disable link updating */
+ set_bit(HCLGE_STATE_DOWN, &hdev->state);
+
+ /* flush memory to make sure DOWN is seen by service task */
+ smp_mb__before_atomic();
+ hclge_flush_link_update(hdev);
+ }
+}
+
+static int hclge_ae_start(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ /* mac enable */
+ hclge_cfg_mac_mode(hdev, true);
+ clear_bit(HCLGE_STATE_DOWN, &hdev->state);
+ hdev->hw.mac.link = 0;
+
+ /* reset tqp stats */
+ hclge_comm_reset_tqp_stats(handle);
+
+ hclge_mac_start_phy(hdev);
+
+ return 0;
+}
+
+static void hclge_ae_stop(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ set_bit(HCLGE_STATE_DOWN, &hdev->state);
+ spin_lock_bh(&hdev->fd_rule_lock);
+ hclge_clear_arfs_rules(hdev);
+ spin_unlock_bh(&hdev->fd_rule_lock);
+
+	/* If it is not a PF reset or FLR, the firmware will disable the MAC,
+	 * so we only need to stop the phy here.
+	 */
+ if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) {
+ hclge_pfc_pause_en_cfg(hdev, HCLGE_PFC_TX_RX_DISABLE,
+ HCLGE_PFC_DISABLE);
+ if (hdev->reset_type != HNAE3_FUNC_RESET &&
+ hdev->reset_type != HNAE3_FLR_RESET) {
+ hclge_mac_stop_phy(hdev);
+ hclge_update_link_status(hdev);
+ return;
+ }
+ }
+
+ hclge_reset_tqp(handle);
+
+ hclge_config_mac_tnl_int(hdev, false);
+
+ /* Mac disable */
+ hclge_cfg_mac_mode(hdev, false);
+
+ hclge_mac_stop_phy(hdev);
+
+ /* reset tqp stats */
+ hclge_comm_reset_tqp_stats(handle);
+ hclge_update_link_status(hdev);
+}
+
+int hclge_vport_start(struct hclge_vport *vport)
+{
+ struct hclge_dev *hdev = vport->back;
+
+ set_bit(HCLGE_VPORT_STATE_INITED, &vport->state);
+ set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
+ set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
+ vport->last_active_jiffies = jiffies;
+ vport->need_notify = 0;
+
+ if (test_bit(vport->vport_id, hdev->vport_config_block)) {
+ if (vport->vport_id) {
+ hclge_restore_mac_table_common(vport);
+ hclge_restore_vport_vlan_table(vport);
+ } else {
+ hclge_restore_hw_table(hdev);
+ }
+ }
+
+ clear_bit(vport->vport_id, hdev->vport_config_block);
+
+ return 0;
+}
+
+void hclge_vport_stop(struct hclge_vport *vport)
+{
+ clear_bit(HCLGE_VPORT_STATE_INITED, &vport->state);
+ clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
+ vport->need_notify = 0;
+}
+
+static int hclge_client_start(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+
+ return hclge_vport_start(vport);
+}
+
+static void hclge_client_stop(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+
+ hclge_vport_stop(vport);
+}
+
+static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
+ u16 cmdq_resp, u8 resp_code,
+ enum hclge_mac_vlan_tbl_opcode op)
+{
+ struct hclge_dev *hdev = vport->back;
+
+ if (cmdq_resp) {
+ dev_err(&hdev->pdev->dev,
+ "cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
+ cmdq_resp);
+ return -EIO;
+ }
+
+ if (op == HCLGE_MAC_VLAN_ADD) {
+ if (!resp_code || resp_code == 1)
+ return 0;
+ else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
+ resp_code == HCLGE_ADD_MC_OVERFLOW)
+ return -ENOSPC;
+
+ dev_err(&hdev->pdev->dev,
+ "add mac addr failed for undefined, code=%u.\n",
+ resp_code);
+ return -EIO;
+ } else if (op == HCLGE_MAC_VLAN_REMOVE) {
+ if (!resp_code) {
+ return 0;
+ } else if (resp_code == 1) {
+ dev_dbg(&hdev->pdev->dev,
+ "remove mac addr failed for miss.\n");
+ return -ENOENT;
+ }
+
+ dev_err(&hdev->pdev->dev,
+ "remove mac addr failed for undefined, code=%u.\n",
+ resp_code);
+ return -EIO;
+ } else if (op == HCLGE_MAC_VLAN_LKUP) {
+ if (!resp_code) {
+ return 0;
+ } else if (resp_code == 1) {
+ dev_dbg(&hdev->pdev->dev,
+ "lookup mac addr failed for miss.\n");
+ return -ENOENT;
+ }
+
+ dev_err(&hdev->pdev->dev,
+ "lookup mac addr failed for undefined, code=%u.\n",
+ resp_code);
+ return -EIO;
+ }
+
+ dev_err(&hdev->pdev->dev,
+ "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
+
+ return -EINVAL;
+}
+
+static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
+{
+#define HCLGE_VF_NUM_IN_FIRST_DESC 192
+
+ unsigned int word_num;
+ unsigned int bit_num;
+
+ if (vfid > 255 || vfid < 0)
+ return -EIO;
+
+ if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
+ word_num = vfid / 32;
+ bit_num = vfid % 32;
+ if (clr)
+ desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
+ else
+ desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
+ } else {
+ word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
+ bit_num = vfid % 32;
+ if (clr)
+ desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
+ else
+ desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
+ }
+
+ return 0;
+}
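+
+/* Worked example added for clarity: the first 192 function ids are kept in
+ * desc[1] and the rest in desc[2]. For vfid = 200: word_num = (200 - 192) /
+ * 32 = 0 and bit_num = 200 % 32 = 8, so bit 8 of desc[2].data[0] is updated.
+ */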
+
+static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
+{
+#define HCLGE_DESC_NUMBER 3
+#define HCLGE_FUNC_NUMBER_PER_DESC 6
+ int i, j;
+
+ for (i = 1; i < HCLGE_DESC_NUMBER; i++)
+ for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
+ if (desc[i].data[j])
+ return false;
+
+ return true;
+}
+
+static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
+ const u8 *addr, bool is_mc)
+{
+ const unsigned char *mac_addr = addr;
+ u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
+ (mac_addr[0]) | (mac_addr[1] << 8);
+ u32 low_val = mac_addr[4] | (mac_addr[5] << 8);
+
+ hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
+ if (is_mc) {
+ hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
+ hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
+ }
+
+ new_req->mac_addr_hi32 = cpu_to_le32(high_val);
+ new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
+}
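+
+/* Worked example added for clarity: the MAC bytes are packed low byte
+ * first, so for address 00:11:22:33:44:55 the function builds
+ * high_val = 0x33221100 and low_val = 0x5544 before the cpu_to_le
+ * conversions.
+ */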
+
+static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
+ struct hclge_mac_vlan_tbl_entry_cmd *req)
+{
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_desc desc;
+ u8 resp_code;
+ u16 retval;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
+
+ memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "del mac addr failed for cmd_send, ret =%d.\n",
+ ret);
+ return ret;
+ }
+ resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
+ retval = le16_to_cpu(desc.retval);
+
+ return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
+ HCLGE_MAC_VLAN_REMOVE);
+}
+
+static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
+ struct hclge_mac_vlan_tbl_entry_cmd *req,
+ struct hclge_desc *desc,
+ bool is_mc)
+{
+ struct hclge_dev *hdev = vport->back;
+ u8 resp_code;
+ u16 retval;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
+ if (is_mc) {
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ memcpy(desc[0].data,
+ req,
+ sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
+ hclge_cmd_setup_basic_desc(&desc[1],
+ HCLGE_OPC_MAC_VLAN_ADD,
+ true);
+ desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[2],
+ HCLGE_OPC_MAC_VLAN_ADD,
+ true);
+ ret = hclge_cmd_send(&hdev->hw, desc, 3);
+ } else {
+ memcpy(desc[0].data,
+ req,
+ sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
+ ret = hclge_cmd_send(&hdev->hw, desc, 1);
+ }
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "lookup mac addr failed for cmd_send, ret =%d.\n",
+ ret);
+ return ret;
+ }
+ resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
+ retval = le16_to_cpu(desc[0].retval);
+
+ return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
+ HCLGE_MAC_VLAN_LKUP);
+}
+
+static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
+ struct hclge_mac_vlan_tbl_entry_cmd *req,
+ struct hclge_desc *mc_desc)
+{
+ struct hclge_dev *hdev = vport->back;
+ int cfg_status;
+ u8 resp_code;
+ u16 retval;
+ int ret;
+
+ if (!mc_desc) {
+ struct hclge_desc desc;
+
+ hclge_cmd_setup_basic_desc(&desc,
+ HCLGE_OPC_MAC_VLAN_ADD,
+ false);
+ memcpy(desc.data, req,
+ sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
+ retval = le16_to_cpu(desc.retval);
+
+ cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
+ resp_code,
+ HCLGE_MAC_VLAN_ADD);
+ } else {
+ hclge_comm_cmd_reuse_desc(&mc_desc[0], false);
+ mc_desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ hclge_comm_cmd_reuse_desc(&mc_desc[1], false);
+ mc_desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ hclge_comm_cmd_reuse_desc(&mc_desc[2], false);
+ mc_desc[2].flag &= cpu_to_le16(~HCLGE_COMM_CMD_FLAG_NEXT);
+ memcpy(mc_desc[0].data, req,
+ sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
+ ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
+ resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
+ retval = le16_to_cpu(mc_desc[0].retval);
+
+ cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
+ resp_code,
+ HCLGE_MAC_VLAN_ADD);
+ }
+
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "add mac addr failed for cmd_send, ret =%d.\n",
+ ret);
+ return ret;
+ }
+
+ return cfg_status;
+}
+
+static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
+ u16 *allocated_size)
+{
+ struct hclge_umv_spc_alc_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ req = (struct hclge_umv_spc_alc_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
+
+ req->space_size = cpu_to_le32(space_size);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
+ ret);
+ return ret;
+ }
+
+ *allocated_size = le32_to_cpu(desc.data[1]);
+
+ return 0;
+}
+
+static int hclge_init_umv_space(struct hclge_dev *hdev)
+{
+ u16 allocated_size = 0;
+ int ret;
+
+ ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
+ if (ret)
+ return ret;
+
+ if (allocated_size < hdev->wanted_umv_size)
+ dev_warn(&hdev->pdev->dev,
+ "failed to alloc umv space, want %u, get %u\n",
+ hdev->wanted_umv_size, allocated_size);
+
+ hdev->max_umv_size = allocated_size;
+ hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
+ hdev->share_umv_size = hdev->priv_umv_size +
+ hdev->max_umv_size % (hdev->num_alloc_vport + 1);
+
+ if (hdev->ae_dev->dev_specs.mc_mac_size)
+ set_bit(HNAE3_DEV_SUPPORT_MC_MAC_MNG_B, hdev->ae_dev->caps);
+
+ return 0;
+}
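+
+/* Worked example added for clarity, with hypothetical numbers: for
+ * allocated_size = 258 and num_alloc_vport + 1 = 4, the split above gives
+ * priv_umv_size = 258 / 4 = 64 and share_umv_size = 64 + 258 % 4 = 66.
+ */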
+
+static void hclge_reset_umv_space(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport;
+ int i;
+
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ vport = &hdev->vport[i];
+ vport->used_umv_num = 0;
+ }
+
+ mutex_lock(&hdev->vport_lock);
+ hdev->share_umv_size = hdev->priv_umv_size +
+ hdev->max_umv_size % (hdev->num_alloc_vport + 1);
+ mutex_unlock(&hdev->vport_lock);
+
+ hdev->used_mc_mac_num = 0;
+}
+
+static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
+{
+ struct hclge_dev *hdev = vport->back;
+ bool is_full;
+
+ if (need_lock)
+ mutex_lock(&hdev->vport_lock);
+
+ is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
+ hdev->share_umv_size == 0);
+
+ if (need_lock)
+ mutex_unlock(&hdev->vport_lock);
+
+ return is_full;
+}
+
+static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
+{
+ struct hclge_dev *hdev = vport->back;
+
+ if (is_free) {
+ if (vport->used_umv_num > hdev->priv_umv_size)
+ hdev->share_umv_size++;
+
+ if (vport->used_umv_num > 0)
+ vport->used_umv_num--;
+ } else {
+ if (vport->used_umv_num >= hdev->priv_umv_size &&
+ hdev->share_umv_size > 0)
+ hdev->share_umv_size--;
+ vport->used_umv_num++;
+ }
+}
+
+static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
+ const u8 *mac_addr)
+{
+ struct hclge_mac_node *mac_node, *tmp;
+
+ list_for_each_entry_safe(mac_node, tmp, list, node)
+ if (ether_addr_equal(mac_addr, mac_node->mac_addr))
+ return mac_node;
+
+ return NULL;
+}
+
+static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
+ enum HCLGE_MAC_NODE_STATE state)
+{
+ switch (state) {
+ /* from set_rx_mode or tmp_add_list */
+ case HCLGE_MAC_TO_ADD:
+ if (mac_node->state == HCLGE_MAC_TO_DEL)
+ mac_node->state = HCLGE_MAC_ACTIVE;
+ break;
+ /* only from set_rx_mode */
+ case HCLGE_MAC_TO_DEL:
+ if (mac_node->state == HCLGE_MAC_TO_ADD) {
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ } else {
+ mac_node->state = HCLGE_MAC_TO_DEL;
+ }
+ break;
+	/* only from tmp_add_list, where the mac_node->state won't be
+	 * ACTIVE.
+	 */
+ case HCLGE_MAC_ACTIVE:
+ if (mac_node->state == HCLGE_MAC_TO_ADD)
+ mac_node->state = HCLGE_MAC_ACTIVE;
+
+ break;
+ }
+}
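+
+/* Summary added for clarity of the state transitions handled above:
+ * TO_ADD request on a TO_DEL node  -> ACTIVE (keep the entry)
+ * TO_DEL request on a TO_ADD node  -> node freed (never written to HW)
+ * TO_DEL request on any other node -> TO_DEL
+ * ACTIVE update on a TO_ADD node   -> ACTIVE
+ */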
+
+int hclge_update_mac_list(struct hclge_vport *vport,
+ enum HCLGE_MAC_NODE_STATE state,
+ enum HCLGE_MAC_ADDR_TYPE mac_type,
+ const unsigned char *addr)
+{
+ char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_mac_node *mac_node;
+ struct list_head *list;
+
+ list = (mac_type == HCLGE_MAC_ADDR_UC) ?
+ &vport->uc_mac_list : &vport->mc_mac_list;
+
+ spin_lock_bh(&vport->mac_list_lock);
+
+	/* if the mac addr is already in the mac list, there is no need to add
+	 * a new one; just check the mac addr state and either convert it to a
+	 * new state, remove it, or do nothing.
+	 */
+ mac_node = hclge_find_mac_node(list, addr);
+ if (mac_node) {
+ hclge_update_mac_node(mac_node, state);
+ spin_unlock_bh(&vport->mac_list_lock);
+ set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
+ return 0;
+ }
+
+	/* if this address was never added, there is no need to delete it */
+ if (state == HCLGE_MAC_TO_DEL) {
+ spin_unlock_bh(&vport->mac_list_lock);
+ hnae3_format_mac_addr(format_mac_addr, addr);
+ dev_err(&hdev->pdev->dev,
+ "failed to delete address %s from mac list\n",
+ format_mac_addr);
+ return -ENOENT;
+ }
+
+ mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
+ if (!mac_node) {
+ spin_unlock_bh(&vport->mac_list_lock);
+ return -ENOMEM;
+ }
+
+ set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
+
+ mac_node->state = state;
+ ether_addr_copy(mac_node->mac_addr, addr);
+ list_add_tail(&mac_node->node, list);
+
+ spin_unlock_bh(&vport->mac_list_lock);
+
+ return 0;
+}
+
+static int hclge_add_uc_addr(struct hnae3_handle *handle,
+ const unsigned char *addr)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+
+ return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
+ addr);
+}
+
+int hclge_add_uc_addr_common(struct hclge_vport *vport,
+ const unsigned char *addr)
+{
+ char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_mac_vlan_tbl_entry_cmd req;
+ struct hclge_desc desc;
+ u16 egress_port = 0;
+ int ret;
+
+ /* mac addr check */
+ if (is_zero_ether_addr(addr) ||
+ is_broadcast_ether_addr(addr) ||
+ is_multicast_ether_addr(addr)) {
+ hnae3_format_mac_addr(format_mac_addr, addr);
+ dev_err(&hdev->pdev->dev,
+ "Set_uc mac err! invalid mac:%s. is_zero:%d,is_br=%d,is_mul=%d\n",
+ format_mac_addr, is_zero_ether_addr(addr),
+ is_broadcast_ether_addr(addr),
+ is_multicast_ether_addr(addr));
+ return -EINVAL;
+ }
+
+ memset(&req, 0, sizeof(req));
+
+ hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
+ HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
+
+ req.egress_port = cpu_to_le16(egress_port);
+
+ hclge_prepare_mac_addr(&req, addr, false);
+
+	/* Look up the mac address in the mac_vlan table, and add
+	 * it if the entry does not exist. Duplicate unicast entries
+	 * are not allowed in the mac_vlan table.
+	 */
+ ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
+ if (ret == -ENOENT) {
+ mutex_lock(&hdev->vport_lock);
+ if (!hclge_is_umv_space_full(vport, false)) {
+ ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
+ if (!ret)
+ hclge_update_umv_space(vport, false);
+ mutex_unlock(&hdev->vport_lock);
+ return ret;
+ }
+ mutex_unlock(&hdev->vport_lock);
+
+ if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
+ dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
+ hdev->priv_umv_size);
+
+ return -ENOSPC;
+ }
+
+ /* check if we just hit the duplicate */
+ if (!ret)
+ return -EEXIST;
+
+ return ret;
+}
+
+static int hclge_rm_uc_addr(struct hnae3_handle *handle,
+ const unsigned char *addr)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+
+ return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
+ addr);
+}
+
+int hclge_rm_uc_addr_common(struct hclge_vport *vport,
+ const unsigned char *addr)
+{
+ char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_mac_vlan_tbl_entry_cmd req;
+ int ret;
+
+ /* mac addr check */
+ if (is_zero_ether_addr(addr) ||
+ is_broadcast_ether_addr(addr) ||
+ is_multicast_ether_addr(addr)) {
+ hnae3_format_mac_addr(format_mac_addr, addr);
+ dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%s.\n",
+ format_mac_addr);
+ return -EINVAL;
+ }
+
+ memset(&req, 0, sizeof(req));
+ hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
+ hclge_prepare_mac_addr(&req, addr, false);
+ ret = hclge_remove_mac_vlan_tbl(vport, &req);
+ if (!ret || ret == -ENOENT) {
+ mutex_lock(&hdev->vport_lock);
+ hclge_update_umv_space(vport, true);
+ mutex_unlock(&hdev->vport_lock);
+ return 0;
+ }
+
+ return ret;
+}
+
+static int hclge_add_mc_addr(struct hnae3_handle *handle,
+ const unsigned char *addr)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+
+ return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
+ addr);
+}
+
+int hclge_add_mc_addr_common(struct hclge_vport *vport,
+ const unsigned char *addr)
+{
+ char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_mac_vlan_tbl_entry_cmd req;
+ struct hclge_desc desc[3];
+ bool is_new_addr = false;
+ int status;
+
+ /* mac addr check */
+ if (!is_multicast_ether_addr(addr)) {
+ hnae3_format_mac_addr(format_mac_addr, addr);
+ dev_err(&hdev->pdev->dev,
+ "Add mc mac err! invalid mac:%s.\n",
+ format_mac_addr);
+ return -EINVAL;
+ }
+ memset(&req, 0, sizeof(req));
+ hclge_prepare_mac_addr(&req, addr, true);
+ status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
+ if (status) {
+ if (hnae3_ae_dev_mc_mac_mng_supported(hdev->ae_dev) &&
+ hdev->used_mc_mac_num >=
+ hdev->ae_dev->dev_specs.mc_mac_size)
+ goto err_no_space;
+
+ is_new_addr = true;
+
+		/* This mac addr does not exist, add a new entry for it */
+ memset(desc[0].data, 0, sizeof(desc[0].data));
+ memset(desc[1].data, 0, sizeof(desc[0].data));
+ memset(desc[2].data, 0, sizeof(desc[0].data));
+ }
+ status = hclge_update_desc_vfid(desc, vport->vport_id, false);
+ if (status)
+ return status;
+ status = hclge_add_mac_vlan_tbl(vport, &req, desc);
+ if (status == -ENOSPC)
+ goto err_no_space;
+ else if (!status && is_new_addr)
+ hdev->used_mc_mac_num++;
+
+ return status;
+
+err_no_space:
+	/* if already overflowed, do not print every time */
+ if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE)) {
+ vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
+ dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
+ }
+
+ return -ENOSPC;
+}
+
+static int hclge_rm_mc_addr(struct hnae3_handle *handle,
+ const unsigned char *addr)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+
+ return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
+ addr);
+}
+
+int hclge_rm_mc_addr_common(struct hclge_vport *vport,
+ const unsigned char *addr)
+{
+ char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_mac_vlan_tbl_entry_cmd req;
+ enum hclge_comm_cmd_status status;
+ struct hclge_desc desc[3];
+
+ /* mac addr check */
+ if (!is_multicast_ether_addr(addr)) {
+ hnae3_format_mac_addr(format_mac_addr, addr);
+ dev_dbg(&hdev->pdev->dev,
+ "Remove mc mac err! invalid mac:%s.\n",
+ format_mac_addr);
+ return -EINVAL;
+ }
+
+ memset(&req, 0, sizeof(req));
+ hclge_prepare_mac_addr(&req, addr, true);
+ status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
+ if (!status) {
+		/* This mac addr exists, remove this handle's VFID from it */
+ status = hclge_update_desc_vfid(desc, vport->vport_id, true);
+ if (status)
+ return status;
+
+ if (hclge_is_all_function_id_zero(desc)) {
+		/* All the vfids are zero, so delete this entry */
+ status = hclge_remove_mac_vlan_tbl(vport, &req);
+ if (!status)
+ hdev->used_mc_mac_num--;
+ } else {
+		/* Not all the vfids are zero, so just update the vfids */
+ status = hclge_add_mac_vlan_tbl(vport, &req, desc);
+ }
+ } else if (status == -ENOENT) {
+ status = 0;
+ }
+
+ return status;
+}
+
+static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
+ struct list_head *list,
+ enum HCLGE_MAC_ADDR_TYPE mac_type)
+{
+ int (*sync)(struct hclge_vport *vport, const unsigned char *addr);
+ struct hclge_mac_node *mac_node, *tmp;
+ int ret;
+
+ if (mac_type == HCLGE_MAC_ADDR_UC)
+ sync = hclge_add_uc_addr_common;
+ else
+ sync = hclge_add_mc_addr_common;
+
+ list_for_each_entry_safe(mac_node, tmp, list, node) {
+ ret = sync(vport, mac_node->mac_addr);
+ if (!ret) {
+ mac_node->state = HCLGE_MAC_ACTIVE;
+ } else {
+ set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
+ &vport->state);
+
+			/* If one unicast mac address already exists in hardware,
+			 * we still need to try whether other unicast mac addresses
+			 * are new addresses that can be added.
+			 * Multicast mac addresses are reusable: even though there
+			 * is no space to add a new multicast mac address, we should
+			 * still check whether other mac addresses already exist in
+			 * hardware and can be reused.
+			 */
+ if ((mac_type == HCLGE_MAC_ADDR_UC && ret != -EEXIST) ||
+ (mac_type == HCLGE_MAC_ADDR_MC && ret != -ENOSPC))
+ break;
+ }
+ }
+}
+
+static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
+ struct list_head *list,
+ enum HCLGE_MAC_ADDR_TYPE mac_type)
+{
+ int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
+ struct hclge_mac_node *mac_node, *tmp;
+ int ret;
+
+ if (mac_type == HCLGE_MAC_ADDR_UC)
+ unsync = hclge_rm_uc_addr_common;
+ else
+ unsync = hclge_rm_mc_addr_common;
+
+ list_for_each_entry_safe(mac_node, tmp, list, node) {
+ ret = unsync(vport, mac_node->mac_addr);
+ if (!ret || ret == -ENOENT) {
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ } else {
+ set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
+ &vport->state);
+ break;
+ }
+ }
+}
+
+static bool hclge_sync_from_add_list(struct list_head *add_list,
+ struct list_head *mac_list)
+{
+ struct hclge_mac_node *mac_node, *tmp, *new_node;
+ bool all_added = true;
+
+ list_for_each_entry_safe(mac_node, tmp, add_list, node) {
+ if (mac_node->state == HCLGE_MAC_TO_ADD)
+ all_added = false;
+
+		/* if the mac address from tmp_add_list is not in the
+		 * uc/mc_mac_list, it means a TO_DEL request was received
+		 * during the time window of adding the mac address into the
+		 * mac table. If the mac_node state is ACTIVE, change it to
+		 * TO_DEL so it will be removed next time. Otherwise it must
+		 * be TO_ADD, meaning this address hasn't been added into the
+		 * mac table yet, so just remove the mac node.
+		 */
+ new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
+ if (new_node) {
+ hclge_update_mac_node(new_node, mac_node->state);
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ } else if (mac_node->state == HCLGE_MAC_ACTIVE) {
+ mac_node->state = HCLGE_MAC_TO_DEL;
+ list_move_tail(&mac_node->node, mac_list);
+ } else {
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ }
+ }
+
+ return all_added;
+}
+
+static void hclge_sync_from_del_list(struct list_head *del_list,
+ struct list_head *mac_list)
+{
+ struct hclge_mac_node *mac_node, *tmp, *new_node;
+
+ list_for_each_entry_safe(mac_node, tmp, del_list, node) {
+ new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
+ if (new_node) {
+			/* If the mac addr exists in the mac list, it means a new
+			 * TO_ADD request was received during the time window of
+			 * configuring the mac address. Since the mac node state is
+			 * TO_ADD and the address is still in the hardware (because
+			 * the delete failed), we just need to change the mac node
+			 * state to ACTIVE.
+			 */
+ new_node->state = HCLGE_MAC_ACTIVE;
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ } else {
+ list_move_tail(&mac_node->node, mac_list);
+ }
+ }
+}
+
+static void hclge_update_overflow_flags(struct hclge_vport *vport,
+ enum HCLGE_MAC_ADDR_TYPE mac_type,
+ bool is_all_added)
+{
+ if (mac_type == HCLGE_MAC_ADDR_UC) {
+ if (is_all_added)
+ vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
+ else if (hclge_is_umv_space_full(vport, true))
+ vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
+ } else {
+ if (is_all_added)
+ vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
+ else
+ vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
+ }
+}
+
+static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
+ enum HCLGE_MAC_ADDR_TYPE mac_type)
+{
+ struct hclge_mac_node *mac_node, *tmp, *new_node;
+ struct list_head tmp_add_list, tmp_del_list;
+ struct list_head *list;
+ bool all_added;
+
+ INIT_LIST_HEAD(&tmp_add_list);
+ INIT_LIST_HEAD(&tmp_del_list);
+
+	/* move the mac addrs to the tmp_add_list and tmp_del_list, then
+	 * we can add/delete these mac addrs outside the spin lock
+	 */
+ list = (mac_type == HCLGE_MAC_ADDR_UC) ?
+ &vport->uc_mac_list : &vport->mc_mac_list;
+
+ spin_lock_bh(&vport->mac_list_lock);
+
+ list_for_each_entry_safe(mac_node, tmp, list, node) {
+ switch (mac_node->state) {
+ case HCLGE_MAC_TO_DEL:
+ list_move_tail(&mac_node->node, &tmp_del_list);
+ break;
+ case HCLGE_MAC_TO_ADD:
+ new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
+ if (!new_node)
+ goto stop_traverse;
+ ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
+ new_node->state = mac_node->state;
+ list_add_tail(&new_node->node, &tmp_add_list);
+ break;
+ default:
+ break;
+ }
+ }
+
+stop_traverse:
+ spin_unlock_bh(&vport->mac_list_lock);
+
+ /* delete first, in order to get max mac table space for adding */
+ hclge_unsync_vport_mac_list(vport, &tmp_del_list, mac_type);
+ hclge_sync_vport_mac_list(vport, &tmp_add_list, mac_type);
+
+	/* if adding/deleting some mac addresses failed, move them back to
+	 * the mac_list and retry next time.
+	 */
+ spin_lock_bh(&vport->mac_list_lock);
+
+ hclge_sync_from_del_list(&tmp_del_list, list);
+ all_added = hclge_sync_from_add_list(&tmp_add_list, list);
+
+ spin_unlock_bh(&vport->mac_list_lock);
+
+ hclge_update_overflow_flags(vport, mac_type, all_added);
+}
+
+static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
+{
+ struct hclge_dev *hdev = vport->back;
+
+ if (test_bit(vport->vport_id, hdev->vport_config_block))
+ return false;
+
+ if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
+ return true;
+
+ return false;
+}
+
+static void hclge_sync_mac_table(struct hclge_dev *hdev)
+{
+ int i;
+
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ struct hclge_vport *vport = &hdev->vport[i];
+
+ if (!hclge_need_sync_mac_table(vport))
+ continue;
+
+ hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
+ hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
+ }
+}
+
+static void hclge_build_del_list(struct list_head *list,
+ bool is_del_list,
+ struct list_head *tmp_del_list)
+{
+ struct hclge_mac_node *mac_cfg, *tmp;
+
+ list_for_each_entry_safe(mac_cfg, tmp, list, node) {
+ switch (mac_cfg->state) {
+ case HCLGE_MAC_TO_DEL:
+ case HCLGE_MAC_ACTIVE:
+ list_move_tail(&mac_cfg->node, tmp_del_list);
+ break;
+ case HCLGE_MAC_TO_ADD:
+ if (is_del_list) {
+ list_del(&mac_cfg->node);
+ kfree(mac_cfg);
+ }
+ break;
+ }
+ }
+}
+
+static void hclge_unsync_del_list(struct hclge_vport *vport,
+ int (*unsync)(struct hclge_vport *vport,
+ const unsigned char *addr),
+ bool is_del_list,
+ struct list_head *tmp_del_list)
+{
+ struct hclge_mac_node *mac_cfg, *tmp;
+ int ret;
+
+ list_for_each_entry_safe(mac_cfg, tmp, tmp_del_list, node) {
+ ret = unsync(vport, mac_cfg->mac_addr);
+ if (!ret || ret == -ENOENT) {
+			/* clear all mac addrs from hardware, but keep these
+			 * mac addrs in the mac list, and restore them after
+			 * the vf reset has finished.
+			 */
+ if (!is_del_list &&
+ mac_cfg->state == HCLGE_MAC_ACTIVE) {
+ mac_cfg->state = HCLGE_MAC_TO_ADD;
+ } else {
+ list_del(&mac_cfg->node);
+ kfree(mac_cfg);
+ }
+ } else if (is_del_list) {
+ mac_cfg->state = HCLGE_MAC_TO_DEL;
+ }
+ }
+}
+
+void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
+ enum HCLGE_MAC_ADDR_TYPE mac_type)
+{
+ int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
+ struct hclge_dev *hdev = vport->back;
+ struct list_head tmp_del_list, *list;
+
+ if (mac_type == HCLGE_MAC_ADDR_UC) {
+ list = &vport->uc_mac_list;
+ unsync = hclge_rm_uc_addr_common;
+ } else {
+ list = &vport->mc_mac_list;
+ unsync = hclge_rm_mc_addr_common;
+ }
+
+ INIT_LIST_HEAD(&tmp_del_list);
+
+ if (!is_del_list)
+ set_bit(vport->vport_id, hdev->vport_config_block);
+
+ spin_lock_bh(&vport->mac_list_lock);
+
+ hclge_build_del_list(list, is_del_list, &tmp_del_list);
+
+ spin_unlock_bh(&vport->mac_list_lock);
+
+ hclge_unsync_del_list(vport, unsync, is_del_list, &tmp_del_list);
+
+ spin_lock_bh(&vport->mac_list_lock);
+
+ hclge_sync_from_del_list(&tmp_del_list, list);
+
+ spin_unlock_bh(&vport->mac_list_lock);
+}
+
+/* remove all mac addresses when uninitializing */
+static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
+ enum HCLGE_MAC_ADDR_TYPE mac_type)
+{
+ struct hclge_mac_node *mac_node, *tmp;
+ struct hclge_dev *hdev = vport->back;
+ struct list_head tmp_del_list, *list;
+
+ INIT_LIST_HEAD(&tmp_del_list);
+
+ list = (mac_type == HCLGE_MAC_ADDR_UC) ?
+ &vport->uc_mac_list : &vport->mc_mac_list;
+
+ spin_lock_bh(&vport->mac_list_lock);
+
+ list_for_each_entry_safe(mac_node, tmp, list, node) {
+ switch (mac_node->state) {
+ case HCLGE_MAC_TO_DEL:
+ case HCLGE_MAC_ACTIVE:
+ list_move_tail(&mac_node->node, &tmp_del_list);
+ break;
+ case HCLGE_MAC_TO_ADD:
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ break;
+ }
+ }
+
+ spin_unlock_bh(&vport->mac_list_lock);
+
+ hclge_unsync_vport_mac_list(vport, &tmp_del_list, mac_type);
+
+ if (!list_empty(&tmp_del_list))
+ dev_warn(&hdev->pdev->dev,
+ "uninit %s mac list for vport %u not completely.\n",
+ mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
+ vport->vport_id);
+
+ list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ }
+}
+
+static void hclge_uninit_mac_table(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport;
+ int i;
+
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ vport = &hdev->vport[i];
+ hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
+ hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
+ }
+}
+
+static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
+ u16 cmdq_resp, u8 resp_code)
+{
+#define HCLGE_ETHERTYPE_SUCCESS_ADD 0
+#define HCLGE_ETHERTYPE_ALREADY_ADD 1
+#define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2
+#define HCLGE_ETHERTYPE_KEY_CONFLICT 3
+
+ int return_status;
+
+ if (cmdq_resp) {
+ dev_err(&hdev->pdev->dev,
+ "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
+ cmdq_resp);
+ return -EIO;
+ }
+
+ switch (resp_code) {
+ case HCLGE_ETHERTYPE_SUCCESS_ADD:
+ case HCLGE_ETHERTYPE_ALREADY_ADD:
+ return_status = 0;
+ break;
+ case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
+ dev_err(&hdev->pdev->dev,
+ "add mac ethertype failed for manager table overflow.\n");
+ return_status = -EIO;
+ break;
+ case HCLGE_ETHERTYPE_KEY_CONFLICT:
+ dev_err(&hdev->pdev->dev,
+ "add mac ethertype failed for key conflict.\n");
+ return_status = -EIO;
+ break;
+ default:
+ dev_err(&hdev->pdev->dev,
+ "add mac ethertype failed for undefined, code=%u.\n",
+ resp_code);
+ return_status = -EIO;
+ }
+
+ return return_status;
+}
+
+static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
+ u8 *mac_addr)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
+ struct hclge_dev *hdev = vport->back;
+
+ vport = hclge_get_vf_vport(hdev, vf);
+ if (!vport)
+ return -EINVAL;
+
+ hnae3_format_mac_addr(format_mac_addr, mac_addr);
+ if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
+ dev_info(&hdev->pdev->dev,
+ "Specified MAC(=%s) is same as before, no change committed!\n",
+ format_mac_addr);
+ return 0;
+ }
+
+ ether_addr_copy(vport->vf_info.mac, mac_addr);
+
+ /* there is a time window before the PF knows the VF is down,
+ * which may cause the mailbox send to fail; that does not matter,
+ * since the VF will query the MAC when it reinitializes.
+ */
+ if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
+ dev_info(&hdev->pdev->dev,
+ "MAC of VF %d has been set to %s, and it will be reinitialized!\n",
+ vf, format_mac_addr);
+ (void)hclge_inform_reset_assert_to_vf(vport);
+ return 0;
+ }
+
+ dev_info(&hdev->pdev->dev,
+ "MAC of VF %d has been set to %s, will be active after VF reset\n",
+ vf, format_mac_addr);
+ return 0;
+}
+
+static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
+ const struct hclge_mac_mgr_tbl_entry_cmd *req)
+{
+ struct hclge_desc desc;
+ u8 resp_code;
+ u16 retval;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
+ memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "add mac ethertype failed for cmd_send, ret =%d.\n",
+ ret);
+ return ret;
+ }
+
+ resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
+ retval = le16_to_cpu(desc.retval);
+
+ return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
+}
+
+static int init_mgr_tbl(struct hclge_dev *hdev)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
+ ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "add mac ethertype failed, ret =%d.\n",
+ ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ ether_addr_copy(p, hdev->hw.mac.mac_addr);
+}
+
+int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
+ const u8 *old_addr, const u8 *new_addr)
+{
+ struct list_head *list = &vport->uc_mac_list;
+ struct hclge_mac_node *old_node, *new_node;
+
+ new_node = hclge_find_mac_node(list, new_addr);
+ if (!new_node) {
+ new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
+ if (!new_node)
+ return -ENOMEM;
+
+ new_node->state = HCLGE_MAC_TO_ADD;
+ ether_addr_copy(new_node->mac_addr, new_addr);
+ list_add(&new_node->node, list);
+ } else {
+ if (new_node->state == HCLGE_MAC_TO_DEL)
+ new_node->state = HCLGE_MAC_ACTIVE;
+
+ /* make sure the new addr is at the list head, so the dev
+ * addr is not skipped due to the umv space limitation when
+ * the mac table is re-added after a global/imp reset clears
+ * it in hardware.
+ */
+ list_move(&new_node->node, list);
+ }
+
+ if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
+ old_node = hclge_find_mac_node(list, old_addr);
+ if (old_node) {
+ if (old_node->state == HCLGE_MAC_TO_ADD) {
+ list_del(&old_node->node);
+ kfree(old_node);
+ } else {
+ old_node->state = HCLGE_MAC_TO_DEL;
+ }
+ }
+ }
+
+ set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
+
+ return 0;
+}
+
+static int hclge_set_mac_addr(struct hnae3_handle *handle, const void *p,
+ bool is_first)
+{
+ const unsigned char *new_addr = (const unsigned char *)p;
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
+ struct hclge_dev *hdev = vport->back;
+ unsigned char *old_addr = NULL;
+ int ret;
+
+ /* mac addr check */
+ if (is_zero_ether_addr(new_addr) ||
+ is_broadcast_ether_addr(new_addr) ||
+ is_multicast_ether_addr(new_addr)) {
+ hnae3_format_mac_addr(format_mac_addr, new_addr);
+ dev_err(&hdev->pdev->dev,
+ "change uc mac err! invalid mac: %s.\n",
+ format_mac_addr);
+ return -EINVAL;
+ }
+
+ ret = hclge_pause_addr_cfg(hdev, new_addr);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to configure mac pause address, ret = %d\n",
+ ret);
+ return ret;
+ }
+
+ if (!is_first)
+ old_addr = hdev->hw.mac.mac_addr;
+
+ spin_lock_bh(&vport->mac_list_lock);
+ ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
+ if (ret) {
+ hnae3_format_mac_addr(format_mac_addr, new_addr);
+ dev_err(&hdev->pdev->dev,
+ "failed to change the mac addr:%s, ret = %d\n",
+ format_mac_addr, ret);
+ spin_unlock_bh(&vport->mac_list_lock);
+
+ if (!is_first)
+ hclge_pause_addr_cfg(hdev, old_addr);
+
+ return ret;
+ }
+ /* we must update the dev addr under the spin lock to prevent it
+ * from being removed by the set_rx_mode path.
+ */
+ ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
+ spin_unlock_bh(&vport->mac_list_lock);
+
+ hclge_task_schedule(hdev, 0);
+
+ return 0;
+}
+
+static int hclge_mii_ioctl(struct hclge_dev *hdev, struct ifreq *ifr, int cmd)
+{
+ struct mii_ioctl_data *data = if_mii(ifr);
+
+ if (!hnae3_dev_phy_imp_supported(hdev))
+ return -EOPNOTSUPP;
+
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ data->phy_id = hdev->hw.mac.phy_addr;
+ /* this command reads phy id and register at the same time */
+ fallthrough;
+ case SIOCGMIIREG:
+ data->val_out = hclge_read_phy_reg(hdev, data->reg_num);
+ return 0;
+
+ case SIOCSMIIREG:
+ return hclge_write_phy_reg(hdev, data->reg_num, data->val_in);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
+ int cmd)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ switch (cmd) {
+ case SIOCGHWTSTAMP:
+ return hclge_ptp_get_cfg(hdev, ifr);
+ case SIOCSHWTSTAMP:
+ return hclge_ptp_set_cfg(hdev, ifr);
+ default:
+ if (!hdev->hw.mac.phydev)
+ return hclge_mii_ioctl(hdev, ifr, cmd);
+ }
+
+ return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
+}
+
+static int hclge_set_port_vlan_filter_bypass(struct hclge_dev *hdev, u8 vf_id,
+ bool bypass_en)
+{
+ struct hclge_port_vlan_filter_bypass_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PORT_VLAN_BYPASS, false);
+ req = (struct hclge_port_vlan_filter_bypass_cmd *)desc.data;
+ req->vf_id = vf_id;
+ hnae3_set_bit(req->bypass_state, HCLGE_INGRESS_BYPASS_B,
+ bypass_en ? 1 : 0);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "failed to set vport%u port vlan filter bypass state, ret = %d.\n",
+ vf_id, ret);
+
+ return ret;
+}
+
+static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
+ u8 fe_type, bool filter_en, u8 vf_id)
+{
+ struct hclge_vlan_filter_ctrl_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ /* read current vlan filter parameter */
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
+ req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
+ req->vlan_type = vlan_type;
+ req->vf_id = vf_id;
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "failed to get vport%u vlan filter config, ret = %d.\n",
+ vf_id, ret);
+ return ret;
+ }
+
+ /* modify and write new config parameter */
+ hclge_comm_cmd_reuse_desc(&desc, false);
+ req->vlan_fe = filter_en ?
+ (req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev, "failed to set vport%u vlan filter, ret = %d.\n",
+ vf_id, ret);
+
+ return ret;
+}
+
+static int hclge_set_vport_vlan_filter(struct hclge_vport *vport, bool enable)
+{
+ struct hclge_dev *hdev = vport->back;
+ struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
+ int ret;
+
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
+ return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
+ HCLGE_FILTER_FE_EGRESS_V1_B,
+ enable, vport->vport_id);
+
+ ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
+ HCLGE_FILTER_FE_EGRESS, enable,
+ vport->vport_id);
+ if (ret)
+ return ret;
+
+ if (test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, ae_dev->caps)) {
+ ret = hclge_set_port_vlan_filter_bypass(hdev, vport->vport_id,
+ !enable);
+ } else if (!vport->vport_id) {
+ if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps))
+ enable = false;
+
+ ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
+ HCLGE_FILTER_FE_INGRESS,
+ enable, 0);
+ }
+
+ return ret;
+}
+
+static bool hclge_need_enable_vport_vlan_filter(struct hclge_vport *vport)
+{
+ struct hnae3_handle *handle = &vport->nic;
+ struct hclge_vport_vlan_cfg *vlan, *tmp;
+ struct hclge_dev *hdev = vport->back;
+
+ if (vport->vport_id) {
+ if (vport->port_base_vlan_cfg.state !=
+ HNAE3_PORT_BASE_VLAN_DISABLE)
+ return true;
+
+ if (vport->vf_info.trusted && vport->vf_info.request_uc_en)
+ return false;
+ } else if (handle->netdev_flags & HNAE3_USER_UPE) {
+ return false;
+ }
+
+ if (!vport->req_vlan_fltr_en)
+ return false;
+
+ /* for compatibility with older devices, always enable the vlan filter */
+ if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps))
+ return true;
+
+ list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node)
+ if (vlan->vlan_id != 0)
+ return true;
+
+ return false;
+}
+
+int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en)
+{
+ struct hclge_dev *hdev = vport->back;
+ bool need_en;
+ int ret;
+
+ mutex_lock(&hdev->vport_lock);
+
+ vport->req_vlan_fltr_en = request_en;
+
+ need_en = hclge_need_enable_vport_vlan_filter(vport);
+ if (need_en == vport->cur_vlan_fltr_en) {
+ mutex_unlock(&hdev->vport_lock);
+ return 0;
+ }
+
+ ret = hclge_set_vport_vlan_filter(vport, need_en);
+ if (ret) {
+ mutex_unlock(&hdev->vport_lock);
+ return ret;
+ }
+
+ vport->cur_vlan_fltr_en = need_en;
+
+ mutex_unlock(&hdev->vport_lock);
+
+ return 0;
+}
+
+static int hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+
+ return hclge_enable_vport_vlan_filter(vport, enable);
+}
+
+static int hclge_set_vf_vlan_filter_cmd(struct hclge_dev *hdev, u16 vfid,
+ bool is_kill, u16 vlan,
+ struct hclge_desc *desc)
+{
+ struct hclge_vlan_filter_vf_cfg_cmd *req0;
+ struct hclge_vlan_filter_vf_cfg_cmd *req1;
+ u8 vf_byte_val;
+ u8 vf_byte_off;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc[0],
+ HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
+ hclge_cmd_setup_basic_desc(&desc[1],
+ HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
+
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+
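+ /* each VF occupies one bit in the VF bitmap: locate the byte and
+ * bit for this vfid, spilling into the second descriptor when the
+ * byte offset reaches HCLGE_MAX_VF_BYTES
+ */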
+ vf_byte_off = vfid / 8;
+ vf_byte_val = 1 << (vfid % 8);
+
+ req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
+ req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
+
+ req0->vlan_id = cpu_to_le16(vlan);
+ req0->vlan_cfg = is_kill;
+
+ if (vf_byte_off < HCLGE_MAX_VF_BYTES)
+ req0->vf_bitmap[vf_byte_off] = vf_byte_val;
+ else
+ req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
+
+ ret = hclge_cmd_send(&hdev->hw, desc, 2);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Send vf vlan command fail, ret =%d.\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hclge_check_vf_vlan_cmd_status(struct hclge_dev *hdev, u16 vfid,
+ bool is_kill, struct hclge_desc *desc)
+{
+ struct hclge_vlan_filter_vf_cfg_cmd *req;
+
+ req = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
+
+ if (!is_kill) {
+#define HCLGE_VF_VLAN_NO_ENTRY 2
+ if (!req->resp_code || req->resp_code == 1)
+ return 0;
+
+ if (req->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
+ set_bit(vfid, hdev->vf_vlan_full);
+ dev_warn(&hdev->pdev->dev,
+ "vf vlan table is full, vf vlan filter is disabled\n");
+ return 0;
+ }
+
+ dev_err(&hdev->pdev->dev,
+ "Add vf vlan filter fail, ret =%u.\n",
+ req->resp_code);
+ } else {
+#define HCLGE_VF_VLAN_DEL_NO_FOUND 1
+ if (!req->resp_code)
+ return 0;
+
+ /* when the vf vlan table is full, the vf vlan filter is
+ * disabled and new vlan ids are not added to the table.
+ * Just return 0 without a warning to avoid a flood of
+ * verbose logs on unload.
+ */
+ if (req->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
+ return 0;
+
+ dev_err(&hdev->pdev->dev,
+ "Kill vf vlan filter fail, ret =%u.\n",
+ req->resp_code);
+ }
+
+ return -EIO;
+}
+
+static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
+ bool is_kill, u16 vlan)
+{
+ struct hclge_vport *vport = &hdev->vport[vfid];
+ struct hclge_desc desc[2];
+ int ret;
+
+ /* if the vf vlan table is full, the firmware disables the vf vlan
+ * filter, so adding a new vlan id is neither possible nor necessary.
+ * If spoof check is enabled while the vf vlan table is full, a new
+ * vlan must not be added, because tx packets with that vlan id
+ * would be dropped.
+ */
+ if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
+ if (vport->vf_info.spoofchk && vlan) {
+ dev_err(&hdev->pdev->dev,
+ "Can't add vlan due to spoof check is on and vf vlan table is full\n");
+ return -EPERM;
+ }
+ return 0;
+ }
+
+ ret = hclge_set_vf_vlan_filter_cmd(hdev, vfid, is_kill, vlan, desc);
+ if (ret)
+ return ret;
+
+ return hclge_check_vf_vlan_cmd_status(hdev, vfid, is_kill, desc);
+}
+
+static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
+ u16 vlan_id, bool is_kill)
+{
+ struct hclge_vlan_filter_pf_cfg_cmd *req;
+ struct hclge_desc desc;
+ u8 vlan_offset_byte_val;
+ u8 vlan_offset_byte;
+ u8 vlan_offset_160;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
+
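+ /* the port vlan bitmap is split into blocks of
+ * HCLGE_VLAN_ID_OFFSET_STEP vlan ids: pick the block, then the
+ * byte and bit of this vlan id within the block
+ */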
+ vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
+ vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
+ HCLGE_VLAN_BYTE_SIZE;
+ vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
+
+ req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
+ req->vlan_offset = vlan_offset_160;
+ req->vlan_cfg = is_kill;
+ req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "port vlan command, send fail, ret =%d.\n", ret);
+ return ret;
+}
+
+static bool hclge_need_update_port_vlan(struct hclge_dev *hdev, u16 vport_id,
+ u16 vlan_id, bool is_kill)
+{
+ /* vlan 0 may be added twice when 8021q module is enabled */
+ if (!is_kill && !vlan_id &&
+ test_bit(vport_id, hdev->vlan_table[vlan_id]))
+ return false;
+
+ if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
+ dev_warn(&hdev->pdev->dev,
+ "Add port vlan failed, vport %u is already in vlan %u\n",
+ vport_id, vlan_id);
+ return false;
+ }
+
+ if (is_kill &&
+ !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
+ dev_warn(&hdev->pdev->dev,
+ "Delete port vlan failed, vport %u is not in vlan %u\n",
+ vport_id, vlan_id);
+ return false;
+ }
+
+ return true;
+}
+
+static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
+ u16 vport_id, u16 vlan_id,
+ bool is_kill)
+{
+ u16 vport_idx, vport_num = 0;
+ int ret;
+
+ if (is_kill && !vlan_id)
+ return 0;
+
+ if (vlan_id >= VLAN_N_VID)
+ return -EINVAL;
+
+ ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Set %u vport vlan filter config fail, ret =%d.\n",
+ vport_id, ret);
+ return ret;
+ }
+
+ if (!hclge_need_update_port_vlan(hdev, vport_id, vlan_id, is_kill))
+ return 0;
+
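+ /* the port vlan filter only needs updating when the first vport
+ * joins this vlan or the last vport leaves it
+ */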
+ for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
+ vport_num++;
+
+ if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
+ ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
+ is_kill);
+
+ return ret;
+}
+
+static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
+{
+ struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
+ struct hclge_vport_vtag_tx_cfg_cmd *req;
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_desc desc;
+ u16 bmap_index;
+ int status;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
+
+ req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
+ req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
+ req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
+ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
+ vcfg->accept_tag1 ? 1 : 0);
+ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
+ vcfg->accept_untag1 ? 1 : 0);
+ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
+ vcfg->accept_tag2 ? 1 : 0);
+ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
+ vcfg->accept_untag2 ? 1 : 0);
+ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
+ vcfg->insert_tag1_en ? 1 : 0);
+ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
+ vcfg->insert_tag2_en ? 1 : 0);
+ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_TAG_SHIFT_MODE_EN_B,
+ vcfg->tag_shift_mode_en ? 1 : 0);
+ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
+
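+ /* vports are grouped HCLGE_VF_NUM_PER_CMD per command; select this
+ * vport by setting its bit in the per-command vf_bitmap
+ */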
+ req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
+ bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
+ HCLGE_VF_NUM_PER_BYTE;
+ req->vf_bitmap[bmap_index] =
+ 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
+
+ status = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (status)
+ dev_err(&hdev->pdev->dev,
+ "Send port txvlan cfg command fail, ret =%d\n",
+ status);
+
+ return status;
+}
+
+static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
+{
+ struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
+ struct hclge_vport_vtag_rx_cfg_cmd *req;
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_desc desc;
+ u16 bmap_index;
+ int status;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
+
+ req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
+ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
+ vcfg->strip_tag1_en ? 1 : 0);
+ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
+ vcfg->strip_tag2_en ? 1 : 0);
+ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
+ vcfg->vlan1_vlan_prionly ? 1 : 0);
+ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
+ vcfg->vlan2_vlan_prionly ? 1 : 0);
+ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG1_EN_B,
+ vcfg->strip_tag1_discard_en ? 1 : 0);
+ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG2_EN_B,
+ vcfg->strip_tag2_discard_en ? 1 : 0);
+
+ req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
+ bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
+ HCLGE_VF_NUM_PER_BYTE;
+ req->vf_bitmap[bmap_index] =
+ 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
+
+ status = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (status)
+ dev_err(&hdev->pdev->dev,
+ "Send port rxvlan cfg command fail, ret =%d\n",
+ status);
+
+ return status;
+}
+
+static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
+ u16 port_base_vlan_state,
+ u16 vlan_tag, u8 qos)
+{
+ int ret;
+
+ if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
+ vport->txvlan_cfg.accept_tag1 = true;
+ vport->txvlan_cfg.insert_tag1_en = false;
+ vport->txvlan_cfg.default_tag1 = 0;
+ } else {
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(vport->nic.pdev);
+
+ vport->txvlan_cfg.accept_tag1 =
+ ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3;
+ vport->txvlan_cfg.insert_tag1_en = true;
+ vport->txvlan_cfg.default_tag1 = (qos << VLAN_PRIO_SHIFT) |
+ vlan_tag;
+ }
+
+ vport->txvlan_cfg.accept_untag1 = true;
+
+ /* accept_tag2 and accept_untag2 are not supported on
+ * pdev revision 0x20; newer revisions support them, but
+ * these two fields cannot be configured by the user.
+ */
+ vport->txvlan_cfg.accept_tag2 = true;
+ vport->txvlan_cfg.accept_untag2 = true;
+ vport->txvlan_cfg.insert_tag2_en = false;
+ vport->txvlan_cfg.default_tag2 = 0;
+ vport->txvlan_cfg.tag_shift_mode_en = true;
+
+ if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
+ vport->rxvlan_cfg.strip_tag1_en = false;
+ vport->rxvlan_cfg.strip_tag2_en =
+ vport->rxvlan_cfg.rx_vlan_offload_en;
+ vport->rxvlan_cfg.strip_tag2_discard_en = false;
+ } else {
+ vport->rxvlan_cfg.strip_tag1_en =
+ vport->rxvlan_cfg.rx_vlan_offload_en;
+ vport->rxvlan_cfg.strip_tag2_en = true;
+ vport->rxvlan_cfg.strip_tag2_discard_en = true;
+ }
+
+ vport->rxvlan_cfg.strip_tag1_discard_en = false;
+ vport->rxvlan_cfg.vlan1_vlan_prionly = false;
+ vport->rxvlan_cfg.vlan2_vlan_prionly = false;
+
+ ret = hclge_set_vlan_tx_offload_cfg(vport);
+ if (ret)
+ return ret;
+
+ return hclge_set_vlan_rx_offload_cfg(vport);
+}
+
+static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
+{
+ struct hclge_rx_vlan_type_cfg_cmd *rx_req;
+ struct hclge_tx_vlan_type_cfg_cmd *tx_req;
+ struct hclge_desc desc;
+ int status;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
+ rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
+ rx_req->ot_fst_vlan_type =
+ cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
+ rx_req->ot_sec_vlan_type =
+ cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
+ rx_req->in_fst_vlan_type =
+ cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
+ rx_req->in_sec_vlan_type =
+ cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
+
+ status = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (status) {
+ dev_err(&hdev->pdev->dev,
+ "Send rxvlan protocol type command fail, ret =%d\n",
+ status);
+ return status;
+ }
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
+
+ tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
+ tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
+ tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
+
+ status = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (status)
+ dev_err(&hdev->pdev->dev,
+ "Send txvlan protocol type command fail, ret =%d\n",
+ status);
+
+ return status;
+}
+
+static int hclge_init_vlan_filter(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport;
+ int ret;
+ int i;
+
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
+ return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
+ HCLGE_FILTER_FE_EGRESS_V1_B,
+ true, 0);
+
+ /* for revision 0x21, vf vlan filter is per function */
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ vport = &hdev->vport[i];
+ ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
+ HCLGE_FILTER_FE_EGRESS, true,
+ vport->vport_id);
+ if (ret)
+ return ret;
+ vport->cur_vlan_fltr_en = true;
+ }
+
+ return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
+ HCLGE_FILTER_FE_INGRESS, true, 0);
+}
+
+static int hclge_init_vlan_type(struct hclge_dev *hdev)
+{
+ hdev->vlan_type_cfg.rx_in_fst_vlan_type = ETH_P_8021Q;
+ hdev->vlan_type_cfg.rx_in_sec_vlan_type = ETH_P_8021Q;
+ hdev->vlan_type_cfg.rx_ot_fst_vlan_type = ETH_P_8021Q;
+ hdev->vlan_type_cfg.rx_ot_sec_vlan_type = ETH_P_8021Q;
+ hdev->vlan_type_cfg.tx_ot_vlan_type = ETH_P_8021Q;
+ hdev->vlan_type_cfg.tx_in_vlan_type = ETH_P_8021Q;
+
+ return hclge_set_vlan_protocol_type(hdev);
+}
+
+static int hclge_init_vport_vlan_offload(struct hclge_dev *hdev)
+{
+ struct hclge_port_base_vlan_config *cfg;
+ struct hclge_vport *vport;
+ int ret;
+ int i;
+
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ vport = &hdev->vport[i];
+ cfg = &vport->port_base_vlan_cfg;
+
+ ret = hclge_vlan_offload_cfg(vport, cfg->state,
+ cfg->vlan_info.vlan_tag,
+ cfg->vlan_info.qos);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+static int hclge_init_vlan_config(struct hclge_dev *hdev)
+{
+ struct hnae3_handle *handle = &hdev->vport[0].nic;
+ int ret;
+
+ ret = hclge_init_vlan_filter(hdev);
+ if (ret)
+ return ret;
+
+ ret = hclge_init_vlan_type(hdev);
+ if (ret)
+ return ret;
+
+ ret = hclge_init_vport_vlan_offload(hdev);
+ if (ret)
+ return ret;
+
+ return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
+}
+
+static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
+ bool writen_to_tbl)
+{
+ struct hclge_vport_vlan_cfg *vlan, *tmp;
+ struct hclge_dev *hdev = vport->back;
+
+ mutex_lock(&hdev->vport_lock);
+
+ list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
+ if (vlan->vlan_id == vlan_id) {
+ mutex_unlock(&hdev->vport_lock);
+ return;
+ }
+ }
+
+ vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
+ if (!vlan) {
+ mutex_unlock(&hdev->vport_lock);
+ return;
+ }
+
+ vlan->hd_tbl_status = writen_to_tbl;
+ vlan->vlan_id = vlan_id;
+
+ list_add_tail(&vlan->node, &vport->vlan_list);
+ mutex_unlock(&hdev->vport_lock);
+}
+
+static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
+{
+ struct hclge_vport_vlan_cfg *vlan, *tmp;
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+
+ mutex_lock(&hdev->vport_lock);
+
+ list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
+ if (!vlan->hd_tbl_status) {
+ ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
+ vport->vport_id,
+ vlan->vlan_id, false);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "restore vport vlan list failed, ret=%d\n",
+ ret);
+
+ mutex_unlock(&hdev->vport_lock);
+ return ret;
+ }
+ }
+ vlan->hd_tbl_status = true;
+ }
+
+ mutex_unlock(&hdev->vport_lock);
+
+ return 0;
+}
+
+static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
+ bool is_write_tbl)
+{
+ struct hclge_vport_vlan_cfg *vlan, *tmp;
+ struct hclge_dev *hdev = vport->back;
+
+ list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
+ if (vlan->vlan_id == vlan_id) {
+ if (is_write_tbl && vlan->hd_tbl_status)
+ hclge_set_vlan_filter_hw(hdev,
+ htons(ETH_P_8021Q),
+ vport->vport_id,
+ vlan_id,
+ true);
+
+ list_del(&vlan->node);
+ kfree(vlan);
+ break;
+ }
+ }
+}
+
+void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
+{
+ struct hclge_vport_vlan_cfg *vlan, *tmp;
+ struct hclge_dev *hdev = vport->back;
+
+ mutex_lock(&hdev->vport_lock);
+
+ list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
+ if (vlan->hd_tbl_status)
+ hclge_set_vlan_filter_hw(hdev,
+ htons(ETH_P_8021Q),
+ vport->vport_id,
+ vlan->vlan_id,
+ true);
+
+ vlan->hd_tbl_status = false;
+ if (is_del_list) {
+ list_del(&vlan->node);
+ kfree(vlan);
+ }
+ }
+ clear_bit(vport->vport_id, hdev->vf_vlan_full);
+ mutex_unlock(&hdev->vport_lock);
+}
+
+void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
+{
+ struct hclge_vport_vlan_cfg *vlan, *tmp;
+ struct hclge_vport *vport;
+ int i;
+
+ mutex_lock(&hdev->vport_lock);
+
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ vport = &hdev->vport[i];
+ list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
+ list_del(&vlan->node);
+ kfree(vlan);
+ }
+ }
+
+ mutex_unlock(&hdev->vport_lock);
+}
+
+void hclge_restore_vport_port_base_vlan_config(struct hclge_dev *hdev)
+{
+ struct hclge_vlan_info *vlan_info;
+ struct hclge_vport *vport;
+ u16 vlan_proto;
+ u16 vlan_id;
+ u16 state;
+ int vf_id;
+ int ret;
+
+ /* the PF should restore the port base vlan of all VFs */
+ for (vf_id = 0; vf_id < hdev->num_alloc_vfs; vf_id++) {
+ vport = &hdev->vport[vf_id + HCLGE_VF_VPORT_START_NUM];
+ vlan_info = vport->port_base_vlan_cfg.tbl_sta ?
+ &vport->port_base_vlan_cfg.vlan_info :
+ &vport->port_base_vlan_cfg.old_vlan_info;
+
+ vlan_id = vlan_info->vlan_tag;
+ vlan_proto = vlan_info->vlan_proto;
+ state = vport->port_base_vlan_cfg.state;
+
+ if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
+ clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
+ ret = hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
+ vport->vport_id,
+ vlan_id, false);
+ vport->port_base_vlan_cfg.tbl_sta = ret == 0;
+ }
+ }
+}
+
+void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
+{
+ struct hclge_vport_vlan_cfg *vlan, *tmp;
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+
+ mutex_lock(&hdev->vport_lock);
+
+ if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
+ list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
+ ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
+ vport->vport_id,
+ vlan->vlan_id, false);
+ if (ret)
+ break;
+ vlan->hd_tbl_status = true;
+ }
+ }
+
+ mutex_unlock(&hdev->vport_lock);
+}
+
+/* For global reset and imp reset, the hardware clears the mac table,
+ * so we change the mac address state from ACTIVE to TO_ADD, allowing
+ * the addresses to be restored by the service task after the reset
+ * completes. Furthermore, mac addresses in state TO_DEL or DEL_FAIL
+ * do not need to be restored after reset, so just remove those mac
+ * nodes from mac_list.
+ */
+static void hclge_mac_node_convert_for_reset(struct list_head *list)
+{
+ struct hclge_mac_node *mac_node, *tmp;
+
+ list_for_each_entry_safe(mac_node, tmp, list, node) {
+ if (mac_node->state == HCLGE_MAC_ACTIVE) {
+ mac_node->state = HCLGE_MAC_TO_ADD;
+ } else if (mac_node->state == HCLGE_MAC_TO_DEL) {
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ }
+ }
+}
+
+void hclge_restore_mac_table_common(struct hclge_vport *vport)
+{
+ spin_lock_bh(&vport->mac_list_lock);
+
+ hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
+ hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
+ set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
+
+ spin_unlock_bh(&vport->mac_list_lock);
+}
+
+static void hclge_restore_hw_table(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport = &hdev->vport[0];
+ struct hnae3_handle *handle = &vport->nic;
+
+ hclge_restore_mac_table_common(vport);
+ hclge_restore_vport_port_base_vlan_config(hdev);
+ hclge_restore_vport_vlan_table(vport);
+ set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
+ hclge_restore_fd_entries(handle);
+}
+
+int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+
+ if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
+ vport->rxvlan_cfg.strip_tag1_en = false;
+ vport->rxvlan_cfg.strip_tag2_en = enable;
+ vport->rxvlan_cfg.strip_tag2_discard_en = false;
+ } else {
+ vport->rxvlan_cfg.strip_tag1_en = enable;
+ vport->rxvlan_cfg.strip_tag2_en = true;
+ vport->rxvlan_cfg.strip_tag2_discard_en = true;
+ }
+
+ vport->rxvlan_cfg.strip_tag1_discard_en = false;
+ vport->rxvlan_cfg.vlan1_vlan_prionly = false;
+ vport->rxvlan_cfg.vlan2_vlan_prionly = false;
+ vport->rxvlan_cfg.rx_vlan_offload_en = enable;
+
+ return hclge_set_vlan_rx_offload_cfg(vport);
+}
+
+static void hclge_set_vport_vlan_fltr_change(struct hclge_vport *vport)
+{
+ struct hclge_dev *hdev = vport->back;
+
+ if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps))
+ set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE, &vport->state);
+}
+
+static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
+ u16 port_base_vlan_state,
+ struct hclge_vlan_info *new_info,
+ struct hclge_vlan_info *old_info)
+{
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+
+ if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
+ hclge_rm_vport_all_vlan_table(vport, false);
+ /* force clear VLAN 0 */
+ ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, true, 0);
+ if (ret)
+ return ret;
+ return hclge_set_vlan_filter_hw(hdev,
+ htons(new_info->vlan_proto),
+ vport->vport_id,
+ new_info->vlan_tag,
+ false);
+ }
+
+ vport->port_base_vlan_cfg.tbl_sta = false;
+
+ /* force add VLAN 0 */
+ ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, false, 0);
+ if (ret)
+ return ret;
+
+ ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
+ vport->vport_id, old_info->vlan_tag,
+ true);
+ if (ret)
+ return ret;
+
+ return hclge_add_vport_all_vlan_table(vport);
+}
+
+static bool hclge_need_update_vlan_filter(const struct hclge_vlan_info *new_cfg,
+ const struct hclge_vlan_info *old_cfg)
+{
+ if (new_cfg->vlan_tag != old_cfg->vlan_tag)
+ return true;
+
+ if (new_cfg->vlan_tag == 0 && (new_cfg->qos == 0 || old_cfg->qos == 0))
+ return true;
+
+ return false;
+}
+
+static int hclge_modify_port_base_vlan_tag(struct hclge_vport *vport,
+ struct hclge_vlan_info *new_info,
+ struct hclge_vlan_info *old_info)
+{
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+
+ /* add new VLAN tag */
+ ret = hclge_set_vlan_filter_hw(hdev, htons(new_info->vlan_proto),
+ vport->vport_id, new_info->vlan_tag,
+ false);
+ if (ret)
+ return ret;
+
+ vport->port_base_vlan_cfg.tbl_sta = false;
+ /* remove old VLAN tag */
+ if (old_info->vlan_tag == 0)
+ ret = hclge_set_vf_vlan_common(hdev, vport->vport_id,
+ true, 0);
+ else
+ ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
+ vport->vport_id,
+ old_info->vlan_tag, true);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "failed to clear vport%u port base vlan %u, ret = %d.\n",
+ vport->vport_id, old_info->vlan_tag, ret);
+
+ return ret;
+}
+
+int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
+ struct hclge_vlan_info *vlan_info)
+{
+ struct hnae3_handle *nic = &vport->nic;
+ struct hclge_vlan_info *old_vlan_info;
+ int ret;
+
+ old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
+
+ ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag,
+ vlan_info->qos);
+ if (ret)
+ return ret;
+
+ if (!hclge_need_update_vlan_filter(vlan_info, old_vlan_info))
+ goto out;
+
+ if (state == HNAE3_PORT_BASE_VLAN_MODIFY)
+ ret = hclge_modify_port_base_vlan_tag(vport, vlan_info,
+ old_vlan_info);
+ else
+ ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
+ old_vlan_info);
+ if (ret)
+ return ret;
+
+out:
+ vport->port_base_vlan_cfg.state = state;
+ if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
+ nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
+ else
+ nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
+
+ vport->port_base_vlan_cfg.old_vlan_info = *old_vlan_info;
+ vport->port_base_vlan_cfg.vlan_info = *vlan_info;
+ vport->port_base_vlan_cfg.tbl_sta = true;
+ hclge_set_vport_vlan_fltr_change(vport);
+
+ return 0;
+}
+
+static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
+ enum hnae3_port_base_vlan_state state,
+ u16 vlan, u8 qos)
+{
+ if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
+ if (!vlan && !qos)
+ return HNAE3_PORT_BASE_VLAN_NOCHANGE;
+
+ return HNAE3_PORT_BASE_VLAN_ENABLE;
+ }
+
+ if (!vlan && !qos)
+ return HNAE3_PORT_BASE_VLAN_DISABLE;
+
+ if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan &&
+ vport->port_base_vlan_cfg.vlan_info.qos == qos)
+ return HNAE3_PORT_BASE_VLAN_NOCHANGE;
+
+ return HNAE3_PORT_BASE_VLAN_MODIFY;
+}
+
+static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
+ u16 vlan, u8 qos, __be16 proto)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_vlan_info vlan_info;
+ u16 state;
+ int ret;
+
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
+ return -EOPNOTSUPP;
+
+ vport = hclge_get_vf_vport(hdev, vfid);
+ if (!vport)
+ return -EINVAL;
+
+ /* qos is a 3-bit value, so it cannot be bigger than 7 */
+ if (vlan > VLAN_N_VID - 1 || qos > 7)
+ return -EINVAL;
+ if (proto != htons(ETH_P_8021Q))
+ return -EPROTONOSUPPORT;
+
+ state = hclge_get_port_base_vlan_state(vport,
+ vport->port_base_vlan_cfg.state,
+ vlan, qos);
+ if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
+ return 0;
+
+ vlan_info.vlan_tag = vlan;
+ vlan_info.qos = qos;
+ vlan_info.vlan_proto = ntohs(proto);
+
+ ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to update port base vlan for vf %d, ret = %d\n",
+ vfid, ret);
+ return ret;
+ }
+
+ /* there is a time window before the PF knows the VF is down,
+ * which may cause the mailbox send to fail; that does not matter,
+ * since the VF will query the state when it reinitializes.
+ * For DEVICE_VERSION_V3, the VF does not need to know about the
+ * port based VLAN state.
+ */
+ if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
+ if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
+ (void)hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
+ vport->vport_id,
+ state,
+ &vlan_info);
+ else
+ set_bit(HCLGE_VPORT_NEED_NOTIFY_VF_VLAN,
+ &vport->need_notify);
+ }
+ return 0;
+}
+
+static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
+{
+ struct hclge_vlan_info *vlan_info;
+ struct hclge_vport *vport;
+ int ret;
+ int vf;
+
+ /* clear port base vlan for all vf */
+ for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
+ vport = &hdev->vport[vf];
+ vlan_info = &vport->port_base_vlan_cfg.vlan_info;
+
+ ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
+ vport->vport_id,
+ vlan_info->vlan_tag, true);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "failed to clear vf vlan for vf%d, ret = %d\n",
+ vf - HCLGE_VF_VPORT_START_NUM, ret);
+ }
+}
+
+int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
+ u16 vlan_id, bool is_kill)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ bool writen_to_tbl = false;
+ int ret = 0;
+
+ /* When the device is resetting or the reset has failed, the firmware
+ * cannot handle mailbox messages. Just record the vlan id, and remove
+ * it after the reset has finished.
+ */
+ mutex_lock(&hdev->vport_lock);
+ if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
+ test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
+ set_bit(vlan_id, vport->vlan_del_fail_bmap);
+ mutex_unlock(&hdev->vport_lock);
+ return -EBUSY;
+ } else if (!is_kill && test_bit(vlan_id, vport->vlan_del_fail_bmap)) {
+ clear_bit(vlan_id, vport->vlan_del_fail_bmap);
+ }
+ mutex_unlock(&hdev->vport_lock);
+
+ /* when port base vlan is enabled, we use the port base vlan as the
+ * vlan filter entry. In this case, we do not update the vlan filter
+ * table when the user adds or removes a vlan; we just update the
+ * vport vlan list. The vlan ids in the vlan list are not written to
+ * the vlan filter table until port base vlan is disabled.
+ */
+ if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
+ ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
+ vlan_id, is_kill);
+ writen_to_tbl = true;
+ }
+
+ if (!ret) {
+ if (!is_kill) {
+ hclge_add_vport_vlan_table(vport, vlan_id,
+ writen_to_tbl);
+ } else if (is_kill && vlan_id != 0) {
+ mutex_lock(&hdev->vport_lock);
+ hclge_rm_vport_vlan_table(vport, vlan_id, false);
+ mutex_unlock(&hdev->vport_lock);
+ }
+ } else if (is_kill) {
+ /* when removing the hw vlan filter fails, record the vlan id
+ * and try to remove it from hw later, to stay consistent
+ * with the stack
+ */
+ mutex_lock(&hdev->vport_lock);
+ set_bit(vlan_id, vport->vlan_del_fail_bmap);
+ mutex_unlock(&hdev->vport_lock);
+ }
+
+ hclge_set_vport_vlan_fltr_change(vport);
+
+ return ret;
+}
+
+static void hclge_sync_vlan_fltr_state(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport;
+ int ret;
+ u16 i;
+
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ vport = &hdev->vport[i];
+ if (!test_and_clear_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
+ &vport->state))
+ continue;
+
+ ret = hclge_enable_vport_vlan_filter(vport,
+ vport->req_vlan_fltr_en);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to sync vlan filter state for vport%u, ret = %d\n",
+ vport->vport_id, ret);
+ set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
+ &vport->state);
+ return;
+ }
+ }
+}
+
+static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
+{
+#define HCLGE_MAX_SYNC_COUNT 60
+
+ int i, ret, sync_cnt = 0;
+ u16 vlan_id;
+
+ mutex_lock(&hdev->vport_lock);
+ /* start from vport 1 for PF is always alive */
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ struct hclge_vport *vport = &hdev->vport[i];
+
+ vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
+ VLAN_N_VID);
+ while (vlan_id != VLAN_N_VID) {
+ ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
+ vport->vport_id, vlan_id,
+ true);
+ if (ret && ret != -EINVAL) {
+ mutex_unlock(&hdev->vport_lock);
+ return;
+ }
+
+ clear_bit(vlan_id, vport->vlan_del_fail_bmap);
+ hclge_rm_vport_vlan_table(vport, vlan_id, false);
+ hclge_set_vport_vlan_fltr_change(vport);
+
+ sync_cnt++;
+ if (sync_cnt >= HCLGE_MAX_SYNC_COUNT) {
+ mutex_unlock(&hdev->vport_lock);
+ return;
+ }
+
+ vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
+ VLAN_N_VID);
+ }
+ }
+ mutex_unlock(&hdev->vport_lock);
+
+ hclge_sync_vlan_fltr_state(hdev);
+}
+
+static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
+{
+ struct hclge_config_max_frm_size_cmd *req;
+ struct hclge_desc desc;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
+
+ req = (struct hclge_config_max_frm_size_cmd *)desc.data;
+ req->max_frm_size = cpu_to_le16(new_mps);
+ req->min_frm_size = HCLGE_MAC_MIN_FRAME;
+
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
+static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+
+ return hclge_set_vport_mtu(vport, new_mtu);
+}
+
+int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
+{
+ struct hclge_dev *hdev = vport->back;
+ int i, max_frm_size, ret;
+
+ /* HW supports 2 layers of vlan */
+ max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
+ if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
+ max_frm_size > hdev->ae_dev->dev_specs.max_frm_size)
+ return -EINVAL;
+
+ max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
+ mutex_lock(&hdev->vport_lock);
+ /* VF's mps must fit within hdev->mps */
+ if (vport->vport_id && max_frm_size > hdev->mps) {
+ mutex_unlock(&hdev->vport_lock);
+ return -EINVAL;
+ } else if (vport->vport_id) {
+ vport->mps = max_frm_size;
+ mutex_unlock(&hdev->vport_lock);
+ return 0;
+ }
+
+ /* PF's mps must be no less than any VF's mps */
+ for (i = 1; i < hdev->num_alloc_vport; i++)
+ if (max_frm_size < hdev->vport[i].mps) {
+ dev_err(&hdev->pdev->dev,
+ "failed to set pf mtu for less than vport %d, mps = %u.\n",
+ i, hdev->vport[i].mps);
+ mutex_unlock(&hdev->vport_lock);
+ return -EINVAL;
+ }
+
+ hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
+
+ ret = hclge_set_mac_mtu(hdev, max_frm_size);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Change mtu fail, ret =%d\n", ret);
+ goto out;
+ }
+
+ hdev->mps = max_frm_size;
+ vport->mps = max_frm_size;
+
+ ret = hclge_buffer_alloc(hdev);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "Allocate buffer fail, ret =%d\n", ret);
+
+out:
+ hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+ mutex_unlock(&hdev->vport_lock);
+ return ret;
+}
+
+static int hclge_reset_tqp_cmd_send(struct hclge_dev *hdev, u16 queue_id,
+ bool enable)
+{
+ struct hclge_reset_tqp_queue_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
+
+ req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
+ req->tqp_id = cpu_to_le16(queue_id);
+ if (enable)
+ hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Send tqp reset cmd error, status =%d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id,
+ u8 *reset_status)
+{
+ struct hclge_reset_tqp_queue_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
+
+ req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
+ req->tqp_id = cpu_to_le16(queue_id);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get reset status error, status =%d\n", ret);
+ return ret;
+ }
+
+ *reset_status = hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
+
+ return 0;
+}
+
+u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
+{
+ struct hclge_comm_tqp *tqp;
+ struct hnae3_queue *queue;
+
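+ /* translate the handle-local queue id into the device-global
+ * tqp index
+ */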
+ queue = handle->kinfo.tqp[queue_id];
+ tqp = container_of(queue, struct hclge_comm_tqp, q);
+
+ return tqp->index;
+}
+
+static int hclge_reset_tqp_cmd(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ u16 reset_try_times = 0;
+ u8 reset_status;
+ u16 queue_gid;
+ int ret;
+ u16 i;
+
+ for (i = 0; i < handle->kinfo.num_tqps; i++) {
+ queue_gid = hclge_covert_handle_qid_global(handle, i);
+ ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, true);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to send reset tqp cmd, ret = %d\n",
+ ret);
+ return ret;
+ }
+
+ while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
+ ret = hclge_get_reset_status(hdev, queue_gid,
+ &reset_status);
+ if (ret)
+ return ret;
+
+ if (reset_status)
+ break;
+
+ /* Wait for tqp hw reset */
+ usleep_range(1000, 1200);
+ }
+
+ if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
+ dev_err(&hdev->pdev->dev,
+ "wait for tqp hw reset timeout\n");
+ return -ETIME;
+ }
+
+ ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, false);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to deassert soft reset, ret = %d\n",
+ ret);
+ return ret;
+ }
+ reset_try_times = 0;
+ }
+ return 0;
+}
+
+static int hclge_reset_rcb(struct hnae3_handle *handle)
+{
+#define HCLGE_RESET_RCB_NOT_SUPPORT 0U
+#define HCLGE_RESET_RCB_SUCCESS 1U
+
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_reset_cmd *req;
+ struct hclge_desc desc;
+ u8 return_status;
+ u16 queue_gid;
+ int ret;
+
+ queue_gid = hclge_covert_handle_qid_global(handle, 0);
+
+ req = (struct hclge_reset_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
+ hnae3_set_bit(req->fun_reset_rcb, HCLGE_CFG_RESET_RCB_B, 1);
+ req->fun_reset_rcb_vqid_start = cpu_to_le16(queue_gid);
+ req->fun_reset_rcb_vqid_num = cpu_to_le16(handle->kinfo.num_tqps);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to send rcb reset cmd, ret = %d\n", ret);
+ return ret;
+ }
+
+ return_status = req->fun_reset_rcb_return_status;
+ if (return_status == HCLGE_RESET_RCB_SUCCESS)
+ return 0;
+
+ if (return_status != HCLGE_RESET_RCB_NOT_SUPPORT) {
+ dev_err(&hdev->pdev->dev, "failed to reset rcb, ret = %u\n",
+ return_status);
+ return -EIO;
+ }
+
+ /* if reset rcb cmd is unsupported, we need to send reset tqp cmd
+ * again to reset all tqps
+ */
+ return hclge_reset_tqp_cmd(handle);
+}
+
+int hclge_reset_tqp(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+
+ /* only need to disable PF's tqp */
+ if (!vport->vport_id) {
+ ret = hclge_tqp_enable(handle, false);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to disable tqp, ret = %d\n", ret);
+ return ret;
+ }
+ }
+
+ return hclge_reset_rcb(handle);
+}
+
+static u32 hclge_get_fw_version(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ return hdev->fw_version;
+}
+
+static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
+{
+ struct phy_device *phydev = hdev->hw.mac.phydev;
+
+ if (!phydev)
+ return;
+
+ phy_set_asym_pause(phydev, rx_en, tx_en);
+}
+
+static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
+{
+ int ret;
+
+ if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
+ return 0;
+
+ ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "configure pauseparam error, ret = %d.\n", ret);
+
+ return ret;
+}
+
+int hclge_cfg_flowctrl(struct hclge_dev *hdev)
+{
+ struct phy_device *phydev = hdev->hw.mac.phydev;
+ u16 remote_advertising = 0;
+ u16 local_advertising;
+ u32 rx_pause, tx_pause;
+ u8 flowctl;
+
+ if (!phydev->link)
+ return 0;
+
+ if (!phydev->autoneg)
+ return hclge_mac_pause_setup_hw(hdev);
+
+ local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
+
+ if (phydev->pause)
+ remote_advertising = LPA_PAUSE_CAP;
+
+ if (phydev->asym_pause)
+ remote_advertising |= LPA_PAUSE_ASYM;
+
+ flowctl = mii_resolve_flowctrl_fdx(local_advertising,
+ remote_advertising);
+ tx_pause = flowctl & FLOW_CTRL_TX;
+ rx_pause = flowctl & FLOW_CTRL_RX;
+
+ if (phydev->duplex == HCLGE_MAC_HALF) {
+ tx_pause = 0;
+ rx_pause = 0;
+ }
+
+ return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
+}
+
+static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
+ u32 *rx_en, u32 *tx_en)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ u8 media_type = hdev->hw.mac.media_type;
+
+ *auto_neg = (media_type == HNAE3_MEDIA_TYPE_COPPER) ?
+ hclge_get_autoneg(handle) : 0;
+
+ if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
+ *rx_en = 0;
+ *tx_en = 0;
+ return;
+ }
+
+ if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
+ *rx_en = 1;
+ *tx_en = 0;
+ } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
+ *tx_en = 1;
+ *rx_en = 0;
+ } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
+ *rx_en = 1;
+ *tx_en = 1;
+ } else {
+ *rx_en = 0;
+ *tx_en = 0;
+ }
+}
+
+static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
+ u32 rx_en, u32 tx_en)
+{
+ if (rx_en && tx_en)
+ hdev->fc_mode_last_time = HCLGE_FC_FULL;
+ else if (rx_en && !tx_en)
+ hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
+ else if (!rx_en && tx_en)
+ hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
+ else
+ hdev->fc_mode_last_time = HCLGE_FC_NONE;
+
+ hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
+}
+
+static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
+ u32 rx_en, u32 tx_en)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct phy_device *phydev = hdev->hw.mac.phydev;
+ u32 fc_autoneg;
+
+ if (phydev || hnae3_dev_phy_imp_supported(hdev)) {
+ fc_autoneg = hclge_get_autoneg(handle);
+ if (auto_neg != fc_autoneg) {
+ dev_info(&hdev->pdev->dev,
+ "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
+ dev_info(&hdev->pdev->dev,
+ "Priority flow control enabled. Cannot set link flow control.\n");
+ return -EOPNOTSUPP;
+ }
+
+ hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
+
+ hclge_record_user_pauseparam(hdev, rx_en, tx_en);
+
+ if (!auto_neg || hnae3_dev_phy_imp_supported(hdev))
+ return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
+
+ if (phydev)
+ return phy_start_aneg(phydev);
+
+ return -EOPNOTSUPP;
+}
+
+static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
+ u8 *auto_neg, u32 *speed, u8 *duplex, u32 *lane_num)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ if (speed)
+ *speed = hdev->hw.mac.speed;
+ if (duplex)
+ *duplex = hdev->hw.mac.duplex;
+ if (auto_neg)
+ *auto_neg = hdev->hw.mac.autoneg;
+ if (lane_num)
+ *lane_num = hdev->hw.mac.lane_num;
+}
+
+static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
+ u8 *module_type)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ /* When the nic is down, the service task is not running and does not
+ * update the port information every second. Query the port information
+ * before returning the media type to ensure the media information is
+ * correct.
+ */
+ hclge_update_port_info(hdev);
+
+ if (media_type)
+ *media_type = hdev->hw.mac.media_type;
+
+ if (module_type)
+ *module_type = hdev->hw.mac.module_type;
+}
+
+static void hclge_get_mdix_mode(struct hnae3_handle *handle,
+ u8 *tp_mdix_ctrl, u8 *tp_mdix)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct phy_device *phydev = hdev->hw.mac.phydev;
+ int mdix_ctrl, mdix, is_resolved;
+ unsigned int retval;
+
+ if (!phydev) {
+ *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
+ *tp_mdix = ETH_TP_MDI_INVALID;
+ return;
+ }
+
+ phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
+
+ retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
+ mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
+ HCLGE_PHY_MDIX_CTRL_S);
+
+ retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
+ mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
+ is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
+
+ phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
+
+ switch (mdix_ctrl) {
+ case 0x0:
+ *tp_mdix_ctrl = ETH_TP_MDI;
+ break;
+ case 0x1:
+ *tp_mdix_ctrl = ETH_TP_MDI_X;
+ break;
+ case 0x3:
+ *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
+ break;
+ default:
+ *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
+ break;
+ }
+
+ if (!is_resolved)
+ *tp_mdix = ETH_TP_MDI_INVALID;
+ else if (mdix)
+ *tp_mdix = ETH_TP_MDI_X;
+ else
+ *tp_mdix = ETH_TP_MDI;
+}
+
+static void hclge_info_show(struct hclge_dev *hdev)
+{
+ struct hnae3_handle *handle = &hdev->vport->nic;
+ struct device *dev = &hdev->pdev->dev;
+
+ dev_info(dev, "PF info begin:\n");
+
+ dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
+ dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
+ dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
+ dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
+ dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
+ dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
+ dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
+ dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
+ dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
+ dev_info(dev, "This is %s PF\n",
+ hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
+ dev_info(dev, "DCB %s\n",
+ handle->kinfo.tc_info.dcb_ets_active ? "enable" : "disable");
+ dev_info(dev, "MQPRIO %s\n",
+ handle->kinfo.tc_info.mqprio_active ? "enable" : "disable");
+ dev_info(dev, "Default tx spare buffer size: %u\n",
+ hdev->tx_spare_buf_size);
+
+ dev_info(dev, "PF info end.\n");
+}
+
+static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
+ struct hclge_vport *vport)
+{
+ struct hnae3_client *client = vport->nic.client;
+ struct hclge_dev *hdev = ae_dev->priv;
+ int rst_cnt = hdev->rst_stats.reset_cnt;
+ int ret;
+
+ ret = client->ops->init_instance(&vport->nic);
+ if (ret)
+ return ret;
+
+ set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
+ if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
+ rst_cnt != hdev->rst_stats.reset_cnt) {
+ ret = -EBUSY;
+ goto init_nic_err;
+ }
+
+ /* Enable nic hw error interrupts */
+ ret = hclge_config_nic_hw_error(hdev, true);
+ if (ret) {
+ dev_err(&ae_dev->pdev->dev,
+ "fail(%d) to enable hw error interrupts\n", ret);
+ goto init_nic_err;
+ }
+
+ hnae3_set_client_init_flag(client, ae_dev, 1);
+
+ if (netif_msg_drv(&hdev->vport->nic))
+ hclge_info_show(hdev);
+
+ return ret;
+
+init_nic_err:
+ clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
+ while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
+ msleep(HCLGE_WAIT_RESET_DONE);
+
+ client->ops->uninit_instance(&vport->nic, 0);
+
+ return ret;
+}
+
+static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
+ struct hclge_vport *vport)
+{
+ struct hclge_dev *hdev = ae_dev->priv;
+ struct hnae3_client *client;
+ int rst_cnt;
+ int ret;
+
+ if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
+ !hdev->nic_client)
+ return 0;
+
+ client = hdev->roce_client;
+ ret = hclge_init_roce_base_info(vport);
+ if (ret)
+ return ret;
+
+ rst_cnt = hdev->rst_stats.reset_cnt;
+ ret = client->ops->init_instance(&vport->roce);
+ if (ret)
+ return ret;
+
+ set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
+ if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
+ rst_cnt != hdev->rst_stats.reset_cnt) {
+ ret = -EBUSY;
+ goto init_roce_err;
+ }
+
+ /* Enable roce ras interrupts */
+ ret = hclge_config_rocee_ras_interrupt(hdev, true);
+ if (ret) {
+ dev_err(&ae_dev->pdev->dev,
+ "fail(%d) to enable roce ras interrupts\n", ret);
+ goto init_roce_err;
+ }
+
+ hnae3_set_client_init_flag(client, ae_dev, 1);
+
+ return 0;
+
+init_roce_err:
+ clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
+ while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
+ msleep(HCLGE_WAIT_RESET_DONE);
+
+ hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
+
+ return ret;
+}
+
+static int hclge_init_client_instance(struct hnae3_client *client,
+ struct hnae3_ae_dev *ae_dev)
+{
+ struct hclge_dev *hdev = ae_dev->priv;
+ struct hclge_vport *vport = &hdev->vport[0];
+ int ret;
+
+ switch (client->type) {
+ case HNAE3_CLIENT_KNIC:
+ hdev->nic_client = client;
+ vport->nic.client = client;
+ ret = hclge_init_nic_client_instance(ae_dev, vport);
+ if (ret)
+ goto clear_nic;
+
+ ret = hclge_init_roce_client_instance(ae_dev, vport);
+ if (ret)
+ goto clear_roce;
+
+ break;
+ case HNAE3_CLIENT_ROCE:
+ if (hnae3_dev_roce_supported(hdev)) {
+ hdev->roce_client = client;
+ vport->roce.client = client;
+ }
+
+ ret = hclge_init_roce_client_instance(ae_dev, vport);
+ if (ret)
+ goto clear_roce;
+
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+
+clear_nic:
+ hdev->nic_client = NULL;
+ vport->nic.client = NULL;
+ return ret;
+clear_roce:
+ hdev->roce_client = NULL;
+ vport->roce.client = NULL;
+ return ret;
+}
+
+static void hclge_uninit_client_instance(struct hnae3_client *client,
+ struct hnae3_ae_dev *ae_dev)
+{
+ struct hclge_dev *hdev = ae_dev->priv;
+ struct hclge_vport *vport = &hdev->vport[0];
+
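+	/* Tear down the RoCE client first, waiting for any in-flight reset to
+	 * finish before calling its uninit hook; the NIC client is only torn
+	 * down when the NIC client itself is being unregistered.
+	 */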
+ if (hdev->roce_client) {
+ clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
+ while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
+ msleep(HCLGE_WAIT_RESET_DONE);
+
+ hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
+ hdev->roce_client = NULL;
+ vport->roce.client = NULL;
+ }
+ if (client->type == HNAE3_CLIENT_ROCE)
+ return;
+ if (hdev->nic_client && client->ops->uninit_instance) {
+ clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
+ while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
+ msleep(HCLGE_WAIT_RESET_DONE);
+
+ client->ops->uninit_instance(&vport->nic, 0);
+ hdev->nic_client = NULL;
+ vport->nic.client = NULL;
+ }
+}
+
+static int hclge_dev_mem_map(struct hclge_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ struct hclge_hw *hw = &hdev->hw;
+
+	/* if the device does not have device memory, return directly */
+ if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGE_MEM_BAR)))
+ return 0;
+
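+	/* Map the whole device-memory BAR write-combined; the BAR is shared
+	 * between the RoCE and NIC halves (see HCLGE_NIC_MEM_OFFSET).
+	 */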
+ hw->hw.mem_base =
+ devm_ioremap_wc(&pdev->dev,
+ pci_resource_start(pdev, HCLGE_MEM_BAR),
+ pci_resource_len(pdev, HCLGE_MEM_BAR));
+ if (!hw->hw.mem_base) {
+ dev_err(&pdev->dev, "failed to map device memory\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int hclge_pci_init(struct hclge_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ struct hclge_hw *hw;
+ int ret;
+
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable PCI device\n");
+ return ret;
+ }
+
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (ret) {
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(&pdev->dev,
+ "can't set consistent PCI DMA");
+ goto err_disable_device;
+ }
+ dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
+ }
+
+ ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
+ if (ret) {
+ dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
+ goto err_disable_device;
+ }
+
+ pci_set_master(pdev);
+ hw = &hdev->hw;
+ hw->hw.io_base = pcim_iomap(pdev, 2, 0);
+ if (!hw->hw.io_base) {
+ dev_err(&pdev->dev, "Can't map configuration register space\n");
+ ret = -ENOMEM;
+ goto err_release_regions;
+ }
+
+ ret = hclge_dev_mem_map(hdev);
+ if (ret)
+ goto err_unmap_io_base;
+
+ hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
+
+ return 0;
+
+err_unmap_io_base:
+ pcim_iounmap(pdev, hdev->hw.hw.io_base);
+err_release_regions:
+ pci_release_regions(pdev);
+err_disable_device:
+ pci_disable_device(pdev);
+
+ return ret;
+}
+
+static void hclge_pci_uninit(struct hclge_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+
+ if (hdev->hw.hw.mem_base)
+ devm_iounmap(&pdev->dev, hdev->hw.hw.mem_base);
+
+ pcim_iounmap(pdev, hdev->hw.hw.io_base);
+ pci_free_irq_vectors(pdev);
+ pci_release_mem_regions(pdev);
+ pci_disable_device(pdev);
+}
+
+static void hclge_state_init(struct hclge_dev *hdev)
+{
+ set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
+ set_bit(HCLGE_STATE_DOWN, &hdev->state);
+ clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
+ clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
+ clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
+ clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
+ clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
+}
+
+static void hclge_state_uninit(struct hclge_dev *hdev)
+{
+ set_bit(HCLGE_STATE_DOWN, &hdev->state);
+ set_bit(HCLGE_STATE_REMOVING, &hdev->state);
+
+ if (hdev->reset_timer.function)
+ del_timer_sync(&hdev->reset_timer);
+ if (hdev->service_task.work.func)
+ cancel_delayed_work_sync(&hdev->service_task);
+}
+
+static void hclge_reset_prepare_general(struct hnae3_ae_dev *ae_dev,
+ enum hnae3_reset_type rst_type)
+{
+#define HCLGE_RESET_RETRY_WAIT_MS 500
+#define HCLGE_RESET_RETRY_CNT 5
+
+ struct hclge_dev *hdev = ae_dev->priv;
+ int retry_cnt = 0;
+ int ret;
+
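+	/* Retry the prepare step a bounded number of times: each attempt takes
+	 * the reset semaphore, and if preparation fails or a reset is still
+	 * pending the semaphore is released and the attempt is retried after a
+	 * short delay.
+	 */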
+ while (retry_cnt++ < HCLGE_RESET_RETRY_CNT) {
+ down(&hdev->reset_sem);
+ set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
+ hdev->reset_type = rst_type;
+ ret = hclge_reset_prepare(hdev);
+ if (!ret && !hdev->reset_pending)
+ break;
+
+ dev_err(&hdev->pdev->dev,
+ "failed to prepare to reset, ret=%d, reset_pending:0x%lx, retry_cnt:%d\n",
+ ret, hdev->reset_pending, retry_cnt);
+ clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
+ up(&hdev->reset_sem);
+ msleep(HCLGE_RESET_RETRY_WAIT_MS);
+ }
+
+ /* disable misc vector before reset done */
+ hclge_enable_vector(&hdev->misc_vector, false);
+ set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
+
+ if (hdev->reset_type == HNAE3_FLR_RESET)
+ hdev->rst_stats.flr_rst_cnt++;
+}
+
+static void hclge_reset_done(struct hnae3_ae_dev *ae_dev)
+{
+ struct hclge_dev *hdev = ae_dev->priv;
+ int ret;
+
+ hclge_enable_vector(&hdev->misc_vector, true);
+
+ ret = hclge_reset_rebuild(hdev);
+ if (ret)
+ dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
+
+ hdev->reset_type = HNAE3_NONE_RESET;
+ clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
+ up(&hdev->reset_sem);
+}
+
+static void hclge_clear_resetting_state(struct hclge_dev *hdev)
+{
+ u16 i;
+
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ struct hclge_vport *vport = &hdev->vport[i];
+ int ret;
+
+ /* Send cmd to clear vport's FUNC_RST_ING */
+ ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
+ if (ret)
+ dev_warn(&hdev->pdev->dev,
+ "clear vport(%u) rst failed %d!\n",
+ vport->vport_id, ret);
+ }
+}
+
+static int hclge_clear_hw_resource(struct hclge_dev *hdev)
+{
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CLEAR_HW_RESOURCE, false);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	/* This new command is only supported by new firmware; it will
+	 * fail with older firmware. -EOPNOTSUPP can only be returned by
+	 * older firmware running this command, so to keep the code
+	 * backward compatible we override that value and return success.
+	 */
+ if (ret && ret != -EOPNOTSUPP) {
+ dev_err(&hdev->pdev->dev,
+ "failed to clear hw resource, ret = %d\n", ret);
+ return ret;
+ }
+ return 0;
+}
+
+static void hclge_init_rxd_adv_layout(struct hclge_dev *hdev)
+{
+ if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
+ hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 1);
+}
+
+static void hclge_uninit_rxd_adv_layout(struct hclge_dev *hdev)
+{
+ if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
+ hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 0);
+}
+
+static struct hclge_wol_info *hclge_get_wol_info(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+
+ return &vport->back->hw.mac.wol;
+}
+
+static int hclge_get_wol_supported_mode(struct hclge_dev *hdev,
+ u32 *wol_supported)
+{
+ struct hclge_query_wol_supported_cmd *wol_supported_cmd;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_WOL_GET_SUPPORTED_MODE,
+ true);
+ wol_supported_cmd = (struct hclge_query_wol_supported_cmd *)desc.data;
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to query wol supported, ret = %d\n", ret);
+ return ret;
+ }
+
+ *wol_supported = le32_to_cpu(wol_supported_cmd->supported_wake_mode);
+
+ return 0;
+}
+
+static int hclge_set_wol_cfg(struct hclge_dev *hdev,
+ struct hclge_wol_info *wol_info)
+{
+ struct hclge_wol_cfg_cmd *wol_cfg_cmd;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_WOL_CFG, false);
+ wol_cfg_cmd = (struct hclge_wol_cfg_cmd *)desc.data;
+ wol_cfg_cmd->wake_on_lan_mode = cpu_to_le32(wol_info->wol_current_mode);
+ wol_cfg_cmd->sopass_size = wol_info->wol_sopass_size;
+ memcpy(wol_cfg_cmd->sopass, wol_info->wol_sopass, SOPASS_MAX);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "failed to set wol config, ret = %d\n", ret);
+
+ return ret;
+}
+
+static int hclge_update_wol(struct hclge_dev *hdev)
+{
+ struct hclge_wol_info *wol_info = &hdev->hw.mac.wol;
+
+ if (!hnae3_ae_dev_wol_supported(hdev->ae_dev))
+ return 0;
+
+ return hclge_set_wol_cfg(hdev, wol_info);
+}
+
+static int hclge_init_wol(struct hclge_dev *hdev)
+{
+ struct hclge_wol_info *wol_info = &hdev->hw.mac.wol;
+ int ret;
+
+ if (!hnae3_ae_dev_wol_supported(hdev->ae_dev))
+ return 0;
+
+ memset(wol_info, 0, sizeof(struct hclge_wol_info));
+ ret = hclge_get_wol_supported_mode(hdev,
+ &wol_info->wol_support_mode);
+ if (ret) {
+ wol_info->wol_support_mode = 0;
+ return ret;
+ }
+
+ return hclge_update_wol(hdev);
+}
+
+static void hclge_get_wol(struct hnae3_handle *handle,
+ struct ethtool_wolinfo *wol)
+{
+ struct hclge_wol_info *wol_info = hclge_get_wol_info(handle);
+
+ wol->supported = wol_info->wol_support_mode;
+ wol->wolopts = wol_info->wol_current_mode;
+ if (wol_info->wol_current_mode & WAKE_MAGICSECURE)
+ memcpy(wol->sopass, wol_info->wol_sopass, SOPASS_MAX);
+}
+
+static int hclge_set_wol(struct hnae3_handle *handle,
+ struct ethtool_wolinfo *wol)
+{
+ struct hclge_wol_info *wol_info = hclge_get_wol_info(handle);
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ u32 wol_mode;
+ int ret;
+
+ wol_mode = wol->wolopts;
+ if (wol_mode & ~wol_info->wol_support_mode)
+ return -EINVAL;
+
+ wol_info->wol_current_mode = wol_mode;
+ if (wol_mode & WAKE_MAGICSECURE) {
+ memcpy(wol_info->wol_sopass, wol->sopass, SOPASS_MAX);
+ wol_info->wol_sopass_size = SOPASS_MAX;
+ } else {
+ wol_info->wol_sopass_size = 0;
+ }
+
+ ret = hclge_set_wol_cfg(vport->back, wol_info);
+ if (ret)
+ wol_info->wol_current_mode = 0;
+
+ return ret;
+}
+
+static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
+{
+ struct pci_dev *pdev = ae_dev->pdev;
+ struct hclge_dev *hdev;
+ int ret;
+
+ hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
+ if (!hdev)
+ return -ENOMEM;
+
+ hdev->pdev = pdev;
+ hdev->ae_dev = ae_dev;
+ hdev->reset_type = HNAE3_NONE_RESET;
+ hdev->reset_level = HNAE3_FUNC_RESET;
+ ae_dev->priv = hdev;
+
+	/* HW supports 2-layer VLAN, so reserve room for two tags in the MPS */
+ hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
+
+ mutex_init(&hdev->vport_lock);
+ spin_lock_init(&hdev->fd_rule_lock);
+ sema_init(&hdev->reset_sem, 1);
+
+ ret = hclge_pci_init(hdev);
+ if (ret)
+ goto out;
+
+ ret = hclge_devlink_init(hdev);
+ if (ret)
+ goto err_pci_uninit;
+
+ /* Firmware command queue initialize */
+ ret = hclge_comm_cmd_queue_init(hdev->pdev, &hdev->hw.hw);
+ if (ret)
+ goto err_devlink_uninit;
+
+ /* Firmware command initialize */
+ ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, &hdev->fw_version,
+ true, hdev->reset_pending);
+ if (ret)
+ goto err_cmd_uninit;
+
+ ret = hclge_clear_hw_resource(hdev);
+ if (ret)
+ goto err_cmd_uninit;
+
+ ret = hclge_get_cap(hdev);
+ if (ret)
+ goto err_cmd_uninit;
+
+ ret = hclge_query_dev_specs(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n",
+ ret);
+ goto err_cmd_uninit;
+ }
+
+ ret = hclge_configure(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
+ goto err_cmd_uninit;
+ }
+
+ ret = hclge_init_msi(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
+ goto err_cmd_uninit;
+ }
+
+ ret = hclge_misc_irq_init(hdev);
+ if (ret)
+ goto err_msi_uninit;
+
+ ret = hclge_alloc_tqps(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
+ goto err_msi_irq_uninit;
+ }
+
+ ret = hclge_alloc_vport(hdev);
+ if (ret)
+ goto err_msi_irq_uninit;
+
+ ret = hclge_map_tqp(hdev);
+ if (ret)
+ goto err_msi_irq_uninit;
+
+ if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
+ clear_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps);
+ if (hnae3_dev_phy_imp_supported(hdev))
+ ret = hclge_update_tp_port_info(hdev);
+ else
+ ret = hclge_mac_mdio_config(hdev);
+
+ if (ret)
+ goto err_msi_irq_uninit;
+ }
+
+ ret = hclge_init_umv_space(hdev);
+ if (ret)
+ goto err_mdiobus_unreg;
+
+ ret = hclge_mac_init(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
+ goto err_mdiobus_unreg;
+ }
+
+ ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
+ if (ret) {
+ dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
+ goto err_mdiobus_unreg;
+ }
+
+ ret = hclge_config_gro(hdev);
+ if (ret)
+ goto err_mdiobus_unreg;
+
+ ret = hclge_init_vlan_config(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
+ goto err_mdiobus_unreg;
+ }
+
+ ret = hclge_tm_schd_init(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
+ goto err_mdiobus_unreg;
+ }
+
+ ret = hclge_comm_rss_init_cfg(&hdev->vport->nic, hdev->ae_dev,
+ &hdev->rss_cfg);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
+ goto err_mdiobus_unreg;
+ }
+
+ ret = hclge_rss_init_hw(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
+ goto err_mdiobus_unreg;
+ }
+
+ ret = init_mgr_tbl(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
+ goto err_mdiobus_unreg;
+ }
+
+ ret = hclge_init_fd_config(hdev);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "fd table init fail, ret=%d\n", ret);
+ goto err_mdiobus_unreg;
+ }
+
+ ret = hclge_ptp_init(hdev);
+ if (ret)
+ goto err_mdiobus_unreg;
+
+ ret = hclge_update_port_info(hdev);
+ if (ret)
+ goto err_mdiobus_unreg;
+
+ INIT_KFIFO(hdev->mac_tnl_log);
+
+ hclge_dcb_ops_set(hdev);
+
+ timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
+ INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
+
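+	/* Clear stale interrupt event causes and the per-VF function-reset
+	 * status bits before the service task and misc vector are enabled.
+	 */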
+ hclge_clear_all_event_cause(hdev);
+ hclge_clear_resetting_state(hdev);
+
+ /* Log and clear the hw errors those already occurred */
+ if (hnae3_dev_ras_imp_supported(hdev))
+ hclge_handle_occurred_error(hdev);
+ else
+ hclge_handle_all_hns_hw_errors(ae_dev);
+
+	/* Request a delayed reset for the error recovery, because an
+	 * immediate global reset on this PF would affect the pending
+	 * initialization of other PFs.
+	 */
+ if (ae_dev->hw_err_reset_req) {
+ enum hnae3_reset_type reset_level;
+
+ reset_level = hclge_get_reset_level(ae_dev,
+ &ae_dev->hw_err_reset_req);
+ hclge_set_def_reset_request(ae_dev, reset_level);
+ mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
+ }
+
+ hclge_init_rxd_adv_layout(hdev);
+
+ /* Enable MISC vector(vector0) */
+ hclge_enable_vector(&hdev->misc_vector, true);
+
+ ret = hclge_init_wol(hdev);
+ if (ret)
+ dev_warn(&pdev->dev,
+			 "failed to init wake on lan, ret = %d\n", ret);
+
+ hclge_state_init(hdev);
+ hdev->last_reset_time = jiffies;
+
+ dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
+ HCLGE_DRIVER_NAME);
+
+ hclge_task_schedule(hdev, round_jiffies_relative(HZ));
+
+ return 0;
+
+err_mdiobus_unreg:
+ if (hdev->hw.mac.phydev)
+ mdiobus_unregister(hdev->hw.mac.mdio_bus);
+err_msi_irq_uninit:
+ hclge_misc_irq_uninit(hdev);
+err_msi_uninit:
+ pci_free_irq_vectors(pdev);
+err_cmd_uninit:
+ hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
+err_devlink_uninit:
+ hclge_devlink_uninit(hdev);
+err_pci_uninit:
+ pcim_iounmap(pdev, hdev->hw.hw.io_base);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+out:
+ mutex_destroy(&hdev->vport_lock);
+ return ret;
+}
+
+static void hclge_stats_clear(struct hclge_dev *hdev)
+{
+ memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
+ memset(&hdev->fec_stats, 0, sizeof(hdev->fec_stats));
+}
+
+static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
+{
+ return hclge_config_switch_param(hdev, vf, enable,
+ HCLGE_SWITCH_ANTI_SPOOF_MASK);
+}
+
+static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
+{
+ return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
+ HCLGE_FILTER_FE_NIC_INGRESS_B,
+ enable, vf);
+}
+
+static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
+{
+ int ret;
+
+ ret = hclge_set_mac_spoofchk(hdev, vf, enable);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Set vf %d mac spoof check %s failed, ret=%d\n",
+ vf, enable ? "on" : "off", ret);
+ return ret;
+ }
+
+ ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "Set vf %d vlan spoof check %s failed, ret=%d\n",
+ vf, enable ? "on" : "off", ret);
+
+ return ret;
+}
+
+static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
+ bool enable)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ u32 new_spoofchk = enable ? 1 : 0;
+ int ret;
+
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
+ return -EOPNOTSUPP;
+
+ vport = hclge_get_vf_vport(hdev, vf);
+ if (!vport)
+ return -EINVAL;
+
+ if (vport->vf_info.spoofchk == new_spoofchk)
+ return 0;
+
+ if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
+ dev_warn(&hdev->pdev->dev,
+			 "vf %d vlan table is full, enabling spoof check may cause its packets to fail to send\n",
+ vf);
+ else if (enable && hclge_is_umv_space_full(vport, true))
+ dev_warn(&hdev->pdev->dev,
+			 "vf %d mac table is full, enabling spoof check may cause its packets to fail to send\n",
+ vf);
+
+ ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
+ if (ret)
+ return ret;
+
+ vport->vf_info.spoofchk = new_spoofchk;
+ return 0;
+}
+
+static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport = hdev->vport;
+ int ret;
+ int i;
+
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
+ return 0;
+
+ /* resume the vf spoof check state after reset */
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
+ vport->vf_info.spoofchk);
+ if (ret)
+ return ret;
+
+ vport++;
+ }
+
+ return 0;
+}
+
+static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ u32 new_trusted = enable ? 1 : 0;
+
+ vport = hclge_get_vf_vport(hdev, vf);
+ if (!vport)
+ return -EINVAL;
+
+ if (vport->vf_info.trusted == new_trusted)
+ return 0;
+
+ vport->vf_info.trusted = new_trusted;
+ set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
+ hclge_task_schedule(hdev, 0);
+
+ return 0;
+}
+
+static void hclge_reset_vf_rate(struct hclge_dev *hdev)
+{
+ int ret;
+ int vf;
+
+ /* reset vf rate to default value */
+ for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
+ struct hclge_vport *vport = &hdev->vport[vf];
+
+ vport->vf_info.max_tx_rate = 0;
+ ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "vf%d failed to reset to default, ret=%d\n",
+ vf - HCLGE_VF_VPORT_START_NUM, ret);
+ }
+}
+
+static int hclge_vf_rate_param_check(struct hclge_dev *hdev,
+ int min_tx_rate, int max_tx_rate)
+{
+ if (min_tx_rate != 0 ||
+ max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
+ dev_err(&hdev->pdev->dev,
+ "min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
+ min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
+ int min_tx_rate, int max_tx_rate, bool force)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+
+ ret = hclge_vf_rate_param_check(hdev, min_tx_rate, max_tx_rate);
+ if (ret)
+ return ret;
+
+ vport = hclge_get_vf_vport(hdev, vf);
+ if (!vport)
+ return -EINVAL;
+
+ if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
+ return 0;
+
+ ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
+ if (ret)
+ return ret;
+
+ vport->vf_info.max_tx_rate = max_tx_rate;
+
+ return 0;
+}
+
+static int hclge_resume_vf_rate(struct hclge_dev *hdev)
+{
+ struct hnae3_handle *handle = &hdev->vport->nic;
+ struct hclge_vport *vport;
+ int ret;
+ int vf;
+
+ /* resume the vf max_tx_rate after reset */
+ for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
+ vport = hclge_get_vf_vport(hdev, vf);
+ if (!vport)
+ return -EINVAL;
+
+		/* zero means max rate; after reset, the firmware has already
+		 * set it to max rate, so just continue.
+		 */
+ if (!vport->vf_info.max_tx_rate)
+ continue;
+
+ ret = hclge_set_vf_rate(handle, vf, 0,
+ vport->vf_info.max_tx_rate, true);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "vf%d failed to resume tx_rate:%u, ret=%d\n",
+ vf, vport->vf_info.max_tx_rate, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void hclge_reset_vport_state(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport = hdev->vport;
+ int i;
+
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
+ vport++;
+ }
+}
+
+static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
+{
+ struct hclge_dev *hdev = ae_dev->priv;
+ struct pci_dev *pdev = ae_dev->pdev;
+ int ret;
+
+ set_bit(HCLGE_STATE_DOWN, &hdev->state);
+
+ hclge_stats_clear(hdev);
+	/* NOTE: a PF reset does not need to clear or restore the PF and VF
+	 * table entries, so the tables in memory should not be cleaned here.
+	 */
+ if (hdev->reset_type == HNAE3_IMP_RESET ||
+ hdev->reset_type == HNAE3_GLOBAL_RESET) {
+ memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
+ memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
+ bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
+ hclge_reset_umv_space(hdev);
+ }
+
+ ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, &hdev->fw_version,
+ true, hdev->reset_pending);
+ if (ret) {
+ dev_err(&pdev->dev, "Cmd queue init failed\n");
+ return ret;
+ }
+
+ ret = hclge_map_tqp(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
+ return ret;
+ }
+
+ ret = hclge_mac_init(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
+ return ret;
+ }
+
+ ret = hclge_tp_port_init(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to init tp port, ret = %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
+ if (ret) {
+ dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
+ return ret;
+ }
+
+ ret = hclge_config_gro(hdev);
+ if (ret)
+ return ret;
+
+ ret = hclge_init_vlan_config(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
+ return ret;
+ }
+
+ ret = hclge_tm_init_hw(hdev, true);
+ if (ret) {
+ dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
+ return ret;
+ }
+
+ ret = hclge_rss_init_hw(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
+ return ret;
+ }
+
+ ret = init_mgr_tbl(hdev);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to reinit manager table, ret = %d\n", ret);
+ return ret;
+ }
+
+ ret = hclge_init_fd_config(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
+ return ret;
+ }
+
+ ret = hclge_ptp_init(hdev);
+ if (ret)
+ return ret;
+
+ /* Log and clear the hw errors those already occurred */
+ if (hnae3_dev_ras_imp_supported(hdev))
+ hclge_handle_occurred_error(hdev);
+ else
+ hclge_handle_all_hns_hw_errors(ae_dev);
+
+ /* Re-enable the hw error interrupts because
+ * the interrupts get disabled on global reset.
+ */
+ ret = hclge_config_nic_hw_error(hdev, true);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "fail(%d) to re-enable NIC hw error interrupts\n",
+ ret);
+ return ret;
+ }
+
+ if (hdev->roce_client) {
+ ret = hclge_config_rocee_ras_interrupt(hdev, true);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "fail(%d) to re-enable roce ras interrupts\n",
+ ret);
+ return ret;
+ }
+ }
+
+ hclge_reset_vport_state(hdev);
+ ret = hclge_reset_vport_spoofchk(hdev);
+ if (ret)
+ return ret;
+
+ ret = hclge_resume_vf_rate(hdev);
+ if (ret)
+ return ret;
+
+ hclge_init_rxd_adv_layout(hdev);
+
+ ret = hclge_update_wol(hdev);
+ if (ret)
+ dev_warn(&pdev->dev,
+ "failed to update wol config, ret = %d\n", ret);
+
+ dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
+ HCLGE_DRIVER_NAME);
+
+ return 0;
+}
+
+static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
+{
+ struct hclge_dev *hdev = ae_dev->priv;
+ struct hclge_mac *mac = &hdev->hw.mac;
+
+ hclge_reset_vf_rate(hdev);
+ hclge_clear_vf_vlan(hdev);
+ hclge_state_uninit(hdev);
+ hclge_ptp_uninit(hdev);
+ hclge_uninit_rxd_adv_layout(hdev);
+ hclge_uninit_mac_table(hdev);
+ hclge_del_all_fd_entries(hdev);
+
+ if (mac->phydev)
+ mdiobus_unregister(mac->mdio_bus);
+
+ /* Disable MISC vector(vector0) */
+ hclge_enable_vector(&hdev->misc_vector, false);
+ synchronize_irq(hdev->misc_vector.vector_irq);
+
+ /* Disable all hw interrupts */
+ hclge_config_mac_tnl_int(hdev, false);
+ hclge_config_nic_hw_error(hdev, false);
+ hclge_config_rocee_ras_interrupt(hdev, false);
+
+ hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
+ hclge_misc_irq_uninit(hdev);
+ hclge_devlink_uninit(hdev);
+ hclge_pci_uninit(hdev);
+ hclge_uninit_vport_vlan_table(hdev);
+ mutex_destroy(&hdev->vport_lock);
+ ae_dev->priv = NULL;
+}
+
+static u32 hclge_get_max_channels(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ return min_t(u32, hdev->pf_rss_size_max, vport->alloc_tqps);
+}
+
+static void hclge_get_channels(struct hnae3_handle *handle,
+ struct ethtool_channels *ch)
+{
+ ch->max_combined = hclge_get_max_channels(handle);
+ ch->other_count = 1;
+ ch->max_other = 1;
+ ch->combined_count = handle->kinfo.rss_size;
+}
+
+static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
+ u16 *alloc_tqps, u16 *max_rss_size)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ *alloc_tqps = vport->alloc_tqps;
+ *max_rss_size = hdev->pf_rss_size_max;
+}
+
+static int hclge_set_rss_tc_mode_cfg(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
+ struct hclge_dev *hdev = vport->back;
+ u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
+ u16 tc_valid[HCLGE_MAX_TC_NUM];
+ u16 roundup_size;
+ unsigned int i;
+
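+	/* tc_size is programmed as log2 of the per-TC queue count, so round
+	 * the RSS size up to a power of two first.
+	 */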
+ roundup_size = roundup_pow_of_two(vport->nic.kinfo.rss_size);
+ roundup_size = ilog2(roundup_size);
+ /* Set the RSS TC mode according to the new RSS size */
+ for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
+ tc_valid[i] = 0;
+
+ if (!(hdev->hw_tc_map & BIT(i)))
+ continue;
+
+ tc_valid[i] = 1;
+ tc_size[i] = roundup_size;
+ tc_offset[i] = vport->nic.kinfo.rss_size * i;
+ }
+
+ return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset, tc_valid,
+ tc_size);
+}
+
+static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
+ bool rxfh_configured)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
+ struct hclge_dev *hdev = vport->back;
+ u16 cur_rss_size = kinfo->rss_size;
+ u16 cur_tqps = kinfo->num_tqps;
+ u32 *rss_indir;
+ unsigned int i;
+ int ret;
+
+ kinfo->req_rss_size = new_tqps_num;
+
+ ret = hclge_tm_vport_map_update(hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
+ return ret;
+ }
+
+ ret = hclge_set_rss_tc_mode_cfg(handle);
+ if (ret)
+ return ret;
+
+ /* RSS indirection table has been configured by user */
+ if (rxfh_configured)
+ goto out;
+
+	/* Reinitialize the RSS indirection table according to the new RSS size */
+ rss_indir = kcalloc(ae_dev->dev_specs.rss_ind_tbl_size, sizeof(u32),
+ GFP_KERNEL);
+ if (!rss_indir)
+ return -ENOMEM;
+
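+	/* Spread the indirection entries evenly across the new rss_size */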
+ for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
+ rss_indir[i] = i % kinfo->rss_size;
+
+ ret = hclge_set_rss(handle, rss_indir, NULL, 0);
+ if (ret)
+ dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
+ ret);
+
+ kfree(rss_indir);
+
+out:
+ if (!ret)
+ dev_info(&hdev->pdev->dev,
+ "Channels changed, rss_size from %u to %u, tqps from %u to %u",
+ cur_rss_size, kinfo->rss_size,
+ cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc);
+
+ return ret;
+}
+
+static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
+{
+ struct hclge_set_led_state_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
+
+ req = (struct hclge_set_led_state_cmd *)desc.data;
+ hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
+ HCLGE_LED_LOCATE_STATE_S, locate_led_status);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "Send set led state cmd error, ret =%d\n", ret);
+
+ return ret;
+}
+
+enum hclge_led_status {
+ HCLGE_LED_OFF,
+ HCLGE_LED_ON,
+ HCLGE_LED_NO_CHANGE = 0xFF,
+};
+
+static int hclge_set_led_id(struct hnae3_handle *handle,
+ enum ethtool_phys_id_state status)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ switch (status) {
+ case ETHTOOL_ID_ACTIVE:
+ return hclge_set_led_status(hdev, HCLGE_LED_ON);
+ case ETHTOOL_ID_INACTIVE:
+ return hclge_set_led_status(hdev, HCLGE_LED_OFF);
+ default:
+ return -EINVAL;
+ }
+}
+
+static void hclge_get_link_mode(struct hnae3_handle *handle,
+ unsigned long *supported,
+ unsigned long *advertising)
+{
+ unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ unsigned int idx = 0;
+
+ for (; idx < size; idx++) {
+ supported[idx] = hdev->hw.mac.supported[idx];
+ advertising[idx] = hdev->hw.mac.advertising[idx];
+ }
+}
+
+static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ bool gro_en_old = hdev->gro_en;
+ int ret;
+
+ hdev->gro_en = enable;
+ ret = hclge_config_gro(hdev);
+ if (ret)
+ hdev->gro_en = gro_en_old;
+
+ return ret;
+}
+
+static int hclge_sync_vport_promisc_mode(struct hclge_vport *vport)
+{
+ struct hnae3_handle *handle = &vport->nic;
+ struct hclge_dev *hdev = vport->back;
+ bool uc_en = false;
+ bool mc_en = false;
+ u8 tmp_flags;
+ bool bc_en;
+ int ret;
+
+ if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
+ set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
+ vport->last_promisc_flags = vport->overflow_promisc_flags;
+ }
+
+ if (!test_and_clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
+ &vport->state))
+ return 0;
+
+ /* for PF */
+ if (!vport->vport_id) {
+ tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
+ ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
+ tmp_flags & HNAE3_MPE);
+ if (!ret)
+ set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
+ &vport->state);
+ else
+ set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
+ &vport->state);
+ return ret;
+ }
+
+ /* for VF */
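+	/* Only a trusted VF may have unicast/multicast promiscuous mode
+	 * enabled; untrusted VFs keep it off regardless of their request.
+	 */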
+ if (vport->vf_info.trusted) {
+ uc_en = vport->vf_info.request_uc_en > 0 ||
+ vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE;
+ mc_en = vport->vf_info.request_mc_en > 0 ||
+ vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE;
+ }
+ bc_en = vport->vf_info.request_bc_en > 0;
+
+ ret = hclge_cmd_set_promisc_mode(hdev, vport->vport_id, uc_en,
+ mc_en, bc_en);
+ if (ret) {
+ set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
+ return ret;
+ }
+ hclge_set_vport_vlan_fltr_change(vport);
+
+ return 0;
+}
+
+static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport;
+ int ret;
+ u16 i;
+
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ vport = &hdev->vport[i];
+
+ ret = hclge_sync_vport_promisc_mode(vport);
+ if (ret)
+ return;
+ }
+}
+
+static bool hclge_module_existed(struct hclge_dev *hdev)
+{
+ struct hclge_desc desc;
+ u32 existed;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get SFP exist state, ret = %d\n", ret);
+ return false;
+ }
+
+ existed = le32_to_cpu(desc.data[0]);
+
+ return existed != 0;
+}
+
+/* needs 6 BDs (140 bytes in total) in one read;
+ * returns the number of bytes actually read, 0 means the read failed.
+ */
+static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
+ u32 len, u8 *data)
+{
+ struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
+ struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
+ u16 read_len;
+ u16 copy_len;
+ int ret;
+ int i;
+
+ /* setup all 6 bds to read module eeprom info. */
+ for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
+ hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
+ true);
+
+ /* bd0~bd4 need next flag */
+ if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
+ desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ }
+
+ /* setup bd0, this bd contains offset and read length. */
+ sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
+ sfp_info_bd0->offset = cpu_to_le16((u16)offset);
+ read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
+ sfp_info_bd0->read_len = cpu_to_le16(read_len);
+
+ ret = hclge_cmd_send(&hdev->hw, desc, i);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get SFP eeprom info, ret = %d\n", ret);
+ return 0;
+ }
+
+ /* copy sfp info from bd0 to out buffer. */
+ copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
+ memcpy(data, sfp_info_bd0->data, copy_len);
+ read_len = copy_len;
+
+ /* copy sfp info from bd1~bd5 to out buffer if needed. */
+ for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
+ if (read_len >= len)
+ return read_len;
+
+ copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
+ memcpy(data + read_len, desc[i].data, copy_len);
+ read_len += copy_len;
+ }
+
+ return read_len;
+}
+
+static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
+ u32 len, u8 *data)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ u32 read_len = 0;
+ u16 data_len;
+
+ if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
+ return -EOPNOTSUPP;
+
+ if (!hclge_module_existed(hdev))
+ return -ENXIO;
+
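+	/* Read the EEPROM in chunks of at most HCLGE_SFP_INFO_MAX_LEN bytes
+	 * per firmware command until the requested length is satisfied.
+	 */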
+ while (read_len < len) {
+ data_len = hclge_get_sfp_eeprom_info(hdev,
+ offset + read_len,
+ len - read_len,
+ data + read_len);
+ if (!data_len)
+ return -EIO;
+
+ read_len += data_len;
+ }
+
+ return 0;
+}
+
+static int hclge_get_link_diagnosis_info(struct hnae3_handle *handle,
+ u32 *status_code)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_desc desc;
+ int ret;
+
+ if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2)
+ return -EOPNOTSUPP;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_DIAGNOSIS, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to query link diagnosis info, ret = %d\n", ret);
+ return ret;
+ }
+
+ *status_code = le32_to_cpu(desc.data[0]);
+ return 0;
+}
+
+/* After SR-IOV is disabled, the VF still has some configuration and
+ * info that were set up by the PF and need to be cleaned.
+ */
+static void hclge_clear_vport_vf_info(struct hclge_vport *vport, int vfid)
+{
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_vlan_info vlan_info;
+ int ret;
+
+ clear_bit(HCLGE_VPORT_STATE_INITED, &vport->state);
+ clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
+ vport->need_notify = 0;
+ vport->mps = 0;
+
+	/* after SR-IOV is disabled, clean the VF rate configured by the PF */
+ ret = hclge_tm_qs_shaper_cfg(vport, 0);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "failed to clean vf%d rate config, ret = %d\n",
+ vfid, ret);
+
+ vlan_info.vlan_tag = 0;
+ vlan_info.qos = 0;
+ vlan_info.vlan_proto = ETH_P_8021Q;
+ ret = hclge_update_port_base_vlan_cfg(vport,
+ HNAE3_PORT_BASE_VLAN_DISABLE,
+ &vlan_info);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "failed to clean vf%d port base vlan, ret = %d\n",
+ vfid, ret);
+
+ ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, false);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "failed to clean vf%d spoof config, ret = %d\n",
+ vfid, ret);
+
+ memset(&vport->vf_info, 0, sizeof(vport->vf_info));
+}
+
+static void hclge_clean_vport_config(struct hnae3_ae_dev *ae_dev, int num_vfs)
+{
+ struct hclge_dev *hdev = ae_dev->priv;
+ struct hclge_vport *vport;
+ int i;
+
+ for (i = 0; i < num_vfs; i++) {
+ vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM];
+
+ hclge_clear_vport_vf_info(vport, i);
+ }
+}
+
+static int hclge_get_dscp_prio(struct hnae3_handle *h, u8 dscp, u8 *tc_mode,
+ u8 *priority)
+{
+ struct hclge_vport *vport = hclge_get_vport(h);
+
+ if (dscp >= HNAE3_MAX_DSCP)
+ return -EINVAL;
+
+ if (tc_mode)
+ *tc_mode = vport->nic.kinfo.tc_map_mode;
+ if (priority)
+ *priority = vport->nic.kinfo.dscp_prio[dscp] == HNAE3_PRIO_ID_INVALID ? 0 :
+ vport->nic.kinfo.dscp_prio[dscp];
+
+ return 0;
+}
+
+static const struct hnae3_ae_ops hclge_ops = {
+ .init_ae_dev = hclge_init_ae_dev,
+ .uninit_ae_dev = hclge_uninit_ae_dev,
+ .reset_prepare = hclge_reset_prepare_general,
+ .reset_done = hclge_reset_done,
+ .init_client_instance = hclge_init_client_instance,
+ .uninit_client_instance = hclge_uninit_client_instance,
+ .map_ring_to_vector = hclge_map_ring_to_vector,
+ .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
+ .get_vector = hclge_get_vector,
+ .put_vector = hclge_put_vector,
+ .set_promisc_mode = hclge_set_promisc_mode,
+ .request_update_promisc_mode = hclge_request_update_promisc_mode,
+ .set_loopback = hclge_set_loopback,
+ .start = hclge_ae_start,
+ .stop = hclge_ae_stop,
+ .client_start = hclge_client_start,
+ .client_stop = hclge_client_stop,
+ .get_status = hclge_get_status,
+ .get_ksettings_an_result = hclge_get_ksettings_an_result,
+ .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
+ .get_media_type = hclge_get_media_type,
+ .check_port_speed = hclge_check_port_speed,
+ .get_fec_stats = hclge_get_fec_stats,
+ .get_fec = hclge_get_fec,
+ .set_fec = hclge_set_fec,
+ .get_rss_key_size = hclge_comm_get_rss_key_size,
+ .get_rss = hclge_get_rss,
+ .set_rss = hclge_set_rss,
+ .set_rss_tuple = hclge_set_rss_tuple,
+ .get_rss_tuple = hclge_get_rss_tuple,
+ .get_tc_size = hclge_get_tc_size,
+ .get_mac_addr = hclge_get_mac_addr,
+ .set_mac_addr = hclge_set_mac_addr,
+ .do_ioctl = hclge_do_ioctl,
+ .add_uc_addr = hclge_add_uc_addr,
+ .rm_uc_addr = hclge_rm_uc_addr,
+ .add_mc_addr = hclge_add_mc_addr,
+ .rm_mc_addr = hclge_rm_mc_addr,
+ .set_autoneg = hclge_set_autoneg,
+ .get_autoneg = hclge_get_autoneg,
+ .restart_autoneg = hclge_restart_autoneg,
+ .halt_autoneg = hclge_halt_autoneg,
+ .get_pauseparam = hclge_get_pauseparam,
+ .set_pauseparam = hclge_set_pauseparam,
+ .set_mtu = hclge_set_mtu,
+ .reset_queue = hclge_reset_tqp,
+ .get_stats = hclge_get_stats,
+ .get_mac_stats = hclge_get_mac_stat,
+ .update_stats = hclge_update_stats,
+ .get_strings = hclge_get_strings,
+ .get_sset_count = hclge_get_sset_count,
+ .get_fw_version = hclge_get_fw_version,
+ .get_mdix_mode = hclge_get_mdix_mode,
+ .enable_vlan_filter = hclge_enable_vlan_filter,
+ .set_vlan_filter = hclge_set_vlan_filter,
+ .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
+ .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
+ .reset_event = hclge_reset_event,
+ .get_reset_level = hclge_get_reset_level,
+ .set_default_reset_request = hclge_set_def_reset_request,
+ .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
+ .set_channels = hclge_set_channels,
+ .get_channels = hclge_get_channels,
+ .get_regs_len = hclge_get_regs_len,
+ .get_regs = hclge_get_regs,
+ .set_led_id = hclge_set_led_id,
+ .get_link_mode = hclge_get_link_mode,
+ .add_fd_entry = hclge_add_fd_entry,
+ .del_fd_entry = hclge_del_fd_entry,
+ .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
+ .get_fd_rule_info = hclge_get_fd_rule_info,
+ .get_fd_all_rules = hclge_get_all_rules,
+ .enable_fd = hclge_enable_fd,
+ .add_arfs_entry = hclge_add_fd_entry_by_arfs,
+ .dbg_read_cmd = hclge_dbg_read_cmd,
+ .handle_hw_ras_error = hclge_handle_hw_ras_error,
+ .get_hw_reset_stat = hclge_get_hw_reset_stat,
+ .ae_dev_resetting = hclge_ae_dev_resetting,
+ .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
+ .set_gro_en = hclge_gro_en,
+ .get_global_queue_id = hclge_covert_handle_qid_global,
+ .set_timer_task = hclge_set_timer_task,
+ .mac_connect_phy = hclge_mac_connect_phy,
+ .mac_disconnect_phy = hclge_mac_disconnect_phy,
+ .get_vf_config = hclge_get_vf_config,
+ .set_vf_link_state = hclge_set_vf_link_state,
+ .set_vf_spoofchk = hclge_set_vf_spoofchk,
+ .set_vf_trust = hclge_set_vf_trust,
+ .set_vf_rate = hclge_set_vf_rate,
+ .set_vf_mac = hclge_set_vf_mac,
+ .get_module_eeprom = hclge_get_module_eeprom,
+ .get_cmdq_stat = hclge_get_cmdq_stat,
+ .add_cls_flower = hclge_add_cls_flower,
+ .del_cls_flower = hclge_del_cls_flower,
+ .cls_flower_active = hclge_is_cls_flower_active,
+ .get_phy_link_ksettings = hclge_get_phy_link_ksettings,
+ .set_phy_link_ksettings = hclge_set_phy_link_ksettings,
+ .set_tx_hwts_info = hclge_ptp_set_tx_info,
+ .get_rx_hwts = hclge_ptp_get_rx_hwts,
+ .get_ts_info = hclge_ptp_get_ts_info,
+ .get_link_diagnosis_info = hclge_get_link_diagnosis_info,
+ .clean_vf_config = hclge_clean_vport_config,
+ .get_dscp_prio = hclge_get_dscp_prio,
+ .get_wol = hclge_get_wol,
+ .set_wol = hclge_set_wol,
+};
+
+static struct hnae3_ae_algo ae_algo = {
+ .ops = &hclge_ops,
+ .pdev_id_table = ae_algo_pci_tbl,
+};
+
+static int __init hclge_init(void)
+{
+ pr_info("%s is initializing\n", HCLGE_NAME);
+
+ hclge_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGE_NAME);
+ if (!hclge_wq) {
+ pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
+ return -ENOMEM;
+ }
+
+ hnae3_register_ae_algo(&ae_algo);
+
+ return 0;
+}
+
+static void __exit hclge_exit(void)
+{
+ hnae3_unregister_ae_algo_prepare(&ae_algo);
+ hnae3_unregister_ae_algo(&ae_algo);
+ destroy_workqueue(hclge_wq);
+}
+module_init(hclge_init);
+module_exit(hclge_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
+MODULE_DESCRIPTION("HCLGE Driver");
+MODULE_VERSION(HCLGE_MOD_VERSION);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
new file mode 100644
index 0000000000..7bc2049b72
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -0,0 +1,1149 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+// Copyright (c) 2016-2017 Hisilicon Limited.
+
+#ifndef __HCLGE_MAIN_H
+#define __HCLGE_MAIN_H
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/phy.h>
+#include <linux/if_vlan.h>
+#include <linux/kfifo.h>
+#include <net/devlink.h>
+
+#include "hclge_cmd.h"
+#include "hclge_ptp.h"
+#include "hnae3.h"
+#include "hclge_comm_rss.h"
+#include "hclge_comm_tqp_stats.h"
+
+#define HCLGE_MOD_VERSION "1.0"
+#define HCLGE_DRIVER_NAME "hclge"
+
+#define HCLGE_MAX_PF_NUM 8
+
+#define HCLGE_VF_VPORT_START_NUM 1
+
+#define HCLGE_RD_FIRST_STATS_NUM 2
+#define HCLGE_RD_OTHER_STATS_NUM 4
+
+#define HCLGE_INVALID_VPORT 0xffff
+
+#define HCLGE_PF_CFG_BLOCK_SIZE 32
+#define HCLGE_PF_CFG_DESC_NUM \
+ (HCLGE_PF_CFG_BLOCK_SIZE / HCLGE_CFG_RD_LEN_BYTES)
+
+#define HCLGE_VECTOR_REG_BASE 0x20000
+#define HCLGE_VECTOR_EXT_REG_BASE 0x30000
+#define HCLGE_MISC_VECTOR_REG_BASE 0x20400
+
+#define HCLGE_VECTOR_REG_OFFSET 0x4
+#define HCLGE_VECTOR_REG_OFFSET_H 0x1000
+#define HCLGE_VECTOR_VF_OFFSET 0x100000
+
+#define HCLGE_NIC_CSQ_DEPTH_REG 0x27008
+
+/* bar registers for common func */
+#define HCLGE_GRO_EN_REG 0x28000
+#define HCLGE_RXD_ADV_LAYOUT_EN_REG 0x28008
+
+/* bar registers for rcb */
+#define HCLGE_RING_RX_ADDR_L_REG 0x80000
+#define HCLGE_RING_RX_ADDR_H_REG 0x80004
+#define HCLGE_RING_RX_BD_NUM_REG 0x80008
+#define HCLGE_RING_RX_BD_LENGTH_REG 0x8000C
+#define HCLGE_RING_RX_MERGE_EN_REG 0x80014
+#define HCLGE_RING_RX_TAIL_REG 0x80018
+#define HCLGE_RING_RX_HEAD_REG 0x8001C
+#define HCLGE_RING_RX_FBD_NUM_REG 0x80020
+#define HCLGE_RING_RX_OFFSET_REG 0x80024
+#define HCLGE_RING_RX_FBD_OFFSET_REG 0x80028
+#define HCLGE_RING_RX_STASH_REG 0x80030
+#define HCLGE_RING_RX_BD_ERR_REG 0x80034
+#define HCLGE_RING_TX_ADDR_L_REG 0x80040
+#define HCLGE_RING_TX_ADDR_H_REG 0x80044
+#define HCLGE_RING_TX_BD_NUM_REG 0x80048
+#define HCLGE_RING_TX_PRIORITY_REG 0x8004C
+#define HCLGE_RING_TX_TC_REG 0x80050
+#define HCLGE_RING_TX_MERGE_EN_REG 0x80054
+#define HCLGE_RING_TX_TAIL_REG 0x80058
+#define HCLGE_RING_TX_HEAD_REG 0x8005C
+#define HCLGE_RING_TX_FBD_NUM_REG 0x80060
+#define HCLGE_RING_TX_OFFSET_REG 0x80064
+#define HCLGE_RING_TX_EBD_NUM_REG 0x80068
+#define HCLGE_RING_TX_EBD_OFFSET_REG 0x80070
+#define HCLGE_RING_TX_BD_ERR_REG 0x80074
+#define HCLGE_RING_EN_REG 0x80090
+
+/* bar registers for tqp interrupt */
+#define HCLGE_TQP_INTR_CTRL_REG 0x20000
+#define HCLGE_TQP_INTR_GL0_REG 0x20100
+#define HCLGE_TQP_INTR_GL1_REG 0x20200
+#define HCLGE_TQP_INTR_GL2_REG 0x20300
+#define HCLGE_TQP_INTR_RL_REG 0x20900
+
+#define HCLGE_RSS_IND_TBL_SIZE 512
+
+#define HCLGE_RSS_TC_SIZE_0 1
+#define HCLGE_RSS_TC_SIZE_1 2
+#define HCLGE_RSS_TC_SIZE_2 4
+#define HCLGE_RSS_TC_SIZE_3 8
+#define HCLGE_RSS_TC_SIZE_4 16
+#define HCLGE_RSS_TC_SIZE_5 32
+#define HCLGE_RSS_TC_SIZE_6 64
+#define HCLGE_RSS_TC_SIZE_7 128
+
+#define HCLGE_UMV_TBL_SIZE 3072
+#define HCLGE_DEFAULT_UMV_SPACE_PER_PF \
+ (HCLGE_UMV_TBL_SIZE / HCLGE_MAX_PF_NUM)
+
+#define HCLGE_TQP_RESET_TRY_TIMES 200
+
+#define HCLGE_PHY_PAGE_MDIX 0
+#define HCLGE_PHY_PAGE_COPPER 0
+
+/* Page Selection Reg. */
+#define HCLGE_PHY_PAGE_REG 22
+
+/* Copper Specific Control Register */
+#define HCLGE_PHY_CSC_REG 16
+
+/* Copper Specific Status Register */
+#define HCLGE_PHY_CSS_REG 17
+
+#define HCLGE_PHY_MDIX_CTRL_S 5
+#define HCLGE_PHY_MDIX_CTRL_M GENMASK(6, 5)
+
+#define HCLGE_PHY_MDIX_STATUS_B 6
+#define HCLGE_PHY_SPEED_DUP_RESOLVE_B 11
+
+#define HCLGE_GET_DFX_REG_TYPE_CNT 4
+
+/* Factor used to calculate offset and bitmap of VF num */
+#define HCLGE_VF_NUM_PER_CMD 64
+
+#define HCLGE_MAX_QSET_NUM 1024
+
+#define HCLGE_DBG_RESET_INFO_LEN 1024
+
+enum HLCGE_PORT_TYPE {
+ HOST_PORT,
+ NETWORK_PORT
+};
+
+#define PF_VPORT_ID 0
+
+#define HCLGE_PF_ID_S 0
+#define HCLGE_PF_ID_M GENMASK(2, 0)
+#define HCLGE_VF_ID_S 3
+#define HCLGE_VF_ID_M GENMASK(10, 3)
+#define HCLGE_PORT_TYPE_B 11
+#define HCLGE_NETWORK_PORT_ID_S 0
+#define HCLGE_NETWORK_PORT_ID_M GENMASK(3, 0)
+
+/* Reset related Registers */
+#define HCLGE_PF_OTHER_INT_REG 0x20600
+#define HCLGE_MISC_RESET_STS_REG 0x20700
+#define HCLGE_MISC_VECTOR_INT_STS 0x20800
+#define HCLGE_GLOBAL_RESET_REG 0x20A00
+#define HCLGE_GLOBAL_RESET_BIT 0
+#define HCLGE_CORE_RESET_BIT 1
+#define HCLGE_IMP_RESET_BIT 2
+#define HCLGE_RESET_INT_M GENMASK(7, 5)
+#define HCLGE_FUN_RST_ING 0x20C00
+#define HCLGE_FUN_RST_ING_B 0
+
+/* Vector0 register bits define */
+#define HCLGE_VECTOR0_REG_PTP_INT_B 0
+#define HCLGE_VECTOR0_GLOBALRESET_INT_B 5
+#define HCLGE_VECTOR0_CORERESET_INT_B 6
+#define HCLGE_VECTOR0_IMPRESET_INT_B 7
+
+/* Vector0 interrupt CMDQ event source register(RW) */
+#define HCLGE_VECTOR0_CMDQ_SRC_REG 0x27100
+/* CMDQ register bits for RX event(=MBX event) */
+#define HCLGE_VECTOR0_RX_CMDQ_INT_B 1
+
+#define HCLGE_VECTOR0_IMP_RESET_INT_B 1
+#define HCLGE_VECTOR0_IMP_CMDQ_ERR_B 4U
+#define HCLGE_VECTOR0_IMP_RD_POISON_B 5U
+#define HCLGE_VECTOR0_ALL_MSIX_ERR_B 6U
+#define HCLGE_TRIGGER_IMP_RESET_B 7U
+
+#define HCLGE_TQP_MEM_SIZE 0x10000
+#define HCLGE_MEM_BAR 4
+/* in BAR4, the first half is for RoCE and the second half is for the NIC */
+#define HCLGE_NIC_MEM_OFFSET(hdev) \
+ (pci_resource_len((hdev)->pdev, HCLGE_MEM_BAR) >> 1)
+#define HCLGE_TQP_MEM_OFFSET(hdev, i) \
+ (HCLGE_NIC_MEM_OFFSET(hdev) + HCLGE_TQP_MEM_SIZE * (i))
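+/* e.g. with a hypothetical 2 MiB BAR4 the NIC half starts at 1 MiB, and TQP i
+ * gets the 64 KiB (HCLGE_TQP_MEM_SIZE) slice starting at 1 MiB + i * 0x10000.
+ */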
+
+#define HCLGE_MAC_DEFAULT_FRAME \
+ (ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN + ETH_DATA_LEN)
+#define HCLGE_MAC_MIN_FRAME 64
+#define HCLGE_MAC_MAX_FRAME 9728
+
+#define HCLGE_SUPPORT_1G_BIT BIT(0)
+#define HCLGE_SUPPORT_10G_BIT BIT(1)
+#define HCLGE_SUPPORT_25G_BIT BIT(2)
+#define HCLGE_SUPPORT_50G_BIT BIT(3)
+#define HCLGE_SUPPORT_100G_BIT BIT(4)
+/* to be compatible with existing boards */
+#define HCLGE_SUPPORT_40G_BIT BIT(5)
+#define HCLGE_SUPPORT_100M_BIT BIT(6)
+#define HCLGE_SUPPORT_10M_BIT BIT(7)
+#define HCLGE_SUPPORT_200G_BIT BIT(8)
+#define HCLGE_SUPPORT_GE \
+ (HCLGE_SUPPORT_1G_BIT | HCLGE_SUPPORT_100M_BIT | HCLGE_SUPPORT_10M_BIT)
+
+enum HCLGE_DEV_STATE {
+ HCLGE_STATE_REINITING,
+ HCLGE_STATE_DOWN,
+ HCLGE_STATE_DISABLED,
+ HCLGE_STATE_REMOVING,
+ HCLGE_STATE_NIC_REGISTERED,
+ HCLGE_STATE_ROCE_REGISTERED,
+ HCLGE_STATE_SERVICE_INITED,
+ HCLGE_STATE_RST_SERVICE_SCHED,
+ HCLGE_STATE_RST_HANDLING,
+ HCLGE_STATE_MBX_SERVICE_SCHED,
+ HCLGE_STATE_MBX_HANDLING,
+ HCLGE_STATE_ERR_SERVICE_SCHED,
+ HCLGE_STATE_STATISTICS_UPDATING,
+ HCLGE_STATE_LINK_UPDATING,
+ HCLGE_STATE_RST_FAIL,
+ HCLGE_STATE_FD_TBL_CHANGED,
+ HCLGE_STATE_FD_CLEAR_ALL,
+ HCLGE_STATE_FD_USER_DEF_CHANGED,
+ HCLGE_STATE_PTP_EN,
+ HCLGE_STATE_PTP_TX_HANDLING,
+ HCLGE_STATE_FEC_STATS_UPDATING,
+ HCLGE_STATE_MAX
+};
+
+enum hclge_evt_cause {
+ HCLGE_VECTOR0_EVENT_RST,
+ HCLGE_VECTOR0_EVENT_MBX,
+ HCLGE_VECTOR0_EVENT_ERR,
+ HCLGE_VECTOR0_EVENT_PTP,
+ HCLGE_VECTOR0_EVENT_OTHER,
+};
+
+enum HCLGE_MAC_SPEED {
+ HCLGE_MAC_SPEED_UNKNOWN = 0, /* unknown */
+ HCLGE_MAC_SPEED_10M = 10, /* 10 Mbps */
+ HCLGE_MAC_SPEED_100M = 100, /* 100 Mbps */
+ HCLGE_MAC_SPEED_1G = 1000, /* 1000 Mbps = 1 Gbps */
+ HCLGE_MAC_SPEED_10G = 10000, /* 10000 Mbps = 10 Gbps */
+ HCLGE_MAC_SPEED_25G = 25000, /* 25000 Mbps = 25 Gbps */
+ HCLGE_MAC_SPEED_40G = 40000, /* 40000 Mbps = 40 Gbps */
+ HCLGE_MAC_SPEED_50G = 50000, /* 50000 Mbps = 50 Gbps */
+ HCLGE_MAC_SPEED_100G = 100000, /* 100000 Mbps = 100 Gbps */
+ HCLGE_MAC_SPEED_200G = 200000 /* 200000 Mbps = 200 Gbps */
+};
+
+enum HCLGE_MAC_DUPLEX {
+ HCLGE_MAC_HALF,
+ HCLGE_MAC_FULL
+};
+
+#define QUERY_SFP_SPEED 0
+#define QUERY_ACTIVE_SPEED 1
+
+struct hclge_wol_info {
+ u32 wol_support_mode; /* store the wake on lan info */
+ u32 wol_current_mode;
+ u8 wol_sopass[SOPASS_MAX];
+ u8 wol_sopass_size;
+};
+
+struct hclge_mac {
+ u8 mac_id;
+ u8 phy_addr;
+ u8 flag;
+ u8 media_type; /* port media type, e.g. fibre/copper/backplane */
+ u8 mac_addr[ETH_ALEN];
+ u8 autoneg;
+ u8 duplex;
+ u8 support_autoneg;
+ u8 speed_type; /* 0: sfp speed, 1: active speed */
+ u8 lane_num;
+ u32 speed;
+ u32 max_speed;
+ u32 speed_ability; /* speed ability supported by current media */
+ u32 module_type; /* sub media type, e.g. kr/cr/sr/lr */
+ u32 fec_mode; /* active fec mode */
+ u32 user_fec_mode;
+ u32 fec_ability;
+ int link; /* store the link status of mac & phy (if phy exists) */
+ struct hclge_wol_info wol;
+ struct phy_device *phydev;
+ struct mii_bus *mdio_bus;
+ phy_interface_t phy_if;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
+};
+
+struct hclge_hw {
+ struct hclge_comm_hw hw;
+ struct hclge_mac mac;
+ int num_vec;
+};
+
+enum hclge_fc_mode {
+ HCLGE_FC_NONE,
+ HCLGE_FC_RX_PAUSE,
+ HCLGE_FC_TX_PAUSE,
+ HCLGE_FC_FULL,
+ HCLGE_FC_PFC,
+ HCLGE_FC_DEFAULT
+};
+
+#define HCLGE_FILTER_TYPE_VF 0
+#define HCLGE_FILTER_TYPE_PORT 1
+#define HCLGE_FILTER_FE_EGRESS_V1_B BIT(0)
+#define HCLGE_FILTER_FE_NIC_INGRESS_B BIT(0)
+#define HCLGE_FILTER_FE_NIC_EGRESS_B BIT(1)
+#define HCLGE_FILTER_FE_ROCE_INGRESS_B BIT(2)
+#define HCLGE_FILTER_FE_ROCE_EGRESS_B BIT(3)
+#define HCLGE_FILTER_FE_EGRESS (HCLGE_FILTER_FE_NIC_EGRESS_B \
+ | HCLGE_FILTER_FE_ROCE_EGRESS_B)
+#define HCLGE_FILTER_FE_INGRESS (HCLGE_FILTER_FE_NIC_INGRESS_B \
+ | HCLGE_FILTER_FE_ROCE_INGRESS_B)
+
+enum hclge_vlan_fltr_cap {
+ HCLGE_VLAN_FLTR_DEF,
+ HCLGE_VLAN_FLTR_CAN_MDF,
+};
+enum hclge_link_fail_code {
+ HCLGE_LF_NORMAL,
+ HCLGE_LF_REF_CLOCK_LOST,
+ HCLGE_LF_XSFP_TX_DISABLE,
+ HCLGE_LF_XSFP_ABSENT,
+};
+
+#define HCLGE_LINK_STATUS_DOWN 0
+#define HCLGE_LINK_STATUS_UP 1
+
+#define HCLGE_PG_NUM 4
+#define HCLGE_SCH_MODE_SP 0
+#define HCLGE_SCH_MODE_DWRR 1
+struct hclge_pg_info {
+ u8 pg_id;
+ u8 pg_sch_mode; /* 0: sp; 1: dwrr */
+ u8 tc_bit_map;
+ u32 bw_limit;
+ u8 tc_dwrr[HNAE3_MAX_TC];
+};
+
+struct hclge_tc_info {
+ u8 tc_id;
+ u8 tc_sch_mode; /* 0: sp; 1: dwrr */
+ u8 pgid;
+ u32 bw_limit;
+};
+
+struct hclge_cfg {
+ u8 tc_num;
+ u8 vlan_fliter_cap;
+ u16 tqp_desc_num;
+ u16 rx_buf_len;
+ u16 vf_rss_size_max;
+ u16 pf_rss_size_max;
+ u8 phy_addr;
+ u8 media_type;
+ u8 mac_addr[ETH_ALEN];
+ u8 default_speed;
+ u32 numa_node_map;
+ u32 tx_spare_buf_size;
+ u16 speed_ability;
+ u16 umv_space;
+};
+
+struct hclge_tm_info {
+ u8 num_tc;
+	u8 num_pg;	/* must be 1 if vNET-based scheduling is used */
+ u8 pg_dwrr[HCLGE_PG_NUM];
+ u8 prio_tc[HNAE3_MAX_USER_PRIO];
+ struct hclge_pg_info pg_info[HCLGE_PG_NUM];
+ struct hclge_tc_info tc_info[HNAE3_MAX_TC];
+ enum hclge_fc_mode fc_mode;
+	u8 hw_pfc_map; /* whether packet drop is allowed on this TC */
+ u8 pfc_en; /* PFC enabled or not for user priority */
+};
+
+/* max number of mac statistics on each version */
+#define HCLGE_MAC_STATS_MAX_NUM_V1 87
+#define HCLGE_MAC_STATS_MAX_NUM_V2 105
+
+struct hclge_comm_stats_str {
+ char desc[ETH_GSTRING_LEN];
+ u32 stats_num;
+ unsigned long offset;
+};
+
+/* mac stats, opcode id: 0x0032 */
+struct hclge_mac_stats {
+ u64 mac_tx_mac_pause_num;
+ u64 mac_rx_mac_pause_num;
+ u64 rsv0;
+ u64 mac_tx_pfc_pri0_pkt_num;
+ u64 mac_tx_pfc_pri1_pkt_num;
+ u64 mac_tx_pfc_pri2_pkt_num;
+ u64 mac_tx_pfc_pri3_pkt_num;
+ u64 mac_tx_pfc_pri4_pkt_num;
+ u64 mac_tx_pfc_pri5_pkt_num;
+ u64 mac_tx_pfc_pri6_pkt_num;
+ u64 mac_tx_pfc_pri7_pkt_num;
+ u64 mac_rx_pfc_pri0_pkt_num;
+ u64 mac_rx_pfc_pri1_pkt_num;
+ u64 mac_rx_pfc_pri2_pkt_num;
+ u64 mac_rx_pfc_pri3_pkt_num;
+ u64 mac_rx_pfc_pri4_pkt_num;
+ u64 mac_rx_pfc_pri5_pkt_num;
+ u64 mac_rx_pfc_pri6_pkt_num;
+ u64 mac_rx_pfc_pri7_pkt_num;
+ u64 mac_tx_total_pkt_num;
+ u64 mac_tx_total_oct_num;
+ u64 mac_tx_good_pkt_num;
+ u64 mac_tx_bad_pkt_num;
+ u64 mac_tx_good_oct_num;
+ u64 mac_tx_bad_oct_num;
+ u64 mac_tx_uni_pkt_num;
+ u64 mac_tx_multi_pkt_num;
+ u64 mac_tx_broad_pkt_num;
+ u64 mac_tx_undersize_pkt_num;
+ u64 mac_tx_oversize_pkt_num;
+ u64 mac_tx_64_oct_pkt_num;
+ u64 mac_tx_65_127_oct_pkt_num;
+ u64 mac_tx_128_255_oct_pkt_num;
+ u64 mac_tx_256_511_oct_pkt_num;
+ u64 mac_tx_512_1023_oct_pkt_num;
+ u64 mac_tx_1024_1518_oct_pkt_num;
+ u64 mac_tx_1519_2047_oct_pkt_num;
+ u64 mac_tx_2048_4095_oct_pkt_num;
+ u64 mac_tx_4096_8191_oct_pkt_num;
+ u64 rsv1;
+ u64 mac_tx_8192_9216_oct_pkt_num;
+ u64 mac_tx_9217_12287_oct_pkt_num;
+ u64 mac_tx_12288_16383_oct_pkt_num;
+ u64 mac_tx_1519_max_good_oct_pkt_num;
+ u64 mac_tx_1519_max_bad_oct_pkt_num;
+
+ u64 mac_rx_total_pkt_num;
+ u64 mac_rx_total_oct_num;
+ u64 mac_rx_good_pkt_num;
+ u64 mac_rx_bad_pkt_num;
+ u64 mac_rx_good_oct_num;
+ u64 mac_rx_bad_oct_num;
+ u64 mac_rx_uni_pkt_num;
+ u64 mac_rx_multi_pkt_num;
+ u64 mac_rx_broad_pkt_num;
+ u64 mac_rx_undersize_pkt_num;
+ u64 mac_rx_oversize_pkt_num;
+ u64 mac_rx_64_oct_pkt_num;
+ u64 mac_rx_65_127_oct_pkt_num;
+ u64 mac_rx_128_255_oct_pkt_num;
+ u64 mac_rx_256_511_oct_pkt_num;
+ u64 mac_rx_512_1023_oct_pkt_num;
+ u64 mac_rx_1024_1518_oct_pkt_num;
+ u64 mac_rx_1519_2047_oct_pkt_num;
+ u64 mac_rx_2048_4095_oct_pkt_num;
+ u64 mac_rx_4096_8191_oct_pkt_num;
+ u64 rsv2;
+ u64 mac_rx_8192_9216_oct_pkt_num;
+ u64 mac_rx_9217_12287_oct_pkt_num;
+ u64 mac_rx_12288_16383_oct_pkt_num;
+ u64 mac_rx_1519_max_good_oct_pkt_num;
+ u64 mac_rx_1519_max_bad_oct_pkt_num;
+
+ u64 mac_tx_fragment_pkt_num;
+ u64 mac_tx_undermin_pkt_num;
+ u64 mac_tx_jabber_pkt_num;
+ u64 mac_tx_err_all_pkt_num;
+ u64 mac_tx_from_app_good_pkt_num;
+ u64 mac_tx_from_app_bad_pkt_num;
+ u64 mac_rx_fragment_pkt_num;
+ u64 mac_rx_undermin_pkt_num;
+ u64 mac_rx_jabber_pkt_num;
+ u64 mac_rx_fcs_err_pkt_num;
+ u64 mac_rx_send_app_good_pkt_num;
+ u64 mac_rx_send_app_bad_pkt_num;
+ u64 mac_tx_pfc_pause_pkt_num;
+ u64 mac_rx_pfc_pause_pkt_num;
+ u64 mac_tx_ctrl_pkt_num;
+ u64 mac_rx_ctrl_pkt_num;
+
+ /* duration of pfc */
+ u64 mac_tx_pfc_pri0_xoff_time;
+ u64 mac_tx_pfc_pri1_xoff_time;
+ u64 mac_tx_pfc_pri2_xoff_time;
+ u64 mac_tx_pfc_pri3_xoff_time;
+ u64 mac_tx_pfc_pri4_xoff_time;
+ u64 mac_tx_pfc_pri5_xoff_time;
+ u64 mac_tx_pfc_pri6_xoff_time;
+ u64 mac_tx_pfc_pri7_xoff_time;
+ u64 mac_rx_pfc_pri0_xoff_time;
+ u64 mac_rx_pfc_pri1_xoff_time;
+ u64 mac_rx_pfc_pri2_xoff_time;
+ u64 mac_rx_pfc_pri3_xoff_time;
+ u64 mac_rx_pfc_pri4_xoff_time;
+ u64 mac_rx_pfc_pri5_xoff_time;
+ u64 mac_rx_pfc_pri6_xoff_time;
+ u64 mac_rx_pfc_pri7_xoff_time;
+
+ /* duration of pause */
+ u64 mac_tx_pause_xoff_time;
+ u64 mac_rx_pause_xoff_time;
+};
+
+#define HCLGE_STATS_TIMER_INTERVAL 300UL
+
+/* fec stats, opcode id: 0x0316 */
+#define HCLGE_FEC_STATS_MAX_LANES 8
+struct hclge_fec_stats {
+ /* fec rs mode total stats */
+ u64 rs_corr_blocks;
+ u64 rs_uncorr_blocks;
+ u64 rs_error_blocks;
+ /* fec base-r mode per lanes stats */
+ u64 base_r_lane_num;
+ u64 base_r_corr_blocks;
+ u64 base_r_uncorr_blocks;
+ union {
+ struct {
+ u64 base_r_corr_per_lanes[HCLGE_FEC_STATS_MAX_LANES];
+ u64 base_r_uncorr_per_lanes[HCLGE_FEC_STATS_MAX_LANES];
+ };
+ u64 per_lanes[HCLGE_FEC_STATS_MAX_LANES * 2];
+ };
+};
+
+struct hclge_vlan_type_cfg {
+ u16 rx_ot_fst_vlan_type;
+ u16 rx_ot_sec_vlan_type;
+ u16 rx_in_fst_vlan_type;
+ u16 rx_in_sec_vlan_type;
+ u16 tx_ot_vlan_type;
+ u16 tx_in_vlan_type;
+};
+
+enum HCLGE_FD_MODE {
+ HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1,
+ HCLGE_FD_MODE_DEPTH_1K_WIDTH_400B_STAGE_2,
+ HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1,
+ HCLGE_FD_MODE_DEPTH_2K_WIDTH_200B_STAGE_2,
+};
+
+enum HCLGE_FD_KEY_TYPE {
+ HCLGE_FD_KEY_BASE_ON_PTYPE,
+ HCLGE_FD_KEY_BASE_ON_TUPLE,
+};
+
+enum HCLGE_FD_STAGE {
+ HCLGE_FD_STAGE_1,
+ HCLGE_FD_STAGE_2,
+ MAX_STAGE_NUM,
+};
+
+/* OUTER_XXX indicates tuples in the tunnel header of a tunnel packet.
+ * INNER_XXX indicates tuples in the tunneled (inner) header of a tunnel
+ * packet, or the tuples of a non-tunnel packet.
+ */
+enum HCLGE_FD_TUPLE {
+ OUTER_DST_MAC,
+ OUTER_SRC_MAC,
+ OUTER_VLAN_TAG_FST,
+ OUTER_VLAN_TAG_SEC,
+ OUTER_ETH_TYPE,
+ OUTER_L2_RSV,
+ OUTER_IP_TOS,
+ OUTER_IP_PROTO,
+ OUTER_SRC_IP,
+ OUTER_DST_IP,
+ OUTER_L3_RSV,
+ OUTER_SRC_PORT,
+ OUTER_DST_PORT,
+ OUTER_L4_RSV,
+ OUTER_TUN_VNI,
+ OUTER_TUN_FLOW_ID,
+ INNER_DST_MAC,
+ INNER_SRC_MAC,
+ INNER_VLAN_TAG_FST,
+ INNER_VLAN_TAG_SEC,
+ INNER_ETH_TYPE,
+ INNER_L2_RSV,
+ INNER_IP_TOS,
+ INNER_IP_PROTO,
+ INNER_SRC_IP,
+ INNER_DST_IP,
+ INNER_L3_RSV,
+ INNER_SRC_PORT,
+ INNER_DST_PORT,
+ INNER_L4_RSV,
+ MAX_TUPLE,
+};
+
+#define HCLGE_FD_TUPLE_USER_DEF_TUPLES \
+ (BIT(INNER_L2_RSV) | BIT(INNER_L3_RSV) | BIT(INNER_L4_RSV))
+
+enum HCLGE_FD_META_DATA {
+ PACKET_TYPE_ID,
+ IP_FRAGEMENT,
+ ROCE_TYPE,
+ NEXT_KEY,
+ VLAN_NUMBER,
+ SRC_VPORT,
+ DST_VPORT,
+ TUNNEL_PACKET,
+ MAX_META_DATA,
+};
+
+enum HCLGE_FD_KEY_OPT {
+ KEY_OPT_U8,
+ KEY_OPT_LE16,
+ KEY_OPT_LE32,
+ KEY_OPT_MAC,
+ KEY_OPT_IP,
+ KEY_OPT_VNI,
+};
+
+struct key_info {
+ u8 key_type;
+ u8 key_length; /* length in bits */
+ enum HCLGE_FD_KEY_OPT key_opt;
+ int offset;
+ int moffset;
+};
+
+#define MAX_KEY_LENGTH 400
+#define MAX_KEY_DWORDS DIV_ROUND_UP(MAX_KEY_LENGTH / 8, 4)
+#define MAX_KEY_BYTES (MAX_KEY_DWORDS * 4)
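A quick check of the key sizing arithmetic above (editor's sketch; the static_assert lines are illustrative and not part of the patch): 400 key bits are 50 bytes, DIV_ROUND_UP(50, 4) gives 13 dwords, and 13 dwords occupy 52 bytes.

	/* illustrative compile-time check of the derived key sizes */
	static_assert(MAX_KEY_DWORDS == 13, "400 bits -> 50 bytes -> 13 dwords");
	static_assert(MAX_KEY_BYTES == 52, "13 dwords * 4 bytes = 52 bytes");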
+#define MAX_META_DATA_LENGTH 32
+
+#define HCLGE_FD_MAX_USER_DEF_OFFSET 9000
+#define HCLGE_FD_USER_DEF_DATA GENMASK(15, 0)
+#define HCLGE_FD_USER_DEF_OFFSET GENMASK(15, 0)
+#define HCLGE_FD_USER_DEF_OFFSET_UNMASK GENMASK(15, 0)
+
+/* assigned by firmware; the real filter number for each PF may be less */
+#define MAX_FD_FILTER_NUM 4096
+#define HCLGE_ARFS_EXPIRE_INTERVAL 5UL
+
+#define hclge_read_dev(a, reg) \
+ hclge_comm_read_reg((a)->hw.io_base, reg)
+#define hclge_write_dev(a, reg, value) \
+ hclge_comm_write_reg((a)->hw.io_base, reg, value)
+
+enum HCLGE_FD_ACTIVE_RULE_TYPE {
+ HCLGE_FD_RULE_NONE,
+ HCLGE_FD_ARFS_ACTIVE,
+ HCLGE_FD_EP_ACTIVE,
+ HCLGE_FD_TC_FLOWER_ACTIVE,
+};
+
+enum HCLGE_FD_PACKET_TYPE {
+ NIC_PACKET,
+ ROCE_PACKET,
+};
+
+enum HCLGE_FD_ACTION {
+ HCLGE_FD_ACTION_SELECT_QUEUE,
+ HCLGE_FD_ACTION_DROP_PACKET,
+ HCLGE_FD_ACTION_SELECT_TC,
+};
+
+enum HCLGE_FD_NODE_STATE {
+ HCLGE_FD_TO_ADD,
+ HCLGE_FD_TO_DEL,
+ HCLGE_FD_ACTIVE,
+ HCLGE_FD_DELETED,
+};
+
+enum HCLGE_FD_USER_DEF_LAYER {
+ HCLGE_FD_USER_DEF_NONE,
+ HCLGE_FD_USER_DEF_L2,
+ HCLGE_FD_USER_DEF_L3,
+ HCLGE_FD_USER_DEF_L4,
+};
+
+#define HCLGE_FD_USER_DEF_LAYER_NUM 3
+struct hclge_fd_user_def_cfg {
+ u16 ref_cnt;
+ u16 offset;
+};
+
+struct hclge_fd_user_def_info {
+ enum HCLGE_FD_USER_DEF_LAYER layer;
+ u16 data;
+ u16 data_mask;
+ u16 offset;
+};
+
+struct hclge_fd_key_cfg {
+ u8 key_sel;
+ u8 inner_sipv6_word_en;
+ u8 inner_dipv6_word_en;
+ u8 outer_sipv6_word_en;
+ u8 outer_dipv6_word_en;
+ u32 tuple_active;
+ u32 meta_data_active;
+};
+
+struct hclge_fd_cfg {
+ u8 fd_mode;
+ u16 max_key_length; /* length in bits */
+ u32 rule_num[MAX_STAGE_NUM]; /* rule entry number */
+ u16 cnt_num[MAX_STAGE_NUM]; /* rule hit counter number */
+ struct hclge_fd_key_cfg key_cfg[MAX_STAGE_NUM];
+ struct hclge_fd_user_def_cfg user_def_cfg[HCLGE_FD_USER_DEF_LAYER_NUM];
+};
+
+#define IPV4_INDEX 3
+#define IPV6_SIZE 4
+struct hclge_fd_rule_tuples {
+ u8 src_mac[ETH_ALEN];
+ u8 dst_mac[ETH_ALEN];
+ /* Holds an IP address of either family (IPv4 or IPv6).
+ * An IPv4 address is stored in src/dst_ip[3], the last word.
+ */
+ u32 src_ip[IPV6_SIZE];
+ u32 dst_ip[IPV6_SIZE];
+ u16 src_port;
+ u16 dst_port;
+ u16 vlan_tag1;
+ u16 ether_proto;
+ u16 l2_user_def;
+ u16 l3_user_def;
+ u32 l4_user_def;
+ u8 ip_tos;
+ u8 ip_proto;
+};
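As a hedged illustration of the comment above (the helper name is the editor's and it assumes a host-order u32 input; it is not part of the driver), an IPv4 address occupies only the last word of the IPv6-sized array:

	/* sketch: place an IPv4 source address into the IPv6-sized array */
	static inline void fd_tuples_set_src_ipv4(struct hclge_fd_rule_tuples *tuples,
						  u32 addr)
	{
		memset(tuples->src_ip, 0, sizeof(tuples->src_ip));
		tuples->src_ip[IPV4_INDEX] = addr;	/* IPV4_INDEX is 3, the last word */
	}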
+
+struct hclge_fd_rule {
+ struct hlist_node rule_node;
+ struct hclge_fd_rule_tuples tuples;
+ struct hclge_fd_rule_tuples tuples_mask;
+ u32 unused_tuple;
+ u32 flow_type;
+ union {
+ struct {
+ unsigned long cookie;
+ u8 tc;
+ } cls_flower;
+ struct {
+ u16 flow_id; /* only used for arfs */
+ } arfs;
+ struct {
+ struct hclge_fd_user_def_info user_def;
+ } ep;
+ };
+ u16 queue_id;
+ u16 vf_id;
+ u16 location;
+ enum HCLGE_FD_ACTIVE_RULE_TYPE rule_type;
+ enum HCLGE_FD_NODE_STATE state;
+ u8 action;
+};
+
+struct hclge_fd_ad_data {
+ u16 ad_id;
+ u8 drop_packet;
+ u8 forward_to_direct_queue;
+ u16 queue_id;
+ u8 use_counter;
+ u8 counter_id;
+ u8 use_next_stage;
+ u8 write_rule_id_to_bd;
+ u8 next_input_key;
+ u16 rule_id;
+ u16 tc_size;
+ u8 override_tc;
+};
+
+enum HCLGE_MAC_NODE_STATE {
+ HCLGE_MAC_TO_ADD,
+ HCLGE_MAC_TO_DEL,
+ HCLGE_MAC_ACTIVE
+};
+
+struct hclge_mac_node {
+ struct list_head node;
+ enum HCLGE_MAC_NODE_STATE state;
+ u8 mac_addr[ETH_ALEN];
+};
+
+enum HCLGE_MAC_ADDR_TYPE {
+ HCLGE_MAC_ADDR_UC,
+ HCLGE_MAC_ADDR_MC
+};
+
+struct hclge_vport_vlan_cfg {
+ struct list_head node;
+ int hd_tbl_status;
+ u16 vlan_id;
+};
+
+struct hclge_rst_stats {
+ u32 reset_done_cnt; /* the number of completed resets */
+ u32 hw_reset_done_cnt; /* the number of completed HW resets */
+ u32 pf_rst_cnt; /* the number of PF resets */
+ u32 flr_rst_cnt; /* the number of FLR resets */
+ u32 global_rst_cnt; /* the number of GLOBAL resets */
+ u32 imp_rst_cnt; /* the number of IMP resets */
+ u32 reset_cnt; /* the number of resets */
+ u32 reset_fail_cnt; /* the number of failed resets */
+};
+
+/* time and register status when a mac tunnel interrupt occurs */
+struct hclge_mac_tnl_stats {
+ u64 time;
+ u32 status;
+};
+
+#define HCLGE_RESET_INTERVAL (10 * HZ)
+#define HCLGE_WAIT_RESET_DONE 100
+
+#pragma pack(1)
+struct hclge_vf_vlan_cfg {
+ u8 mbx_cmd;
+ u8 subcode;
+ union {
+ struct {
+ u8 is_kill;
+ __le16 vlan;
+ __le16 proto;
+ };
+ u8 enable;
+ };
+};
+
+#pragma pack()
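Because of the pack(1) directive, the structure maps byte-for-byte onto the mailbox payload; a compile-time check of the resulting size (editor's illustration, not part of the patch):

	/* 1 byte mbx_cmd + 1 byte subcode + 5 packed union bytes = 7 bytes */
	static_assert(sizeof(struct hclge_vf_vlan_cfg) == 7,
		      "hclge_vf_vlan_cfg must stay byte-packed");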
+
+/* For each bit of TCAM entry, it uses a pair of 'x' and
+ * 'y' to indicate which value to match, like below:
+ * ----------------------------------
+ * | bit x | bit y | search value |
+ * ----------------------------------
+ * | 0 | 0 | always hit |
+ * ----------------------------------
+ * | 1 | 0 | match '0' |
+ * ----------------------------------
+ * | 0 | 1 | match '1' |
+ * ----------------------------------
+ * | 1 | 1 | invalid |
+ * ----------------------------------
+ * Then for input key(k) and mask(v), we can calculate the value by
+ * the formulae:
+ * x = (~k) & v
+ * y = k & v
+ */
+#define calc_x(x, k, v) ((x) = ~(k) & (v))
+#define calc_y(y, k, v) ((y) = (k) & (v))
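To make the table and formulae above concrete, here is a standalone userspace check (editor's sketch, not driver code): with key 0xA5 and mask 0x0F only the low nibble is compared, so x collects the bits that must match '0' and y the bits that must match '1'.

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint8_t k = 0xA5;	/* key: low nibble is 0101 */
		uint8_t v = 0x0F;	/* mask: compare only the low nibble */
		uint8_t x = ~k & v;	/* same formula as calc_x() */
		uint8_t y = k & v;	/* same formula as calc_y() */

		assert(x == 0x0A);	/* bits that must match '0' */
		assert(y == 0x05);	/* bits that must match '1' */
		assert((x & y) == 0);	/* no bit hits the invalid 1/1 row */
		assert((x | y) == v);	/* bits outside the mask stay 0/0: always hit */
		return 0;
	}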
+
+#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
+#define HCLGE_STATS_READ(p, offset) (*(u64 *)((u8 *)(p) + (offset)))
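The two macros above pair a field offset with a generic 64-bit read; a plausible use (editor's sketch, assuming a struct hclge_dev *hdev in scope; the real string-table code is not part of this hunk) would be:

	/* sketch: read one MAC counter generically by its field offset */
	u64 off = HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num);
	u64 tx_good = HCLGE_STATS_READ(&hdev->mac_stats, off);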
+
+#define HCLGE_MAC_TNL_LOG_SIZE 8
+#define HCLGE_VPORT_NUM 256
+struct hclge_dev {
+ struct pci_dev *pdev;
+ struct hnae3_ae_dev *ae_dev;
+ struct hclge_hw hw;
+ struct hclge_misc_vector misc_vector;
+ struct hclge_mac_stats mac_stats;
+ struct hclge_fec_stats fec_stats;
+ unsigned long state;
+ unsigned long flr_state;
+ unsigned long last_reset_time;
+
+ enum hnae3_reset_type reset_type;
+ enum hnae3_reset_type reset_level;
+ unsigned long default_reset_request;
+ unsigned long reset_request; /* reset has been requested */
+ unsigned long reset_pending; /* client rst is pending to be served */
+ struct hclge_rst_stats rst_stats;
+ struct semaphore reset_sem; /* protect reset process */
+ u32 fw_version;
+ u16 num_tqps; /* Num task queue pairs of this PF */
+ u16 num_req_vfs; /* Num VFs requested for this PF */
+
+ u16 base_tqp_pid; /* Base task tqp physical id of this PF */
+ u16 alloc_rss_size; /* Allocated RSS task queue */
+ u16 vf_rss_size_max; /* HW defined VF max RSS task queue */
+ u16 pf_rss_size_max; /* HW defined PF max RSS task queue */
+ u32 tx_spare_buf_size; /* HW defined TX spare buffer size */
+
+ u16 fdir_pf_filter_count; /* Num of guaranteed filters for this PF */
+ u16 num_alloc_vport; /* Num vports this driver supports */
+ u32 numa_node_mask;
+ u16 rx_buf_len;
+ u16 num_tx_desc; /* desc num per tx queue */
+ u16 num_rx_desc; /* desc num per rx queue */
+ u8 hw_tc_map;
+ enum hclge_fc_mode fc_mode_last_time;
+ u8 support_sfp_query;
+
+#define HCLGE_FLAG_TC_BASE_SCH_MODE 1
+#define HCLGE_FLAG_VNET_BASE_SCH_MODE 2
+ u8 tx_sch_mode;
+ u8 tc_max;
+ u8 pfc_max;
+
+ u8 default_up;
+ u8 dcbx_cap;
+ struct hclge_tm_info tm_info;
+
+ u16 num_msi;
+ u16 num_msi_left;
+ u16 num_msi_used;
+ u16 *vector_status;
+ int *vector_irq;
+ u16 num_nic_msi; /* Num of nic vectors for this PF */
+ u16 num_roce_msi; /* Num of roce vectors for this PF */
+
+ unsigned long service_timer_period;
+ unsigned long service_timer_previous;
+ struct timer_list reset_timer;
+ struct delayed_work service_task;
+
+ bool cur_promisc;
+ int num_alloc_vfs; /* Actual number of VFs allocated */
+
+ struct hclge_comm_tqp *htqp;
+ struct hclge_vport *vport;
+
+ struct dentry *hclge_dbgfs;
+
+ struct hnae3_client *nic_client;
+ struct hnae3_client *roce_client;
+
+#define HCLGE_FLAG_MAIN BIT(0)
+#define HCLGE_FLAG_DCB_CAPABLE BIT(1)
+ u32 flag;
+
+ u32 pkt_buf_size; /* Total pf buf size for tx/rx */
+ u32 tx_buf_size; /* Tx buffer size for each TC */
+ u32 dv_buf_size; /* Dv buffer size for each TC */
+
+ u32 mps; /* Max packet size */
+ /* vport_lock protects resources shared by vports */
+ struct mutex vport_lock;
+
+ struct hclge_vlan_type_cfg vlan_type_cfg;
+
+ unsigned long vlan_table[VLAN_N_VID][BITS_TO_LONGS(HCLGE_VPORT_NUM)];
+ unsigned long vf_vlan_full[BITS_TO_LONGS(HCLGE_VPORT_NUM)];
+
+ unsigned long vport_config_block[BITS_TO_LONGS(HCLGE_VPORT_NUM)];
+
+ struct hclge_fd_cfg fd_cfg;
+ struct hlist_head fd_rule_list;
+ spinlock_t fd_rule_lock; /* protect fd_rule_list and fd_bmap */
+ u16 hclge_fd_rule_num;
+ unsigned long serv_processed_cnt;
+ unsigned long last_serv_processed;
+ unsigned long last_rst_scheduled;
+ unsigned long last_mbx_scheduled;
+ unsigned long fd_bmap[BITS_TO_LONGS(MAX_FD_FILTER_NUM)];
+ enum HCLGE_FD_ACTIVE_RULE_TYPE fd_active_type;
+ u8 fd_en;
+ bool gro_en;
+
+ u16 wanted_umv_size;
+ /* max available unicast mac vlan space */
+ u16 max_umv_size;
+ /* private unicast mac vlan space; it is the same for the PF and its VFs */
+ u16 priv_umv_size;
+ /* unicast mac vlan space shared by PF and its VFs */
+ u16 share_umv_size;
+ /* multicast mac address number used by PF and its VFs */
+ u16 used_mc_mac_num;
+
+ DECLARE_KFIFO(mac_tnl_log, struct hclge_mac_tnl_stats,
+ HCLGE_MAC_TNL_LOG_SIZE);
+
+ struct hclge_ptp *ptp;
+ struct devlink *devlink;
+ struct hclge_comm_rss_cfg rss_cfg;
+};
+
+/* VPort level vlan tag configuration for TX direction */
+struct hclge_tx_vtag_cfg {
+ bool accept_tag1; /* Whether accept tag1 packet from host */
+ bool accept_untag1; /* Whether accept untag1 packet from host */
+ bool accept_tag2;
+ bool accept_untag2;
+ bool insert_tag1_en; /* Whether insert inner vlan tag */
+ bool insert_tag2_en; /* Whether insert outer vlan tag */
+ u16 default_tag1; /* The default inner vlan tag to insert */
+ u16 default_tag2; /* The default outer vlan tag to insert */
+ bool tag_shift_mode_en;
+};
+
+/* VPort level vlan tag configuration for RX direction */
+struct hclge_rx_vtag_cfg {
+ bool rx_vlan_offload_en; /* Whether enable rx vlan offload */
+ bool strip_tag1_en; /* Whether strip inner vlan tag */
+ bool strip_tag2_en; /* Whether strip outer vlan tag */
+ bool vlan1_vlan_prionly; /* Inner vlan tag up to descriptor enable */
+ bool vlan2_vlan_prionly; /* Outer vlan tag up to descriptor enable */
+ bool strip_tag1_discard_en; /* Inner vlan tag discard for BD enable */
+ bool strip_tag2_discard_en; /* Outer vlan tag discard for BD enable */
+};
+
+enum HCLGE_VPORT_STATE {
+ HCLGE_VPORT_STATE_ALIVE,
+ HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
+ HCLGE_VPORT_STATE_PROMISC_CHANGE,
+ HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
+ HCLGE_VPORT_STATE_INITED,
+ HCLGE_VPORT_STATE_MAX
+};
+
+enum HCLGE_VPORT_NEED_NOTIFY {
+ HCLGE_VPORT_NEED_NOTIFY_RESET,
+ HCLGE_VPORT_NEED_NOTIFY_VF_VLAN,
+};
+
+struct hclge_vlan_info {
+ u16 vlan_proto; /* so far support 802.1Q only */
+ u16 qos;
+ u16 vlan_tag;
+};
+
+struct hclge_port_base_vlan_config {
+ u16 state;
+ bool tbl_sta;
+ struct hclge_vlan_info vlan_info;
+ struct hclge_vlan_info old_vlan_info;
+};
+
+struct hclge_vf_info {
+ int link_state;
+ u8 mac[ETH_ALEN];
+ u32 spoofchk;
+ u32 max_tx_rate;
+ u32 trusted;
+ u8 request_uc_en;
+ u8 request_mc_en;
+ u8 request_bc_en;
+};
+
+struct hclge_vport {
+ u16 alloc_tqps; /* Allocated Tx/Rx queues */
+
+ u16 qs_offset;
+ u32 bw_limit; /* VSI BW Limit (0 = disabled) */
+ u8 dwrr;
+
+ bool req_vlan_fltr_en;
+ bool cur_vlan_fltr_en;
+ unsigned long vlan_del_fail_bmap[BITS_TO_LONGS(VLAN_N_VID)];
+ struct hclge_port_base_vlan_config port_base_vlan_cfg;
+ struct hclge_tx_vtag_cfg txvlan_cfg;
+ struct hclge_rx_vtag_cfg rxvlan_cfg;
+
+ u16 used_umv_num;
+
+ u16 vport_id;
+ struct hclge_dev *back; /* Back reference to associated dev */
+ struct hnae3_handle nic;
+ struct hnae3_handle roce;
+
+ unsigned long state;
+ unsigned long need_notify;
+ unsigned long last_active_jiffies;
+ u32 mps; /* Max packet size */
+ struct hclge_vf_info vf_info;
+
+ u8 overflow_promisc_flags;
+ u8 last_promisc_flags;
+
+ spinlock_t mac_list_lock; /* protect the mac address lists to add/delete */
+ struct list_head uc_mac_list; /* Store VF unicast table */
+ struct list_head mc_mac_list; /* Store VF multicast table */
+
+ struct list_head vlan_list; /* Store VF vlan table */
+};
+
+struct hclge_speed_bit_map {
+ u32 speed;
+ u32 speed_bit;
+};
+
+struct hclge_mac_speed_map {
+ u32 speed_drv; /* speed defined in driver */
+ u32 speed_fw; /* speed defined in firmware */
+};
+
+int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
+ bool en_mc_pmc, bool en_bc_pmc);
+int hclge_add_uc_addr_common(struct hclge_vport *vport,
+ const unsigned char *addr);
+int hclge_rm_uc_addr_common(struct hclge_vport *vport,
+ const unsigned char *addr);
+int hclge_add_mc_addr_common(struct hclge_vport *vport,
+ const unsigned char *addr);
+int hclge_rm_mc_addr_common(struct hclge_vport *vport,
+ const unsigned char *addr);
+
+struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle);
+int hclge_bind_ring_with_vector(struct hclge_vport *vport,
+ int vector_id, bool en,
+ struct hnae3_ring_chain_node *ring_chain);
+
+static inline int hclge_get_queue_id(struct hnae3_queue *queue)
+{
+ struct hclge_comm_tqp *tqp =
+ container_of(queue, struct hclge_comm_tqp, q);
+
+ return tqp->index;
+}
+
+int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport);
+int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex, u8 lane_num);
+int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
+ u16 vlan_id, bool is_kill);
+int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable);
+
+int hclge_buffer_alloc(struct hclge_dev *hdev);
+int hclge_rss_init_hw(struct hclge_dev *hdev);
+
+void hclge_mbx_handler(struct hclge_dev *hdev);
+int hclge_reset_tqp(struct hnae3_handle *handle);
+int hclge_cfg_flowctrl(struct hclge_dev *hdev);
+int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id);
+int hclge_vport_start(struct hclge_vport *vport);
+void hclge_vport_stop(struct hclge_vport *vport);
+int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu);
+int hclge_dbg_read_cmd(struct hnae3_handle *handle, enum hnae3_dbg_cmd cmd,
+ char *buf, int len);
+u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id);
+int hclge_notify_client(struct hclge_dev *hdev,
+ enum hnae3_reset_notify_type type);
+int hclge_update_mac_list(struct hclge_vport *vport,
+ enum HCLGE_MAC_NODE_STATE state,
+ enum HCLGE_MAC_ADDR_TYPE mac_type,
+ const unsigned char *addr);
+int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
+ const u8 *old_addr, const u8 *new_addr);
+void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
+ enum HCLGE_MAC_ADDR_TYPE mac_type);
+void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list);
+void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev);
+void hclge_restore_mac_table_common(struct hclge_vport *vport);
+void hclge_restore_vport_port_base_vlan_config(struct hclge_dev *hdev);
+void hclge_restore_vport_vlan_table(struct hclge_vport *vport);
+int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
+ struct hclge_vlan_info *vlan_info);
+int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid,
+ u16 state,
+ struct hclge_vlan_info *vlan_info);
+void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time);
+void hclge_report_hw_error(struct hclge_dev *hdev,
+ enum hnae3_hw_error_type type);
+int hclge_dbg_dump_rst_info(struct hclge_dev *hdev, char *buf, int len);
+int hclge_push_vf_link_status(struct hclge_vport *vport);
+int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en);
+int hclge_mac_update_stats(struct hclge_dev *hdev);
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
new file mode 100644
index 0000000000..04ff9bf121
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -0,0 +1,1152 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
+
+#include "hclge_main.h"
+#include "hclge_mbx.h"
+#include "hnae3.h"
+#include "hclge_comm_rss.h"
+
+#define CREATE_TRACE_POINTS
+#include "hclge_trace.h"
+
+static u16 hclge_errno_to_resp(int errno)
+{
+ int resp = abs(errno);
+
+ /* The status for the pf to vf msg cmd is a u16, constrained by HW.
+ * We need to keep the same type as it.
+ * The input errno is a standard error code, so it is safe to
+ * store abs(errno) in a u16.
+ */
+ return (u16)resp;
+}
+
+/* hclge_gen_resp_to_vf: generate a synchronous response to a VF after the PF
+ * receives a mailbox message from it.
+ * @vport: pointer to struct hclge_vport
+ * @vf_to_pf_req: pointer to the hclge_mbx_vf_to_pf_cmd of the original mailbox
+ * message
+ * @resp_msg: response to send back, carrying the status of the VF's request
+ * (0 on success)
+ */
+static int hclge_gen_resp_to_vf(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *vf_to_pf_req,
+ struct hclge_respond_to_vf_msg *resp_msg)
+{
+ struct hclge_mbx_pf_to_vf_cmd *resp_pf_to_vf;
+ struct hclge_dev *hdev = vport->back;
+ enum hclge_comm_cmd_status status;
+ struct hclge_desc desc;
+ u16 resp;
+
+ resp_pf_to_vf = (struct hclge_mbx_pf_to_vf_cmd *)desc.data;
+
+ if (resp_msg->len > HCLGE_MBX_MAX_RESP_DATA_SIZE) {
+ dev_err(&hdev->pdev->dev,
+ "PF fail to gen resp to VF len %u exceeds max len %u\n",
+ resp_msg->len,
+ HCLGE_MBX_MAX_RESP_DATA_SIZE);
+ /* If resp_msg->len is too long, set the value to max length
+ * and return the msg to VF
+ */
+ resp_msg->len = HCLGE_MBX_MAX_RESP_DATA_SIZE;
+ }
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_PF_TO_VF, false);
+
+ resp_pf_to_vf->dest_vfid = vf_to_pf_req->mbx_src_vfid;
+ resp_pf_to_vf->msg_len = vf_to_pf_req->msg_len;
+ resp_pf_to_vf->match_id = vf_to_pf_req->match_id;
+
+ resp_pf_to_vf->msg.code = cpu_to_le16(HCLGE_MBX_PF_VF_RESP);
+ resp_pf_to_vf->msg.vf_mbx_msg_code =
+ cpu_to_le16(vf_to_pf_req->msg.code);
+ resp_pf_to_vf->msg.vf_mbx_msg_subcode =
+ cpu_to_le16(vf_to_pf_req->msg.subcode);
+ resp = hclge_errno_to_resp(resp_msg->status);
+ if (resp < SHRT_MAX) {
+ resp_pf_to_vf->msg.resp_status = cpu_to_le16(resp);
+ } else {
+ dev_warn(&hdev->pdev->dev,
+ "failed to send response to VF, response status %u is out-of-bound\n",
+ resp);
+ resp_pf_to_vf->msg.resp_status = cpu_to_le16(EIO);
+ }
+
+ if (resp_msg->len > 0)
+ memcpy(resp_pf_to_vf->msg.resp_data, resp_msg->data,
+ resp_msg->len);
+
+ trace_hclge_pf_mbx_send(hdev, resp_pf_to_vf);
+
+ status = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (status)
+ dev_err(&hdev->pdev->dev,
+ "failed to send response to VF, status: %d, vfid: %u, code: %u, subcode: %u.\n",
+ status, vf_to_pf_req->mbx_src_vfid,
+ vf_to_pf_req->msg.code, vf_to_pf_req->msg.subcode);
+
+ return status;
+}
+
+static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len,
+ u16 mbx_opcode, u8 dest_vfid)
+{
+ struct hclge_mbx_pf_to_vf_cmd *resp_pf_to_vf;
+ struct hclge_dev *hdev = vport->back;
+ enum hclge_comm_cmd_status status;
+ struct hclge_desc desc;
+
+ if (msg_len > HCLGE_MBX_MAX_MSG_SIZE) {
+ dev_err(&hdev->pdev->dev,
+ "msg data length(=%u) exceeds maximum(=%u)\n",
+ msg_len, HCLGE_MBX_MAX_MSG_SIZE);
+ return -EMSGSIZE;
+ }
+
+ resp_pf_to_vf = (struct hclge_mbx_pf_to_vf_cmd *)desc.data;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_PF_TO_VF, false);
+
+ resp_pf_to_vf->dest_vfid = dest_vfid;
+ resp_pf_to_vf->msg_len = msg_len;
+ resp_pf_to_vf->msg.code = cpu_to_le16(mbx_opcode);
+
+ memcpy(resp_pf_to_vf->msg.msg_data, msg, msg_len);
+
+ trace_hclge_pf_mbx_send(hdev, resp_pf_to_vf);
+
+ status = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (status)
+ dev_err(&hdev->pdev->dev,
+ "failed to send mailbox to VF, status: %d, vfid: %u, opcode: %u\n",
+ status, dest_vfid, mbx_opcode);
+
+ return status;
+}
+
+static int hclge_inform_vf_reset(struct hclge_vport *vport, u16 reset_type)
+{
+ __le16 msg_data;
+ u8 dest_vfid;
+
+ dest_vfid = (u8)vport->vport_id;
+ msg_data = cpu_to_le16(reset_type);
+
+ /* send this requested info to VF */
+ return hclge_send_mbx_msg(vport, (u8 *)&msg_data, sizeof(msg_data),
+ HCLGE_MBX_ASSERTING_RESET, dest_vfid);
+}
+
+int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport)
+{
+ struct hclge_dev *hdev = vport->back;
+ u16 reset_type;
+
+ BUILD_BUG_ON(HNAE3_MAX_RESET > U16_MAX);
+
+ if (hdev->reset_type == HNAE3_FUNC_RESET)
+ reset_type = HNAE3_VF_PF_FUNC_RESET;
+ else if (hdev->reset_type == HNAE3_FLR_RESET)
+ reset_type = HNAE3_VF_FULL_RESET;
+ else
+ reset_type = HNAE3_VF_FUNC_RESET;
+
+ return hclge_inform_vf_reset(vport, reset_type);
+}
+
+static void hclge_free_vector_ring_chain(struct hnae3_ring_chain_node *head)
+{
+ struct hnae3_ring_chain_node *chain_tmp, *chain;
+
+ chain = head->next;
+
+ while (chain) {
+ chain_tmp = chain->next;
+ kfree_sensitive(chain);
+ chain = chain_tmp;
+ }
+}
+
+/* hclge_get_ring_chain_from_mbx: get ring type & tqp id & int_gl idx
+ * from mailbox message
+ * msg[0]: opcode
+ * msg[1]: <not relevant to this function>
+ * msg[2]: ring_num
+ * msg[3]: first ring type (TX|RX)
+ * msg[4]: first tqp id
+ * msg[5]: first int_gl idx
+ * msg[6] ~ msg[14]: other ring type, tqp id and int_gl idx
+ */
+static int hclge_get_ring_chain_from_mbx(
+ struct hclge_mbx_vf_to_pf_cmd *req,
+ struct hnae3_ring_chain_node *ring_chain,
+ struct hclge_vport *vport)
+{
+ struct hnae3_ring_chain_node *cur_chain, *new_chain;
+ struct hclge_dev *hdev = vport->back;
+ int ring_num;
+ int i;
+
+ ring_num = req->msg.ring_num;
+
+ if (ring_num > HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM)
+ return -EINVAL;
+
+ for (i = 0; i < ring_num; i++) {
+ if (req->msg.param[i].tqp_index >= vport->nic.kinfo.rss_size) {
+ dev_err(&hdev->pdev->dev, "tqp index(%u) is out of range(0-%u)\n",
+ req->msg.param[i].tqp_index,
+ vport->nic.kinfo.rss_size - 1U);
+ return -EINVAL;
+ }
+ }
+
+ hnae3_set_bit(ring_chain->flag, HNAE3_RING_TYPE_B,
+ req->msg.param[0].ring_type);
+ ring_chain->tqp_index =
+ hclge_get_queue_id(vport->nic.kinfo.tqp
+ [req->msg.param[0].tqp_index]);
+ hnae3_set_field(ring_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S, req->msg.param[0].int_gl_index);
+
+ cur_chain = ring_chain;
+
+ for (i = 1; i < ring_num; i++) {
+ new_chain = kzalloc(sizeof(*new_chain), GFP_KERNEL);
+ if (!new_chain)
+ goto err;
+
+ hnae3_set_bit(new_chain->flag, HNAE3_RING_TYPE_B,
+ req->msg.param[i].ring_type);
+
+ new_chain->tqp_index =
+ hclge_get_queue_id(vport->nic.kinfo.tqp
+ [req->msg.param[i].tqp_index]);
+
+ hnae3_set_field(new_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S,
+ req->msg.param[i].int_gl_index);
+
+ cur_chain->next = new_chain;
+ cur_chain = new_chain;
+ }
+
+ return 0;
+err:
+ hclge_free_vector_ring_chain(ring_chain);
+ return -ENOMEM;
+}
+
+static int hclge_map_unmap_ring_to_vf_vector(struct hclge_vport *vport, bool en,
+ struct hclge_mbx_vf_to_pf_cmd *req)
+{
+ struct hnae3_ring_chain_node ring_chain;
+ int vector_id = req->msg.vector_id;
+ int ret;
+
+ memset(&ring_chain, 0, sizeof(ring_chain));
+ ret = hclge_get_ring_chain_from_mbx(req, &ring_chain, vport);
+ if (ret)
+ return ret;
+
+ ret = hclge_bind_ring_with_vector(vport, vector_id, en, &ring_chain);
+
+ hclge_free_vector_ring_chain(&ring_chain);
+
+ return ret;
+}
+
+static int hclge_query_ring_vector_map(struct hclge_vport *vport,
+ struct hnae3_ring_chain_node *ring_chain,
+ struct hclge_desc *desc)
+{
+ struct hclge_ctrl_vector_chain_cmd *req =
+ (struct hclge_ctrl_vector_chain_cmd *)desc->data;
+ struct hclge_dev *hdev = vport->back;
+ u16 tqp_type_and_id;
+ int status;
+
+ hclge_cmd_setup_basic_desc(desc, HCLGE_OPC_ADD_RING_TO_VECTOR, true);
+
+ tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[0]);
+ hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M, HCLGE_INT_TYPE_S,
+ hnae3_get_bit(ring_chain->flag, HNAE3_RING_TYPE_B));
+ hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M, HCLGE_TQP_ID_S,
+ ring_chain->tqp_index);
+ req->tqp_type_and_id[0] = cpu_to_le16(tqp_type_and_id);
+ req->vfid = vport->vport_id;
+
+ status = hclge_cmd_send(&hdev->hw, desc, 1);
+ if (status)
+ dev_err(&hdev->pdev->dev,
+ "Get VF ring vector map info fail, status is %d.\n",
+ status);
+
+ return status;
+}
+
+static int hclge_get_vf_ring_vector_map(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *req,
+ struct hclge_respond_to_vf_msg *resp)
+{
+#define HCLGE_LIMIT_RING_NUM 1
+#define HCLGE_RING_TYPE_OFFSET 0
+#define HCLGE_TQP_INDEX_OFFSET 1
+#define HCLGE_INT_GL_INDEX_OFFSET 2
+#define HCLGE_VECTOR_ID_OFFSET 3
+#define HCLGE_RING_VECTOR_MAP_INFO_LEN 4
+ struct hnae3_ring_chain_node ring_chain;
+ struct hclge_desc desc;
+ struct hclge_ctrl_vector_chain_cmd *data =
+ (struct hclge_ctrl_vector_chain_cmd *)desc.data;
+ u16 tqp_type_and_id;
+ u8 int_gl_index;
+ int ret;
+
+ req->msg.ring_num = HCLGE_LIMIT_RING_NUM;
+
+ memset(&ring_chain, 0, sizeof(ring_chain));
+ ret = hclge_get_ring_chain_from_mbx(req, &ring_chain, vport);
+ if (ret)
+ return ret;
+
+ ret = hclge_query_ring_vector_map(vport, &ring_chain, &desc);
+ if (ret) {
+ hclge_free_vector_ring_chain(&ring_chain);
+ return ret;
+ }
+
+ tqp_type_and_id = le16_to_cpu(data->tqp_type_and_id[0]);
+ int_gl_index = hnae3_get_field(tqp_type_and_id,
+ HCLGE_INT_GL_IDX_M, HCLGE_INT_GL_IDX_S);
+
+ resp->data[HCLGE_RING_TYPE_OFFSET] = req->msg.param[0].ring_type;
+ resp->data[HCLGE_TQP_INDEX_OFFSET] = req->msg.param[0].tqp_index;
+ resp->data[HCLGE_INT_GL_INDEX_OFFSET] = int_gl_index;
+ resp->data[HCLGE_VECTOR_ID_OFFSET] = data->int_vector_id_l;
+ resp->len = HCLGE_RING_VECTOR_MAP_INFO_LEN;
+
+ hclge_free_vector_ring_chain(&ring_chain);
+
+ return ret;
+}
+
+static void hclge_set_vf_promisc_mode(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *req)
+{
+ struct hnae3_handle *handle = &vport->nic;
+ struct hclge_dev *hdev = vport->back;
+
+ vport->vf_info.request_uc_en = req->msg.en_uc;
+ vport->vf_info.request_mc_en = req->msg.en_mc;
+ vport->vf_info.request_bc_en = req->msg.en_bc;
+
+ if (req->msg.en_limit_promisc)
+ set_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags);
+ else
+ clear_bit(HNAE3_PFLAG_LIMIT_PROMISC,
+ &handle->priv_flags);
+
+ set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
+ hclge_task_schedule(hdev, 0);
+}
+
+static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+{
+#define HCLGE_MBX_VF_OLD_MAC_ADDR_OFFSET 6
+
+ const u8 *mac_addr = (const u8 *)(mbx_req->msg.data);
+ struct hclge_dev *hdev = vport->back;
+ int status;
+
+ if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_UC_MODIFY) {
+ const u8 *old_addr = (const u8 *)
+ (&mbx_req->msg.data[HCLGE_MBX_VF_OLD_MAC_ADDR_OFFSET]);
+
+ /* If VF MAC has been configured by the host then it
+ * cannot be overridden by the MAC specified by the VM.
+ */
+ if (!is_zero_ether_addr(vport->vf_info.mac) &&
+ !ether_addr_equal(mac_addr, vport->vf_info.mac))
+ return -EPERM;
+
+ if (!is_valid_ether_addr(mac_addr))
+ return -EINVAL;
+
+ spin_lock_bh(&vport->mac_list_lock);
+ status = hclge_update_mac_node_for_dev_addr(vport, old_addr,
+ mac_addr);
+ spin_unlock_bh(&vport->mac_list_lock);
+ hclge_task_schedule(hdev, 0);
+ } else if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_UC_ADD) {
+ status = hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD,
+ HCLGE_MAC_ADDR_UC, mac_addr);
+ } else if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_UC_REMOVE) {
+ status = hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL,
+ HCLGE_MAC_ADDR_UC, mac_addr);
+ } else {
+ dev_err(&hdev->pdev->dev,
+ "failed to set unicast mac addr, unknown subcode %u\n",
+ mbx_req->msg.subcode);
+ return -EIO;
+ }
+
+ return status;
+}
+
+static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+{
+ const u8 *mac_addr = (const u8 *)(mbx_req->msg.data);
+ struct hclge_dev *hdev = vport->back;
+
+ if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_MC_ADD) {
+ hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD,
+ HCLGE_MAC_ADDR_MC, mac_addr);
+ } else if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_MC_REMOVE) {
+ hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL,
+ HCLGE_MAC_ADDR_MC, mac_addr);
+ } else {
+ dev_err(&hdev->pdev->dev,
+ "failed to set mcast mac addr, unknown subcode %u\n",
+ mbx_req->msg.subcode);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid,
+ u16 state,
+ struct hclge_vlan_info *vlan_info)
+{
+ struct hclge_mbx_port_base_vlan base_vlan;
+
+ base_vlan.state = cpu_to_le16(state);
+ base_vlan.vlan_proto = cpu_to_le16(vlan_info->vlan_proto);
+ base_vlan.qos = cpu_to_le16(vlan_info->qos);
+ base_vlan.vlan_tag = cpu_to_le16(vlan_info->vlan_tag);
+
+ return hclge_send_mbx_msg(vport, (u8 *)&base_vlan, sizeof(base_vlan),
+ HCLGE_MBX_PUSH_VLAN_INFO, vfid);
+}
+
+static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req,
+ struct hclge_respond_to_vf_msg *resp_msg)
+{
+#define HCLGE_MBX_VLAN_STATE_OFFSET 0
+#define HCLGE_MBX_VLAN_INFO_OFFSET 2
+
+ struct hnae3_handle *handle = &vport->nic;
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_vf_vlan_cfg *msg_cmd;
+ __be16 proto;
+ u16 vlan_id;
+
+ msg_cmd = (struct hclge_vf_vlan_cfg *)&mbx_req->msg;
+ switch (msg_cmd->subcode) {
+ case HCLGE_MBX_VLAN_FILTER:
+ proto = cpu_to_be16(le16_to_cpu(msg_cmd->proto));
+ vlan_id = le16_to_cpu(msg_cmd->vlan);
+ return hclge_set_vlan_filter(handle, proto, vlan_id,
+ msg_cmd->is_kill);
+ case HCLGE_MBX_VLAN_RX_OFF_CFG:
+ return hclge_en_hw_strip_rxvtag(handle, msg_cmd->enable);
+ case HCLGE_MBX_GET_PORT_BASE_VLAN_STATE:
+ /* The VF does not need to know the port based VLAN state on
+ * HNAE3_DEVICE_VERSION_V3 and later devices, so always return
+ * "disable" when such a VF queries it.
+ */
+ resp_msg->data[0] =
+ hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3 ?
+ HNAE3_PORT_BASE_VLAN_DISABLE :
+ vport->port_base_vlan_cfg.state;
+ resp_msg->len = sizeof(u8);
+ return 0;
+ case HCLGE_MBX_ENABLE_VLAN_FILTER:
+ return hclge_enable_vport_vlan_filter(vport, msg_cmd->enable);
+ default:
+ return 0;
+ }
+}
+
+static int hclge_set_vf_alive(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+{
+ bool alive = !!mbx_req->msg.data[0];
+ int ret = 0;
+
+ if (alive)
+ ret = hclge_vport_start(vport);
+ else
+ hclge_vport_stop(vport);
+
+ return ret;
+}
+
+static void hclge_get_basic_info(struct hclge_vport *vport,
+ struct hclge_respond_to_vf_msg *resp_msg)
+{
+ struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
+ struct hnae3_ae_dev *ae_dev = vport->back->ae_dev;
+ struct hclge_basic_info *basic_info;
+ unsigned int i;
+ u32 pf_caps;
+
+ basic_info = (struct hclge_basic_info *)resp_msg->data;
+ for (i = 0; i < kinfo->tc_info.num_tc; i++)
+ basic_info->hw_tc_map |= BIT(i);
+
+ pf_caps = le32_to_cpu(basic_info->pf_caps);
+ if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps))
+ hnae3_set_bit(pf_caps, HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B, 1);
+
+ basic_info->pf_caps = cpu_to_le32(pf_caps);
+ resp_msg->len = HCLGE_MBX_MAX_RESP_DATA_SIZE;
+}
+
+static void hclge_get_vf_queue_info(struct hclge_vport *vport,
+ struct hclge_respond_to_vf_msg *resp_msg)
+{
+#define HCLGE_TQPS_RSS_INFO_LEN 6
+
+ struct hclge_mbx_vf_queue_info *queue_info;
+ struct hclge_dev *hdev = vport->back;
+
+ /* get the queue related info */
+ queue_info = (struct hclge_mbx_vf_queue_info *)resp_msg->data;
+ queue_info->num_tqps = cpu_to_le16(vport->alloc_tqps);
+ queue_info->rss_size = cpu_to_le16(vport->nic.kinfo.rss_size);
+ queue_info->rx_buf_len = cpu_to_le16(hdev->rx_buf_len);
+ resp_msg->len = HCLGE_TQPS_RSS_INFO_LEN;
+}
+
+static void hclge_get_vf_mac_addr(struct hclge_vport *vport,
+ struct hclge_respond_to_vf_msg *resp_msg)
+{
+ ether_addr_copy(resp_msg->data, vport->vf_info.mac);
+ resp_msg->len = ETH_ALEN;
+}
+
+static void hclge_get_vf_queue_depth(struct hclge_vport *vport,
+ struct hclge_respond_to_vf_msg *resp_msg)
+{
+#define HCLGE_TQPS_DEPTH_INFO_LEN 4
+
+ struct hclge_mbx_vf_queue_depth *queue_depth;
+ struct hclge_dev *hdev = vport->back;
+
+ /* get the queue depth info */
+ queue_depth = (struct hclge_mbx_vf_queue_depth *)resp_msg->data;
+ queue_depth->num_tx_desc = cpu_to_le16(hdev->num_tx_desc);
+ queue_depth->num_rx_desc = cpu_to_le16(hdev->num_rx_desc);
+
+ resp_msg->len = HCLGE_TQPS_DEPTH_INFO_LEN;
+}
+
+static void hclge_get_vf_media_type(struct hclge_vport *vport,
+ struct hclge_respond_to_vf_msg *resp_msg)
+{
+#define HCLGE_VF_MEDIA_TYPE_OFFSET 0
+#define HCLGE_VF_MODULE_TYPE_OFFSET 1
+#define HCLGE_VF_MEDIA_TYPE_LENGTH 2
+
+ struct hclge_dev *hdev = vport->back;
+
+ resp_msg->data[HCLGE_VF_MEDIA_TYPE_OFFSET] =
+ hdev->hw.mac.media_type;
+ resp_msg->data[HCLGE_VF_MODULE_TYPE_OFFSET] =
+ hdev->hw.mac.module_type;
+ resp_msg->len = HCLGE_VF_MEDIA_TYPE_LENGTH;
+}
+
+int hclge_push_vf_link_status(struct hclge_vport *vport)
+{
+#define HCLGE_VF_LINK_STATE_UP 1U
+#define HCLGE_VF_LINK_STATE_DOWN 0U
+
+ struct hclge_mbx_link_status link_info;
+ struct hclge_dev *hdev = vport->back;
+ u16 link_status;
+
+ /* mac.link can only be 0 or 1 */
+ switch (vport->vf_info.link_state) {
+ case IFLA_VF_LINK_STATE_ENABLE:
+ link_status = HCLGE_VF_LINK_STATE_UP;
+ break;
+ case IFLA_VF_LINK_STATE_DISABLE:
+ link_status = HCLGE_VF_LINK_STATE_DOWN;
+ break;
+ case IFLA_VF_LINK_STATE_AUTO:
+ default:
+ link_status = (u16)hdev->hw.mac.link;
+ break;
+ }
+
+ link_info.link_status = cpu_to_le16(link_status);
+ link_info.speed = cpu_to_le32(hdev->hw.mac.speed);
+ link_info.duplex = cpu_to_le16(hdev->hw.mac.duplex);
+ link_info.flag = HCLGE_MBX_PUSH_LINK_STATUS_EN;
+
+ /* send this requested info to VF */
+ return hclge_send_mbx_msg(vport, (u8 *)&link_info, sizeof(link_info),
+ HCLGE_MBX_LINK_STAT_CHANGE, vport->vport_id);
+}
+
+static void hclge_get_link_mode(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+{
+#define HCLGE_SUPPORTED 1
+ struct hclge_mbx_link_mode link_mode;
+ struct hclge_dev *hdev = vport->back;
+ unsigned long advertising;
+ unsigned long supported;
+ unsigned long send_data;
+ u8 dest_vfid;
+
+ advertising = hdev->hw.mac.advertising[0];
+ supported = hdev->hw.mac.supported[0];
+ dest_vfid = mbx_req->mbx_src_vfid;
+ send_data = mbx_req->msg.data[0] == HCLGE_SUPPORTED ? supported :
+ advertising;
+ link_mode.idx = cpu_to_le16((u16)mbx_req->msg.data[0]);
+ link_mode.link_mode = cpu_to_le64(send_data);
+
+ hclge_send_mbx_msg(vport, (u8 *)&link_mode, sizeof(link_mode),
+ HCLGE_MBX_LINK_STAT_MODE, dest_vfid);
+}
+
+static int hclge_mbx_reset_vf_queue(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req,
+ struct hclge_respond_to_vf_msg *resp_msg)
+{
+#define HCLGE_RESET_ALL_QUEUE_DONE 1U
+ struct hnae3_handle *handle = &vport->nic;
+ struct hclge_dev *hdev = vport->back;
+ u16 queue_id;
+ int ret;
+
+ queue_id = le16_to_cpu(*(__le16 *)mbx_req->msg.data);
+ resp_msg->data[0] = HCLGE_RESET_ALL_QUEUE_DONE;
+ resp_msg->len = sizeof(u8);
+
+ /* The PF resets all of the VF's queues at once, so it is unnecessary
+ * to reset them again when queue_id > 0; just return success.
+ */
+ if (queue_id > 0)
+ return 0;
+
+ ret = hclge_reset_tqp(handle);
+ if (ret)
+ dev_err(&hdev->pdev->dev, "failed to reset vf %u queue, ret = %d\n",
+ vport->vport_id - HCLGE_VF_VPORT_START_NUM, ret);
+
+ return ret;
+}
+
+static int hclge_reset_vf(struct hclge_vport *vport)
+{
+ struct hclge_dev *hdev = vport->back;
+
+ dev_warn(&hdev->pdev->dev, "PF received VF reset request from VF %u!",
+ vport->vport_id - HCLGE_VF_VPORT_START_NUM);
+
+ return hclge_func_reset_cmd(hdev, vport->vport_id);
+}
+
+static void hclge_notify_vf_config(struct hclge_vport *vport)
+{
+ struct hclge_dev *hdev = vport->back;
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+ struct hclge_port_base_vlan_config *vlan_cfg;
+ int ret;
+
+ hclge_push_vf_link_status(vport);
+ if (test_bit(HCLGE_VPORT_NEED_NOTIFY_RESET, &vport->need_notify)) {
+ ret = hclge_inform_vf_reset(vport, HNAE3_VF_PF_FUNC_RESET);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to inform VF %u reset!",
+ vport->vport_id - HCLGE_VF_VPORT_START_NUM);
+ return;
+ }
+ vport->need_notify = 0;
+ return;
+ }
+
+ if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3 &&
+ test_bit(HCLGE_VPORT_NEED_NOTIFY_VF_VLAN, &vport->need_notify)) {
+ vlan_cfg = &vport->port_base_vlan_cfg;
+ ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
+ vport->vport_id,
+ vlan_cfg->state,
+ &vlan_cfg->vlan_info);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to inform VF %u port base vlan!",
+ vport->vport_id - HCLGE_VF_VPORT_START_NUM);
+ return;
+ }
+ clear_bit(HCLGE_VPORT_NEED_NOTIFY_VF_VLAN, &vport->need_notify);
+ }
+}
+
+static void hclge_vf_keep_alive(struct hclge_vport *vport)
+{
+ struct hclge_dev *hdev = vport->back;
+
+ vport->last_active_jiffies = jiffies;
+
+ if (test_bit(HCLGE_VPORT_STATE_INITED, &vport->state) &&
+ !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
+ set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
+ dev_info(&hdev->pdev->dev, "VF %u is alive!",
+ vport->vport_id - HCLGE_VF_VPORT_START_NUM);
+ hclge_notify_vf_config(vport);
+ }
+}
+
+static int hclge_set_vf_mtu(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+{
+ struct hclge_mbx_mtu_info *mtu_info;
+ u32 mtu;
+
+ mtu_info = (struct hclge_mbx_mtu_info *)mbx_req->msg.data;
+ mtu = le32_to_cpu(mtu_info->mtu);
+
+ return hclge_set_vport_mtu(vport, mtu);
+}
+
+static int hclge_get_queue_id_in_pf(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req,
+ struct hclge_respond_to_vf_msg *resp_msg)
+{
+ struct hnae3_handle *handle = &vport->nic;
+ struct hclge_dev *hdev = vport->back;
+ u16 queue_id, qid_in_pf;
+
+ queue_id = le16_to_cpu(*(__le16 *)mbx_req->msg.data);
+ if (queue_id >= handle->kinfo.num_tqps) {
+ dev_err(&hdev->pdev->dev, "Invalid queue id(%u) from VF %u\n",
+ queue_id, mbx_req->mbx_src_vfid);
+ return -EINVAL;
+ }
+
+ qid_in_pf = hclge_covert_handle_qid_global(&vport->nic, queue_id);
+ *(__le16 *)resp_msg->data = cpu_to_le16(qid_in_pf);
+ resp_msg->len = sizeof(qid_in_pf);
+ return 0;
+}
+
+static int hclge_get_rss_key(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req,
+ struct hclge_respond_to_vf_msg *resp_msg)
+{
+#define HCLGE_RSS_MBX_RESP_LEN 8
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_comm_rss_cfg *rss_cfg;
+ u8 index;
+
+ index = mbx_req->msg.data[0];
+ rss_cfg = &hdev->rss_cfg;
+
+ /* Check the query index from the VF to make sure it does not
+ * exceed the size of rss_hash_key.
+ */
+ if (((index + 1) * HCLGE_RSS_MBX_RESP_LEN) >
+ sizeof(rss_cfg->rss_hash_key)) {
+ dev_warn(&hdev->pdev->dev,
+ "failed to get the rss hash key, the index(%u) invalid !\n",
+ index);
+ return -EINVAL;
+ }
+
+ memcpy(resp_msg->data,
+ &rss_cfg->rss_hash_key[index * HCLGE_RSS_MBX_RESP_LEN],
+ HCLGE_RSS_MBX_RESP_LEN);
+ resp_msg->len = HCLGE_RSS_MBX_RESP_LEN;
+ return 0;
+}
+
+static void hclge_link_fail_parse(struct hclge_dev *hdev, u8 link_fail_code)
+{
+ switch (link_fail_code) {
+ case HCLGE_LF_REF_CLOCK_LOST:
+ dev_warn(&hdev->pdev->dev, "Reference clock lost!\n");
+ break;
+ case HCLGE_LF_XSFP_TX_DISABLE:
+ dev_warn(&hdev->pdev->dev, "SFP tx is disabled!\n");
+ break;
+ case HCLGE_LF_XSFP_ABSENT:
+ dev_warn(&hdev->pdev->dev, "SFP is absent!\n");
+ break;
+ default:
+ break;
+ }
+}
+
+static void hclge_handle_link_change_event(struct hclge_dev *hdev,
+ struct hclge_mbx_vf_to_pf_cmd *req)
+{
+ hclge_task_schedule(hdev, 0);
+
+ if (!req->msg.subcode)
+ hclge_link_fail_parse(hdev, req->msg.data[0]);
+}
+
+static bool hclge_cmd_crq_empty(struct hclge_hw *hw)
+{
+ u32 tail = hclge_read_dev(hw, HCLGE_COMM_NIC_CRQ_TAIL_REG);
+
+ return tail == hw->hw.cmq.crq.next_to_use;
+}
+
+static void hclge_handle_ncsi_error(struct hclge_dev *hdev)
+{
+ struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
+
+ ae_dev->ops->set_default_reset_request(ae_dev, HNAE3_GLOBAL_RESET);
+ dev_warn(&hdev->pdev->dev, "requesting reset due to NCSI error\n");
+ ae_dev->ops->reset_event(hdev->pdev, NULL);
+}
+
+static void hclge_handle_vf_tbl(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+{
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_vf_vlan_cfg *msg_cmd;
+
+ msg_cmd = (struct hclge_vf_vlan_cfg *)&mbx_req->msg;
+ if (msg_cmd->subcode == HCLGE_MBX_VPORT_LIST_CLEAR) {
+ hclge_rm_vport_all_mac_table(vport, true, HCLGE_MAC_ADDR_UC);
+ hclge_rm_vport_all_mac_table(vport, true, HCLGE_MAC_ADDR_MC);
+ hclge_rm_vport_all_vlan_table(vport, true);
+ } else {
+ dev_warn(&hdev->pdev->dev, "Invalid cmd(%u)\n",
+ msg_cmd->subcode);
+ }
+}
+
+static int
+hclge_mbx_map_ring_to_vector_handler(struct hclge_mbx_ops_param *param)
+{
+ return hclge_map_unmap_ring_to_vf_vector(param->vport, true,
+ param->req);
+}
+
+static int
+hclge_mbx_unmap_ring_to_vector_handler(struct hclge_mbx_ops_param *param)
+{
+ return hclge_map_unmap_ring_to_vf_vector(param->vport, false,
+ param->req);
+}
+
+static int
+hclge_mbx_get_ring_vector_map_handler(struct hclge_mbx_ops_param *param)
+{
+ int ret;
+
+ ret = hclge_get_vf_ring_vector_map(param->vport, param->req,
+ param->resp_msg);
+ if (ret)
+ dev_err(&param->vport->back->pdev->dev,
+ "PF fail(%d) to get VF ring vector map\n",
+ ret);
+ return ret;
+}
+
+static int hclge_mbx_set_promisc_mode_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_set_vf_promisc_mode(param->vport, param->req);
+ return 0;
+}
+
+static int hclge_mbx_set_unicast_handler(struct hclge_mbx_ops_param *param)
+{
+ int ret;
+
+ ret = hclge_set_vf_uc_mac_addr(param->vport, param->req);
+ if (ret)
+ dev_err(&param->vport->back->pdev->dev,
+ "PF fail(%d) to set VF UC MAC Addr\n",
+ ret);
+ return ret;
+}
+
+static int hclge_mbx_set_multicast_handler(struct hclge_mbx_ops_param *param)
+{
+ int ret;
+
+ ret = hclge_set_vf_mc_mac_addr(param->vport, param->req);
+ if (ret)
+ dev_err(&param->vport->back->pdev->dev,
+ "PF fail(%d) to set VF MC MAC Addr\n",
+ ret);
+ return ret;
+}
+
+static int hclge_mbx_set_vlan_handler(struct hclge_mbx_ops_param *param)
+{
+ int ret;
+
+ ret = hclge_set_vf_vlan_cfg(param->vport, param->req, param->resp_msg);
+ if (ret)
+ dev_err(&param->vport->back->pdev->dev,
+ "PF failed(%d) to config VF's VLAN\n",
+ ret);
+ return ret;
+}
+
+static int hclge_mbx_set_alive_handler(struct hclge_mbx_ops_param *param)
+{
+ int ret;
+
+ ret = hclge_set_vf_alive(param->vport, param->req);
+ if (ret)
+ dev_err(&param->vport->back->pdev->dev,
+ "PF failed(%d) to set VF's ALIVE\n",
+ ret);
+ return ret;
+}
+
+static int hclge_mbx_get_qinfo_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_get_vf_queue_info(param->vport, param->resp_msg);
+ return 0;
+}
+
+static int hclge_mbx_get_qdepth_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_get_vf_queue_depth(param->vport, param->resp_msg);
+ return 0;
+}
+
+static int hclge_mbx_get_basic_info_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_get_basic_info(param->vport, param->resp_msg);
+ return 0;
+}
+
+static int hclge_mbx_get_link_status_handler(struct hclge_mbx_ops_param *param)
+{
+ int ret;
+
+ ret = hclge_push_vf_link_status(param->vport);
+ if (ret)
+ dev_err(&param->vport->back->pdev->dev,
+ "failed to inform link stat to VF, ret = %d\n",
+ ret);
+ return ret;
+}
+
+static int hclge_mbx_queue_reset_handler(struct hclge_mbx_ops_param *param)
+{
+ return hclge_mbx_reset_vf_queue(param->vport, param->req,
+ param->resp_msg);
+}
+
+static int hclge_mbx_reset_handler(struct hclge_mbx_ops_param *param)
+{
+ return hclge_reset_vf(param->vport);
+}
+
+static int hclge_mbx_keep_alive_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_vf_keep_alive(param->vport);
+ return 0;
+}
+
+static int hclge_mbx_set_mtu_handler(struct hclge_mbx_ops_param *param)
+{
+ int ret;
+
+ ret = hclge_set_vf_mtu(param->vport, param->req);
+ if (ret)
+ dev_err(&param->vport->back->pdev->dev,
+ "VF fail(%d) to set mtu\n", ret);
+ return ret;
+}
+
+static int hclge_mbx_get_qid_in_pf_handler(struct hclge_mbx_ops_param *param)
+{
+ return hclge_get_queue_id_in_pf(param->vport, param->req,
+ param->resp_msg);
+}
+
+static int hclge_mbx_get_rss_key_handler(struct hclge_mbx_ops_param *param)
+{
+ return hclge_get_rss_key(param->vport, param->req, param->resp_msg);
+}
+
+static int hclge_mbx_get_link_mode_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_get_link_mode(param->vport, param->req);
+ return 0;
+}
+
+static int
+hclge_mbx_get_vf_flr_status_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_rm_vport_all_mac_table(param->vport, false,
+ HCLGE_MAC_ADDR_UC);
+ hclge_rm_vport_all_mac_table(param->vport, false,
+ HCLGE_MAC_ADDR_MC);
+ hclge_rm_vport_all_vlan_table(param->vport, false);
+ return 0;
+}
+
+static int hclge_mbx_vf_uninit_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_rm_vport_all_mac_table(param->vport, true,
+ HCLGE_MAC_ADDR_UC);
+ hclge_rm_vport_all_mac_table(param->vport, true,
+ HCLGE_MAC_ADDR_MC);
+ hclge_rm_vport_all_vlan_table(param->vport, true);
+ param->vport->mps = 0;
+ return 0;
+}
+
+static int hclge_mbx_get_media_type_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_get_vf_media_type(param->vport, param->resp_msg);
+ return 0;
+}
+
+static int hclge_mbx_push_link_status_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_handle_link_change_event(param->vport->back, param->req);
+ return 0;
+}
+
+static int hclge_mbx_get_mac_addr_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_get_vf_mac_addr(param->vport, param->resp_msg);
+ return 0;
+}
+
+static int hclge_mbx_ncsi_error_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_handle_ncsi_error(param->vport->back);
+ return 0;
+}
+
+static int hclge_mbx_handle_vf_tbl_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_handle_vf_tbl(param->vport, param->req);
+ return 0;
+}
+
+static const hclge_mbx_ops_fn hclge_mbx_ops_list[HCLGE_MBX_OPCODE_MAX] = {
+ [HCLGE_MBX_RESET] = hclge_mbx_reset_handler,
+ [HCLGE_MBX_SET_UNICAST] = hclge_mbx_set_unicast_handler,
+ [HCLGE_MBX_SET_MULTICAST] = hclge_mbx_set_multicast_handler,
+ [HCLGE_MBX_SET_VLAN] = hclge_mbx_set_vlan_handler,
+ [HCLGE_MBX_MAP_RING_TO_VECTOR] = hclge_mbx_map_ring_to_vector_handler,
+ [HCLGE_MBX_UNMAP_RING_TO_VECTOR] = hclge_mbx_unmap_ring_to_vector_handler,
+ [HCLGE_MBX_SET_PROMISC_MODE] = hclge_mbx_set_promisc_mode_handler,
+ [HCLGE_MBX_GET_QINFO] = hclge_mbx_get_qinfo_handler,
+ [HCLGE_MBX_GET_QDEPTH] = hclge_mbx_get_qdepth_handler,
+ [HCLGE_MBX_GET_BASIC_INFO] = hclge_mbx_get_basic_info_handler,
+ [HCLGE_MBX_GET_RSS_KEY] = hclge_mbx_get_rss_key_handler,
+ [HCLGE_MBX_GET_MAC_ADDR] = hclge_mbx_get_mac_addr_handler,
+ [HCLGE_MBX_GET_LINK_STATUS] = hclge_mbx_get_link_status_handler,
+ [HCLGE_MBX_QUEUE_RESET] = hclge_mbx_queue_reset_handler,
+ [HCLGE_MBX_KEEP_ALIVE] = hclge_mbx_keep_alive_handler,
+ [HCLGE_MBX_SET_ALIVE] = hclge_mbx_set_alive_handler,
+ [HCLGE_MBX_SET_MTU] = hclge_mbx_set_mtu_handler,
+ [HCLGE_MBX_GET_QID_IN_PF] = hclge_mbx_get_qid_in_pf_handler,
+ [HCLGE_MBX_GET_LINK_MODE] = hclge_mbx_get_link_mode_handler,
+ [HCLGE_MBX_GET_MEDIA_TYPE] = hclge_mbx_get_media_type_handler,
+ [HCLGE_MBX_VF_UNINIT] = hclge_mbx_vf_uninit_handler,
+ [HCLGE_MBX_HANDLE_VF_TBL] = hclge_mbx_handle_vf_tbl_handler,
+ [HCLGE_MBX_GET_RING_VECTOR_MAP] = hclge_mbx_get_ring_vector_map_handler,
+ [HCLGE_MBX_GET_VF_FLR_STATUS] = hclge_mbx_get_vf_flr_status_handler,
+ [HCLGE_MBX_PUSH_LINK_STATUS] = hclge_mbx_push_link_status_handler,
+ [HCLGE_MBX_NCSI_ERROR] = hclge_mbx_ncsi_error_handler,
+};
+
+static void hclge_mbx_request_handling(struct hclge_mbx_ops_param *param)
+{
+ hclge_mbx_ops_fn cmd_func = NULL;
+ struct hclge_dev *hdev;
+ int ret = 0;
+
+ hdev = param->vport->back;
+ cmd_func = hclge_mbx_ops_list[param->req->msg.code];
+ if (cmd_func)
+ ret = cmd_func(param);
+ else
+ dev_err(&hdev->pdev->dev,
+ "un-supported mailbox message, code = %u\n",
+ param->req->msg.code);
+
+ /* PF driver should not reply IMP */
+ if (hnae3_get_bit(param->req->mbx_need_resp, HCLGE_MBX_NEED_RESP_B) &&
+ param->req->msg.code < HCLGE_MBX_GET_VF_FLR_STATUS) {
+ param->resp_msg->status = ret;
+ if (time_is_before_jiffies(hdev->last_mbx_scheduled +
+ HCLGE_MBX_SCHED_TIMEOUT))
+ dev_warn(&hdev->pdev->dev,
+ "resp vport%u mbx(%u,%u) late\n",
+ param->req->mbx_src_vfid,
+ param->req->msg.code,
+ param->req->msg.subcode);
+
+ hclge_gen_resp_to_vf(param->vport, param->req, param->resp_msg);
+ }
+}
+
+void hclge_mbx_handler(struct hclge_dev *hdev)
+{
+ struct hclge_comm_cmq_ring *crq = &hdev->hw.hw.cmq.crq;
+ struct hclge_respond_to_vf_msg resp_msg;
+ struct hclge_mbx_vf_to_pf_cmd *req;
+ struct hclge_mbx_ops_param param;
+ struct hclge_desc *desc;
+ unsigned int flag;
+
+ param.resp_msg = &resp_msg;
+ /* handle all the mailbox requests in the queue */
+ while (!hclge_cmd_crq_empty(&hdev->hw)) {
+ if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE,
+ &hdev->hw.hw.comm_state)) {
+ dev_warn(&hdev->pdev->dev,
+ "command queue needs re-initializing\n");
+ return;
+ }
+
+ desc = &crq->desc[crq->next_to_use];
+ req = (struct hclge_mbx_vf_to_pf_cmd *)desc->data;
+
+ flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
+ if (unlikely(!hnae3_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B))) {
+ dev_warn(&hdev->pdev->dev,
+ "dropped invalid mailbox message, code = %u\n",
+ req->msg.code);
+
+ /* dropping/not processing this invalid message */
+ crq->desc[crq->next_to_use].flag = 0;
+ hclge_mbx_ring_ptr_move_crq(crq);
+ continue;
+ }
+
+ trace_hclge_pf_mbx_get(hdev, req);
+
+ /* clear the resp_msg before processing every mailbox message */
+ memset(&resp_msg, 0, sizeof(resp_msg));
+ param.vport = &hdev->vport[req->mbx_src_vfid];
+ param.req = req;
+ hclge_mbx_request_handling(&param);
+
+ crq->desc[crq->next_to_use].flag = 0;
+ hclge_mbx_ring_ptr_move_crq(crq);
+ }
+
+ /* Write back the CMDQ_RQ head pointer; M7 needs this pointer */
+ hclge_write_dev(&hdev->hw, HCLGE_COMM_NIC_CRQ_HEAD_REG,
+ crq->next_to_use);
+}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
new file mode 100644
index 0000000000..85fb11de43
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
@@ -0,0 +1,311 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
+
+#include <linux/etherdevice.h>
+#include <linux/kernel.h>
+#include <linux/marvell_phy.h>
+
+#include "hclge_cmd.h"
+#include "hclge_main.h"
+#include "hclge_mdio.h"
+
+enum hclge_mdio_c22_op_seq {
+ HCLGE_MDIO_C22_WRITE = 1,
+ HCLGE_MDIO_C22_READ = 2
+};
+
+#define HCLGE_MDIO_CTRL_START_B 0
+#define HCLGE_MDIO_CTRL_ST_S 1
+#define HCLGE_MDIO_CTRL_ST_M (0x3 << HCLGE_MDIO_CTRL_ST_S)
+#define HCLGE_MDIO_CTRL_OP_S 3
+#define HCLGE_MDIO_CTRL_OP_M (0x3 << HCLGE_MDIO_CTRL_OP_S)
+
+#define HCLGE_MDIO_PHYID_S 0
+#define HCLGE_MDIO_PHYID_M (0x1f << HCLGE_MDIO_PHYID_S)
+
+#define HCLGE_MDIO_PHYREG_S 0
+#define HCLGE_MDIO_PHYREG_M (0x1f << HCLGE_MDIO_PHYREG_S)
+
+#define HCLGE_MDIO_STA_B 0
+
+struct hclge_mdio_cfg_cmd {
+ u8 ctrl_bit;
+ u8 phyid;
+ u8 phyad;
+ u8 rsvd;
+ __le16 reserve;
+ __le16 data_wr;
+ __le16 data_rd;
+ __le16 sta;
+};
+
+static int hclge_mdio_write(struct mii_bus *bus, int phyid, int regnum,
+ u16 data)
+{
+ struct hclge_mdio_cfg_cmd *mdio_cmd;
+ struct hclge_dev *hdev = bus->priv;
+ struct hclge_desc desc;
+ int ret;
+
+ if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state))
+ return -EBUSY;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MDIO_CONFIG, false);
+
+ mdio_cmd = (struct hclge_mdio_cfg_cmd *)desc.data;
+
+ hnae3_set_field(mdio_cmd->phyid, HCLGE_MDIO_PHYID_M,
+ HCLGE_MDIO_PHYID_S, (u32)phyid);
+ hnae3_set_field(mdio_cmd->phyad, HCLGE_MDIO_PHYREG_M,
+ HCLGE_MDIO_PHYREG_S, (u32)regnum);
+
+ hnae3_set_bit(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_START_B, 1);
+ hnae3_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_ST_M,
+ HCLGE_MDIO_CTRL_ST_S, 1);
+ hnae3_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_OP_M,
+ HCLGE_MDIO_CTRL_OP_S, HCLGE_MDIO_C22_WRITE);
+
+ mdio_cmd->data_wr = cpu_to_le16(data);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "mdio write fail when sending cmd, status is %d.\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hclge_mdio_read(struct mii_bus *bus, int phyid, int regnum)
+{
+ struct hclge_mdio_cfg_cmd *mdio_cmd;
+ struct hclge_dev *hdev = bus->priv;
+ struct hclge_desc desc;
+ int ret;
+
+ if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state))
+ return -EBUSY;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MDIO_CONFIG, true);
+
+ mdio_cmd = (struct hclge_mdio_cfg_cmd *)desc.data;
+
+ hnae3_set_field(mdio_cmd->phyid, HCLGE_MDIO_PHYID_M,
+ HCLGE_MDIO_PHYID_S, (u32)phyid);
+ hnae3_set_field(mdio_cmd->phyad, HCLGE_MDIO_PHYREG_M,
+ HCLGE_MDIO_PHYREG_S, (u32)regnum);
+
+ hnae3_set_bit(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_START_B, 1);
+ hnae3_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_ST_M,
+ HCLGE_MDIO_CTRL_ST_S, 1);
+ hnae3_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_OP_M,
+ HCLGE_MDIO_CTRL_OP_S, HCLGE_MDIO_C22_READ);
+
+ /* Read out phy data */
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "mdio read fail when get data, status is %d.\n",
+ ret);
+ return ret;
+ }
+
+ if (hnae3_get_bit(le16_to_cpu(mdio_cmd->sta), HCLGE_MDIO_STA_B)) {
+ dev_err(&hdev->pdev->dev, "mdio read data error\n");
+ return -EIO;
+ }
+
+ return le16_to_cpu(mdio_cmd->data_rd);
+}
+
+int hclge_mac_mdio_config(struct hclge_dev *hdev)
+{
+#define PHY_INEXISTENT 255
+
+ struct hclge_mac *mac = &hdev->hw.mac;
+ struct phy_device *phydev;
+ struct mii_bus *mdio_bus;
+ int ret;
+
+ if (hdev->hw.mac.phy_addr == PHY_INEXISTENT) {
+ dev_info(&hdev->pdev->dev,
+ "no phy device is connected to mdio bus\n");
+ return 0;
+ } else if (hdev->hw.mac.phy_addr >= PHY_MAX_ADDR) {
+ dev_err(&hdev->pdev->dev, "phy_addr(%u) is too large.\n",
+ hdev->hw.mac.phy_addr);
+ return -EINVAL;
+ }
+
+ mdio_bus = devm_mdiobus_alloc(&hdev->pdev->dev);
+ if (!mdio_bus)
+ return -ENOMEM;
+
+ mdio_bus->name = "hisilicon MII bus";
+ mdio_bus->read = hclge_mdio_read;
+ mdio_bus->write = hclge_mdio_write;
+ snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s-%s", "mii",
+ dev_name(&hdev->pdev->dev));
+
+ mdio_bus->parent = &hdev->pdev->dev;
+ mdio_bus->priv = hdev;
+ mdio_bus->phy_mask = ~(1 << mac->phy_addr);
+ ret = mdiobus_register(mdio_bus);
+ if (ret) {
+ dev_err(mdio_bus->parent,
+ "failed to register MDIO bus, ret = %d\n", ret);
+ return ret;
+ }
+
+ phydev = mdiobus_get_phy(mdio_bus, mac->phy_addr);
+ if (!phydev) {
+ dev_err(mdio_bus->parent, "Failed to get phy device\n");
+ mdiobus_unregister(mdio_bus);
+ return -EIO;
+ }
+
+ mac->phydev = phydev;
+ mac->mdio_bus = mdio_bus;
+
+ return 0;
+}
+
+static void hclge_mac_adjust_link(struct net_device *netdev)
+{
+ struct hnae3_handle *h = *((void **)netdev_priv(netdev));
+ struct hclge_vport *vport = hclge_get_vport(h);
+ struct hclge_dev *hdev = vport->back;
+ int duplex, speed;
+ int ret;
+
+ /* When the phy link is down, do nothing */
+ if (netdev->phydev->link == 0)
+ return;
+
+ speed = netdev->phydev->speed;
+ duplex = netdev->phydev->duplex;
+
+ ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex, 0);
+ if (ret)
+ netdev_err(netdev, "failed to adjust link.\n");
+
+ ret = hclge_cfg_flowctrl(hdev);
+ if (ret)
+ netdev_err(netdev, "failed to configure flow control.\n");
+}
+
+int hclge_mac_connect_phy(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct net_device *netdev = hdev->vport[0].nic.netdev;
+ struct phy_device *phydev = hdev->hw.mac.phydev;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+ int ret;
+
+ if (!phydev)
+ return 0;
+
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, phydev->supported);
+
+ phydev->dev_flags |= MARVELL_PHY_LED0_LINK_LED1_ACTIVE;
+
+ ret = phy_connect_direct(netdev, phydev,
+ hclge_mac_adjust_link,
+ PHY_INTERFACE_MODE_SGMII);
+ if (ret) {
+ netdev_err(netdev, "phy_connect_direct err.\n");
+ return ret;
+ }
+
+ linkmode_copy(mask, hdev->hw.mac.supported);
+ linkmode_and(phydev->supported, phydev->supported, mask);
+ linkmode_copy(phydev->advertising, phydev->supported);
+
+ /* The supported flags are Pause and Asym Pause, but the default
+ * advertising should be rx on, tx on, so clear Asym Pause from the
+ * advertising flags.
+ */
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ phydev->advertising);
+
+ phy_attached_info(phydev);
+
+ return 0;
+}
+
+void hclge_mac_disconnect_phy(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct phy_device *phydev = hdev->hw.mac.phydev;
+
+ if (!phydev)
+ return;
+
+ phy_disconnect(phydev);
+}
+
+void hclge_mac_start_phy(struct hclge_dev *hdev)
+{
+ struct phy_device *phydev = hdev->hw.mac.phydev;
+
+ if (!phydev)
+ return;
+
+ phy_loopback(phydev, false);
+
+ phy_start(phydev);
+}
+
+void hclge_mac_stop_phy(struct hclge_dev *hdev)
+{
+ struct net_device *netdev = hdev->vport[0].nic.netdev;
+ struct phy_device *phydev = netdev->phydev;
+
+ if (!phydev)
+ return;
+
+ phy_stop(phydev);
+}
+
+u16 hclge_read_phy_reg(struct hclge_dev *hdev, u16 reg_addr)
+{
+ struct hclge_phy_reg_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PHY_REG, true);
+
+ req = (struct hclge_phy_reg_cmd *)desc.data;
+ req->reg_addr = cpu_to_le16(reg_addr);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "failed to read phy reg, ret = %d.\n", ret);
+
+ return le16_to_cpu(req->reg_val);
+}
+
+int hclge_write_phy_reg(struct hclge_dev *hdev, u16 reg_addr, u16 val)
+{
+ struct hclge_phy_reg_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PHY_REG, false);
+
+ req = (struct hclge_phy_reg_cmd *)desc.data;
+ req->reg_addr = cpu_to_le16(reg_addr);
+ req->reg_val = cpu_to_le16(val);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "failed to write phy reg, ret = %d.\n", ret);
+
+ return ret;
+}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h
new file mode 100644
index 0000000000..4200d0b6d9
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+// Copyright (c) 2016-2017 Hisilicon Limited.
+
+#ifndef __HCLGE_MDIO_H
+#define __HCLGE_MDIO_H
+
+#include "hnae3.h"
+
+struct hclge_dev;
+
+int hclge_mac_mdio_config(struct hclge_dev *hdev);
+int hclge_mac_connect_phy(struct hnae3_handle *handle);
+void hclge_mac_disconnect_phy(struct hnae3_handle *handle);
+void hclge_mac_start_phy(struct hclge_dev *hdev);
+void hclge_mac_stop_phy(struct hclge_dev *hdev);
+u16 hclge_read_phy_reg(struct hclge_dev *hdev, u16 reg_addr);
+int hclge_write_phy_reg(struct hclge_dev *hdev, u16 reg_addr, u16 val);
+
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
new file mode 100644
index 0000000000..80a2a0073d
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
@@ -0,0 +1,552 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2021 Hisilicon Limited.
+
+#include <linux/skbuff.h>
+#include "hclge_main.h"
+#include "hnae3.h"
+
+static int hclge_ptp_get_cycle(struct hclge_dev *hdev)
+{
+ struct hclge_ptp *ptp = hdev->ptp;
+
+ ptp->cycle.quo = readl(hdev->ptp->io_base + HCLGE_PTP_CYCLE_QUO_REG) &
+ HCLGE_PTP_CYCLE_QUO_MASK;
+ ptp->cycle.numer = readl(hdev->ptp->io_base + HCLGE_PTP_CYCLE_NUM_REG);
+ ptp->cycle.den = readl(hdev->ptp->io_base + HCLGE_PTP_CYCLE_DEN_REG);
+
+ if (ptp->cycle.den == 0) {
+ dev_err(&hdev->pdev->dev, "invalid ptp cycle denominator!\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int hclge_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct hclge_dev *hdev = hclge_ptp_get_hdev(ptp);
+ struct hclge_ptp_cycle *cycle = &hdev->ptp->cycle;
+ u64 adj_val, adj_base;
+ unsigned long flags;
+ u32 quo, numerator;
+
+ adj_base = (u64)cycle->quo * (u64)cycle->den + (u64)cycle->numer;
+ adj_val = adjust_by_scaled_ppm(adj_base, scaled_ppm);
+
+ /* The clock cycle is defined by three parts: quotient, numerator
+ * and denominator. For example, for a 2.5 ns cycle the quotient is 2,
+ * the denominator is fixed to ptp->cycle.den, and the numerator
+ * is 0.5 * ptp->cycle.den.
+ */
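+ /* Illustrative example (the den value here is hypothetical; the real
+ * one is read from hardware in hclge_ptp_get_cycle()): with
+ * cycle.den = 4, a 2.5 ns cycle is quo = 2 and numer = 2, so
+ * adj_base = 2 * 4 + 2 = 10; an adj_val of 11 then splits into
+ * quo = 11 / 4 = 2 and numerator = 11 % 4 = 3, i.e. 2.75 ns.
+ */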
+ quo = div_u64_rem(adj_val, cycle->den, &numerator);
+
+ spin_lock_irqsave(&hdev->ptp->lock, flags);
+ writel(quo & HCLGE_PTP_CYCLE_QUO_MASK,
+ hdev->ptp->io_base + HCLGE_PTP_CYCLE_QUO_REG);
+ writel(numerator, hdev->ptp->io_base + HCLGE_PTP_CYCLE_NUM_REG);
+ writel(cycle->den, hdev->ptp->io_base + HCLGE_PTP_CYCLE_DEN_REG);
+ writel(HCLGE_PTP_CYCLE_ADJ_EN,
+ hdev->ptp->io_base + HCLGE_PTP_CYCLE_CFG_REG);
+ spin_unlock_irqrestore(&hdev->ptp->lock, flags);
+
+ return 0;
+}
+
+bool hclge_ptp_set_tx_info(struct hnae3_handle *handle, struct sk_buff *skb)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_ptp *ptp = hdev->ptp;
+
+ if (!test_bit(HCLGE_PTP_FLAG_TX_EN, &ptp->flags) ||
+ test_and_set_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state)) {
+ ptp->tx_skipped++;
+ return false;
+ }
+
+ ptp->tx_start = jiffies;
+ ptp->tx_skb = skb_get(skb);
+ ptp->tx_cnt++;
+
+ return true;
+}
+
+void hclge_ptp_clean_tx_hwts(struct hclge_dev *hdev)
+{
+ struct sk_buff *skb = hdev->ptp->tx_skb;
+ struct skb_shared_hwtstamps hwts;
+ u32 hi, lo;
+ u64 ns;
+
+ ns = readl(hdev->ptp->io_base + HCLGE_PTP_TX_TS_NSEC_REG) &
+ HCLGE_PTP_TX_TS_NSEC_MASK;
+ lo = readl(hdev->ptp->io_base + HCLGE_PTP_TX_TS_SEC_L_REG);
+ hi = readl(hdev->ptp->io_base + HCLGE_PTP_TX_TS_SEC_H_REG) &
+ HCLGE_PTP_TX_TS_SEC_H_MASK;
+ hdev->ptp->last_tx_seqid = readl(hdev->ptp->io_base +
+ HCLGE_PTP_TX_TS_SEQID_REG);
+
+ if (skb) {
+ hdev->ptp->tx_skb = NULL;
+ hdev->ptp->tx_cleaned++;
+
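+ /* hi is masked to 16 bits and lo is a full 32-bit word, so together
+ * they form a 48-bit seconds count that is combined with the 30-bit
+ * nanosecond field read above.
+ */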
+ ns += (((u64)hi) << 32 | lo) * NSEC_PER_SEC;
+ hwts.hwtstamp = ns_to_ktime(ns);
+ skb_tstamp_tx(skb, &hwts);
+ dev_kfree_skb_any(skb);
+ }
+
+ clear_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state);
+}
+
+void hclge_ptp_get_rx_hwts(struct hnae3_handle *handle, struct sk_buff *skb,
+ u32 nsec, u32 sec)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ unsigned long flags;
+ u64 ns = nsec;
+ u32 sec_h;
+
+ if (!test_bit(HCLGE_PTP_FLAG_RX_EN, &hdev->ptp->flags))
+ return;
+
+ /* The BD does not have enough space for the upper 16 bits of the
+ * seconds value, and this part changes infrequently, so read it
+ * from the register instead.
+ */
+ spin_lock_irqsave(&hdev->ptp->lock, flags);
+ sec_h = readl(hdev->ptp->io_base + HCLGE_PTP_CUR_TIME_SEC_H_REG);
+ spin_unlock_irqrestore(&hdev->ptp->lock, flags);
+
+ ns += (((u64)sec_h) << HCLGE_PTP_SEC_H_OFFSET | sec) * NSEC_PER_SEC;
+ skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
+ hdev->ptp->last_rx = jiffies;
+ hdev->ptp->rx_cnt++;
+}
+
+static int hclge_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct hclge_dev *hdev = hclge_ptp_get_hdev(ptp);
+ unsigned long flags;
+ u32 hi, lo;
+ u64 ns;
+
+ spin_lock_irqsave(&hdev->ptp->lock, flags);
+ ns = readl(hdev->ptp->io_base + HCLGE_PTP_CUR_TIME_NSEC_REG);
+ hi = readl(hdev->ptp->io_base + HCLGE_PTP_CUR_TIME_SEC_H_REG);
+ lo = readl(hdev->ptp->io_base + HCLGE_PTP_CUR_TIME_SEC_L_REG);
+ spin_unlock_irqrestore(&hdev->ptp->lock, flags);
+
+ ns += (((u64)hi) << HCLGE_PTP_SEC_H_OFFSET | lo) * NSEC_PER_SEC;
+ *ts = ns_to_timespec64(ns);
+
+ return 0;
+}
+
+static int hclge_ptp_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct hclge_dev *hdev = hclge_ptp_get_hdev(ptp);
+ unsigned long flags;
+
+ spin_lock_irqsave(&hdev->ptp->lock, flags);
+ writel(ts->tv_nsec, hdev->ptp->io_base + HCLGE_PTP_TIME_NSEC_REG);
+ writel(ts->tv_sec >> HCLGE_PTP_SEC_H_OFFSET,
+ hdev->ptp->io_base + HCLGE_PTP_TIME_SEC_H_REG);
+ writel(ts->tv_sec & HCLGE_PTP_SEC_L_MASK,
+ hdev->ptp->io_base + HCLGE_PTP_TIME_SEC_L_REG);
+ /* synchronize the time of phc */
+ writel(HCLGE_PTP_TIME_SYNC_EN,
+ hdev->ptp->io_base + HCLGE_PTP_TIME_SYNC_REG);
+ spin_unlock_irqrestore(&hdev->ptp->lock, flags);
+
+ return 0;
+}
+
+static int hclge_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct hclge_dev *hdev = hclge_ptp_get_hdev(ptp);
+ unsigned long flags;
+ bool is_neg = false;
+ u32 adj_val = 0;
+
+ if (delta < 0) {
+ adj_val |= HCLGE_PTP_TIME_NSEC_NEG;
+ delta = -delta;
+ is_neg = true;
+ }
+
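+ /* The hardware nanosecond adjust field is only 30 bits wide
+ * (HCLGE_PTP_TIME_NSEC_MASK, about 1.07 s), so larger offsets fall
+ * back to a full gettimex + settime sequence below.
+ */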
+ if (delta > HCLGE_PTP_TIME_NSEC_MASK) {
+ struct timespec64 ts;
+ s64 ns;
+
+ hclge_ptp_gettimex(ptp, &ts, NULL);
+ ns = timespec64_to_ns(&ts);
+ ns = is_neg ? ns - delta : ns + delta;
+ ts = ns_to_timespec64(ns);
+ return hclge_ptp_settime(ptp, &ts);
+ }
+
+ adj_val |= delta & HCLGE_PTP_TIME_NSEC_MASK;
+
+ spin_lock_irqsave(&hdev->ptp->lock, flags);
+ writel(adj_val, hdev->ptp->io_base + HCLGE_PTP_TIME_NSEC_REG);
+ writel(HCLGE_PTP_TIME_ADJ_EN,
+ hdev->ptp->io_base + HCLGE_PTP_TIME_ADJ_REG);
+ spin_unlock_irqrestore(&hdev->ptp->lock, flags);
+
+ return 0;
+}
+
+int hclge_ptp_get_cfg(struct hclge_dev *hdev, struct ifreq *ifr)
+{
+ if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state))
+ return -EOPNOTSUPP;
+
+ return copy_to_user(ifr->ifr_data, &hdev->ptp->ts_cfg,
+ sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
+}
+
+static int hclge_ptp_int_en(struct hclge_dev *hdev, bool en)
+{
+ struct hclge_ptp_int_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ req = (struct hclge_ptp_int_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PTP_INT_EN, false);
+ req->int_en = en ? 1 : 0;
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "failed to %s ptp interrupt, ret = %d\n",
+ en ? "enable" : "disable", ret);
+
+ return ret;
+}
+
+int hclge_ptp_cfg_qry(struct hclge_dev *hdev, u32 *cfg)
+{
+ struct hclge_ptp_cfg_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ req = (struct hclge_ptp_cfg_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PTP_MODE_CFG, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to query ptp config, ret = %d\n", ret);
+ return ret;
+ }
+
+ *cfg = le32_to_cpu(req->cfg);
+
+ return 0;
+}
+
+static int hclge_ptp_cfg(struct hclge_dev *hdev, u32 cfg)
+{
+ struct hclge_ptp_cfg_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ req = (struct hclge_ptp_cfg_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PTP_MODE_CFG, false);
+ req->cfg = cpu_to_le32(cfg);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "failed to config ptp, ret = %d\n", ret);
+
+ return ret;
+}
+
+static int hclge_ptp_set_tx_mode(struct hwtstamp_config *cfg,
+ unsigned long *flags, u32 *ptp_cfg)
+{
+ switch (cfg->tx_type) {
+ case HWTSTAMP_TX_OFF:
+ clear_bit(HCLGE_PTP_FLAG_TX_EN, flags);
+ break;
+ case HWTSTAMP_TX_ON:
+ set_bit(HCLGE_PTP_FLAG_TX_EN, flags);
+ *ptp_cfg |= HCLGE_PTP_TX_EN_B;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ return 0;
+}
+
+static int hclge_ptp_set_rx_mode(struct hwtstamp_config *cfg,
+ unsigned long *flags, u32 *ptp_cfg)
+{
+ int rx_filter = cfg->rx_filter;
+
+ switch (cfg->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ clear_bit(HCLGE_PTP_FLAG_RX_EN, flags);
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ set_bit(HCLGE_PTP_FLAG_RX_EN, flags);
+ *ptp_cfg |= HCLGE_PTP_RX_EN_B;
+ *ptp_cfg |= HCLGE_PTP_UDP_FULL_TYPE << HCLGE_PTP_UDP_EN_SHIFT;
+ rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ set_bit(HCLGE_PTP_FLAG_RX_EN, flags);
+ *ptp_cfg |= HCLGE_PTP_RX_EN_B;
+ *ptp_cfg |= HCLGE_PTP_UDP_FULL_TYPE << HCLGE_PTP_UDP_EN_SHIFT;
+ *ptp_cfg |= HCLGE_PTP_MSG1_V2_DEFAULT << HCLGE_PTP_MSG1_SHIFT;
+ *ptp_cfg |= HCLGE_PTP_MSG0_V2_EVENT << HCLGE_PTP_MSG0_SHIFT;
+ *ptp_cfg |= HCLGE_PTP_MSG_TYPE_V2 << HCLGE_PTP_MSG_TYPE_SHIFT;
+ rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ default:
+ return -ERANGE;
+ }
+
+ cfg->rx_filter = rx_filter;
+
+ return 0;
+}
+
+static int hclge_ptp_set_ts_mode(struct hclge_dev *hdev,
+ struct hwtstamp_config *cfg)
+{
+ unsigned long flags = hdev->ptp->flags;
+ u32 ptp_cfg = 0;
+ int ret;
+
+ if (test_bit(HCLGE_PTP_FLAG_EN, &hdev->ptp->flags))
+ ptp_cfg |= HCLGE_PTP_EN_B;
+
+ ret = hclge_ptp_set_tx_mode(cfg, &flags, &ptp_cfg);
+ if (ret)
+ return ret;
+
+ ret = hclge_ptp_set_rx_mode(cfg, &flags, &ptp_cfg);
+ if (ret)
+ return ret;
+
+ ret = hclge_ptp_cfg(hdev, ptp_cfg);
+ if (ret)
+ return ret;
+
+ hdev->ptp->flags = flags;
+ hdev->ptp->ptp_cfg = ptp_cfg;
+
+ return 0;
+}
+
+int hclge_ptp_set_cfg(struct hclge_dev *hdev, struct ifreq *ifr)
+{
+ struct hwtstamp_config cfg;
+ int ret;
+
+ if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state)) {
+ dev_err(&hdev->pdev->dev, "phc is unsupported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
+ return -EFAULT;
+
+ ret = hclge_ptp_set_ts_mode(hdev, &cfg);
+ if (ret)
+ return ret;
+
+ hdev->ptp->ts_cfg = cfg;
+
+ return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+}
+
+int hclge_ptp_get_ts_info(struct hnae3_handle *handle,
+ struct ethtool_ts_info *info)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state)) {
+ dev_err(&hdev->pdev->dev, "phc is unsupported\n");
+ return -EOPNOTSUPP;
+ }
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ if (hdev->ptp->clock)
+ info->phc_index = ptp_clock_index(hdev->ptp->clock);
+ else
+ info->phc_index = -1;
+
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
+
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ);
+
+ info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ);
+
+ return 0;
+}
+
+static int hclge_ptp_create_clock(struct hclge_dev *hdev)
+{
+ struct hclge_ptp *ptp;
+
+ ptp = devm_kzalloc(&hdev->pdev->dev, sizeof(*ptp), GFP_KERNEL);
+ if (!ptp)
+ return -ENOMEM;
+
+ ptp->hdev = hdev;
+ snprintf(ptp->info.name, sizeof(ptp->info.name), "%s",
+ HCLGE_DRIVER_NAME);
+ ptp->info.owner = THIS_MODULE;
+ ptp->info.max_adj = HCLGE_PTP_CYCLE_ADJ_MAX;
+ ptp->info.n_ext_ts = 0;
+ ptp->info.pps = 0;
+ ptp->info.adjfine = hclge_ptp_adjfine;
+ ptp->info.adjtime = hclge_ptp_adjtime;
+ ptp->info.gettimex64 = hclge_ptp_gettimex;
+ ptp->info.settime64 = hclge_ptp_settime;
+
+ ptp->info.n_alarm = 0;
+ ptp->clock = ptp_clock_register(&ptp->info, &hdev->pdev->dev);
+ if (IS_ERR(ptp->clock)) {
+ dev_err(&hdev->pdev->dev,
+ "%d failed to register ptp clock, ret = %ld\n",
+ ptp->info.n_alarm, PTR_ERR(ptp->clock));
+ return -ENODEV;
+ } else if (!ptp->clock) {
+ dev_err(&hdev->pdev->dev, "failed to register ptp clock\n");
+ return -ENODEV;
+ }
+
+ spin_lock_init(&ptp->lock);
+ ptp->io_base = hdev->hw.hw.io_base + HCLGE_PTP_REG_OFFSET;
+ ptp->ts_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
+ ptp->ts_cfg.tx_type = HWTSTAMP_TX_OFF;
+ hdev->ptp = ptp;
+
+ return 0;
+}
+
+static void hclge_ptp_destroy_clock(struct hclge_dev *hdev)
+{
+ ptp_clock_unregister(hdev->ptp->clock);
+ hdev->ptp->clock = NULL;
+ devm_kfree(&hdev->pdev->dev, hdev->ptp);
+ hdev->ptp = NULL;
+}
+
+int hclge_ptp_init(struct hclge_dev *hdev)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+ struct timespec64 ts;
+ int ret;
+
+ if (!test_bit(HNAE3_DEV_SUPPORT_PTP_B, ae_dev->caps))
+ return 0;
+
+ if (!hdev->ptp) {
+ ret = hclge_ptp_create_clock(hdev);
+ if (ret)
+ return ret;
+
+ ret = hclge_ptp_get_cycle(hdev);
+ if (ret)
+ return ret;
+ }
+
+ ret = hclge_ptp_int_en(hdev, true);
+ if (ret)
+ goto out;
+
+ set_bit(HCLGE_PTP_FLAG_EN, &hdev->ptp->flags);
+ ret = hclge_ptp_adjfine(&hdev->ptp->info, 0);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to init freq, ret = %d\n", ret);
+ goto out;
+ }
+
+ ret = hclge_ptp_set_ts_mode(hdev, &hdev->ptp->ts_cfg);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to init ts mode, ret = %d\n", ret);
+ goto out;
+ }
+
+ ktime_get_real_ts64(&ts);
+ ret = hclge_ptp_settime(&hdev->ptp->info, &ts);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to init ts time, ret = %d\n", ret);
+ goto out;
+ }
+
+ set_bit(HCLGE_STATE_PTP_EN, &hdev->state);
+ dev_info(&hdev->pdev->dev, "phc initializes ok!\n");
+
+ return 0;
+
+out:
+ hclge_ptp_destroy_clock(hdev);
+
+ return ret;
+}
+
+void hclge_ptp_uninit(struct hclge_dev *hdev)
+{
+ struct hclge_ptp *ptp = hdev->ptp;
+
+ if (!ptp)
+ return;
+
+ hclge_ptp_int_en(hdev, false);
+ clear_bit(HCLGE_STATE_PTP_EN, &hdev->state);
+ clear_bit(HCLGE_PTP_FLAG_EN, &ptp->flags);
+ ptp->ts_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
+ ptp->ts_cfg.tx_type = HWTSTAMP_TX_OFF;
+
+ if (hclge_ptp_set_ts_mode(hdev, &ptp->ts_cfg))
+ dev_err(&hdev->pdev->dev, "failed to disable phc\n");
+
+ if (ptp->tx_skb) {
+ struct sk_buff *skb = ptp->tx_skb;
+
+ ptp->tx_skb = NULL;
+ dev_kfree_skb_any(skb);
+ }
+
+ hclge_ptp_destroy_clock(hdev);
+}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h
new file mode 100644
index 0000000000..bbee74cd84
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h
@@ -0,0 +1,143 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+// Copyright (c) 2021 Hisilicon Limited.
+
+#ifndef __HCLGE_PTP_H
+#define __HCLGE_PTP_H
+
+#include <linux/ptp_clock_kernel.h>
+#include <linux/net_tstamp.h>
+#include <linux/types.h>
+
+struct hclge_dev;
+struct ifreq;
+
+#define HCLGE_PTP_REG_OFFSET 0x29000
+
+#define HCLGE_PTP_TX_TS_SEQID_REG 0x0
+#define HCLGE_PTP_TX_TS_NSEC_REG 0x4
+#define HCLGE_PTP_TX_TS_NSEC_MASK GENMASK(29, 0)
+#define HCLGE_PTP_TX_TS_SEC_L_REG 0x8
+#define HCLGE_PTP_TX_TS_SEC_H_REG 0xC
+#define HCLGE_PTP_TX_TS_SEC_H_MASK GENMASK(15, 0)
+#define HCLGE_PTP_TX_TS_CNT_REG 0x30
+
+#define HCLGE_PTP_TIME_SEC_H_REG 0x50
+#define HCLGE_PTP_TIME_SEC_H_MASK GENMASK(15, 0)
+#define HCLGE_PTP_TIME_SEC_L_REG 0x54
+#define HCLGE_PTP_TIME_NSEC_REG 0x58
+#define HCLGE_PTP_TIME_NSEC_MASK GENMASK(29, 0)
+#define HCLGE_PTP_TIME_NSEC_NEG BIT(31)
+#define HCLGE_PTP_TIME_SYNC_REG 0x5C
+#define HCLGE_PTP_TIME_SYNC_EN BIT(0)
+#define HCLGE_PTP_TIME_ADJ_REG 0x60
+#define HCLGE_PTP_TIME_ADJ_EN BIT(0)
+#define HCLGE_PTP_CYCLE_QUO_REG 0x64
+#define HCLGE_PTP_CYCLE_QUO_MASK GENMASK(7, 0)
+#define HCLGE_PTP_CYCLE_DEN_REG 0x68
+#define HCLGE_PTP_CYCLE_NUM_REG 0x6C
+#define HCLGE_PTP_CYCLE_CFG_REG 0x70
+#define HCLGE_PTP_CYCLE_ADJ_EN BIT(0)
+#define HCLGE_PTP_CUR_TIME_SEC_H_REG 0x74
+#define HCLGE_PTP_CUR_TIME_SEC_L_REG 0x78
+#define HCLGE_PTP_CUR_TIME_NSEC_REG 0x7C
+
+#define HCLGE_PTP_CYCLE_ADJ_MAX 500000000
+#define HCLGE_PTP_SEC_H_OFFSET 32u
+#define HCLGE_PTP_SEC_L_MASK GENMASK(31, 0)
+
+#define HCLGE_PTP_FLAG_EN 0
+#define HCLGE_PTP_FLAG_TX_EN 1
+#define HCLGE_PTP_FLAG_RX_EN 2
+
+struct hclge_ptp_cycle {
+ u32 quo;
+ u32 numer;
+ u32 den;
+};
+
+struct hclge_ptp {
+ struct hclge_dev *hdev;
+ struct ptp_clock *clock;
+ struct sk_buff *tx_skb;
+ unsigned long flags;
+ void __iomem *io_base;
+ struct ptp_clock_info info;
+ struct hwtstamp_config ts_cfg;
+ spinlock_t lock; /* protects ptp registers */
+ u32 ptp_cfg;
+ u32 last_tx_seqid;
+ struct hclge_ptp_cycle cycle;
+ unsigned long tx_start;
+ unsigned long tx_cnt;
+ unsigned long tx_skipped;
+ unsigned long tx_cleaned;
+ unsigned long last_rx;
+ unsigned long rx_cnt;
+ unsigned long tx_timeout;
+};
+
+struct hclge_ptp_int_cmd {
+#define HCLGE_PTP_INT_EN_B BIT(0)
+
+ u8 int_en;
+ u8 rsvd[23];
+};
+
+enum hclge_ptp_udp_type {
+ HCLGE_PTP_UDP_NOT_TYPE,
+ HCLGE_PTP_UDP_P13F_TYPE,
+ HCLGE_PTP_UDP_P140_TYPE,
+ HCLGE_PTP_UDP_FULL_TYPE,
+};
+
+enum hclge_ptp_msg_type {
+ HCLGE_PTP_MSG_TYPE_V2_L2,
+ HCLGE_PTP_MSG_TYPE_V2,
+ HCLGE_PTP_MSG_TYPE_V2_EVENT,
+};
+
+enum hclge_ptp_msg0_type {
+ HCLGE_PTP_MSG0_V2_DELAY_REQ = 1,
+ HCLGE_PTP_MSG0_V2_PDELAY_REQ,
+ HCLGE_PTP_MSG0_V2_DELAY_RESP,
+ HCLGE_PTP_MSG0_V2_EVENT = 0xF,
+};
+
+#define HCLGE_PTP_MSG1_V2_DEFAULT 1
+
+struct hclge_ptp_cfg_cmd {
+#define HCLGE_PTP_EN_B BIT(0)
+#define HCLGE_PTP_TX_EN_B BIT(1)
+#define HCLGE_PTP_RX_EN_B BIT(2)
+#define HCLGE_PTP_UDP_EN_SHIFT 3
+#define HCLGE_PTP_UDP_EN_MASK GENMASK(4, 3)
+#define HCLGE_PTP_MSG_TYPE_SHIFT 8
+#define HCLGE_PTP_MSG_TYPE_MASK GENMASK(9, 8)
+#define HCLGE_PTP_MSG1_SHIFT 16
+#define HCLGE_PTP_MSG1_MASK GENMASK(19, 16)
+#define HCLGE_PTP_MSG0_SHIFT 24
+#define HCLGE_PTP_MSG0_MASK GENMASK(27, 24)
+
+ __le32 cfg;
+ u8 rsvd[20];
+};
+
+static inline struct hclge_dev *hclge_ptp_get_hdev(struct ptp_clock_info *info)
+{
+ struct hclge_ptp *ptp = container_of(info, struct hclge_ptp, info);
+
+ return ptp->hdev;
+}
+
+bool hclge_ptp_set_tx_info(struct hnae3_handle *handle, struct sk_buff *skb);
+void hclge_ptp_clean_tx_hwts(struct hclge_dev *hdev);
+void hclge_ptp_get_rx_hwts(struct hnae3_handle *handle, struct sk_buff *skb,
+ u32 nsec, u32 sec);
+int hclge_ptp_get_cfg(struct hclge_dev *hdev, struct ifreq *ifr);
+int hclge_ptp_set_cfg(struct hclge_dev *hdev, struct ifreq *ifr);
+int hclge_ptp_init(struct hclge_dev *hdev);
+void hclge_ptp_uninit(struct hclge_dev *hdev);
+int hclge_ptp_get_ts_info(struct hnae3_handle *handle,
+ struct ethtool_ts_info *info);
+int hclge_ptp_cfg_qry(struct hclge_dev *hdev, u32 *cfg);
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.c
new file mode 100644
index 0000000000..43c1c18fa8
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.c
@@ -0,0 +1,668 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2023 Hisilicon Limited.
+
+#include "hclge_cmd.h"
+#include "hclge_main.h"
+#include "hclge_regs.h"
+#include "hnae3.h"
+
+static const u32 cmdq_reg_addr_list[] = {HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG,
+ HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG,
+ HCLGE_COMM_NIC_CSQ_DEPTH_REG,
+ HCLGE_COMM_NIC_CSQ_TAIL_REG,
+ HCLGE_COMM_NIC_CSQ_HEAD_REG,
+ HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG,
+ HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG,
+ HCLGE_COMM_NIC_CRQ_DEPTH_REG,
+ HCLGE_COMM_NIC_CRQ_TAIL_REG,
+ HCLGE_COMM_NIC_CRQ_HEAD_REG,
+ HCLGE_COMM_VECTOR0_CMDQ_SRC_REG,
+ HCLGE_COMM_CMDQ_INTR_STS_REG,
+ HCLGE_COMM_CMDQ_INTR_EN_REG,
+ HCLGE_COMM_CMDQ_INTR_GEN_REG};
+
+static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
+ HCLGE_PF_OTHER_INT_REG,
+ HCLGE_MISC_RESET_STS_REG,
+ HCLGE_MISC_VECTOR_INT_STS,
+ HCLGE_GLOBAL_RESET_REG,
+ HCLGE_FUN_RST_ING,
+ HCLGE_GRO_EN_REG};
+
+static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
+ HCLGE_RING_RX_ADDR_H_REG,
+ HCLGE_RING_RX_BD_NUM_REG,
+ HCLGE_RING_RX_BD_LENGTH_REG,
+ HCLGE_RING_RX_MERGE_EN_REG,
+ HCLGE_RING_RX_TAIL_REG,
+ HCLGE_RING_RX_HEAD_REG,
+ HCLGE_RING_RX_FBD_NUM_REG,
+ HCLGE_RING_RX_OFFSET_REG,
+ HCLGE_RING_RX_FBD_OFFSET_REG,
+ HCLGE_RING_RX_STASH_REG,
+ HCLGE_RING_RX_BD_ERR_REG,
+ HCLGE_RING_TX_ADDR_L_REG,
+ HCLGE_RING_TX_ADDR_H_REG,
+ HCLGE_RING_TX_BD_NUM_REG,
+ HCLGE_RING_TX_PRIORITY_REG,
+ HCLGE_RING_TX_TC_REG,
+ HCLGE_RING_TX_MERGE_EN_REG,
+ HCLGE_RING_TX_TAIL_REG,
+ HCLGE_RING_TX_HEAD_REG,
+ HCLGE_RING_TX_FBD_NUM_REG,
+ HCLGE_RING_TX_OFFSET_REG,
+ HCLGE_RING_TX_EBD_NUM_REG,
+ HCLGE_RING_TX_EBD_OFFSET_REG,
+ HCLGE_RING_TX_BD_ERR_REG,
+ HCLGE_RING_EN_REG};
+
+static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
+ HCLGE_TQP_INTR_GL0_REG,
+ HCLGE_TQP_INTR_GL1_REG,
+ HCLGE_TQP_INTR_GL2_REG,
+ HCLGE_TQP_INTR_RL_REG};
+
+/* Get DFX BD number offset */
+#define HCLGE_DFX_BIOS_BD_OFFSET 1
+#define HCLGE_DFX_SSU_0_BD_OFFSET 2
+#define HCLGE_DFX_SSU_1_BD_OFFSET 3
+#define HCLGE_DFX_IGU_BD_OFFSET 4
+#define HCLGE_DFX_RPU_0_BD_OFFSET 5
+#define HCLGE_DFX_RPU_1_BD_OFFSET 6
+#define HCLGE_DFX_NCSI_BD_OFFSET 7
+#define HCLGE_DFX_RTC_BD_OFFSET 8
+#define HCLGE_DFX_PPP_BD_OFFSET 9
+#define HCLGE_DFX_RCB_BD_OFFSET 10
+#define HCLGE_DFX_TQP_BD_OFFSET 11
+#define HCLGE_DFX_SSU_2_BD_OFFSET 12
+
+static const u32 hclge_dfx_bd_offset_list[] = {
+ HCLGE_DFX_BIOS_BD_OFFSET,
+ HCLGE_DFX_SSU_0_BD_OFFSET,
+ HCLGE_DFX_SSU_1_BD_OFFSET,
+ HCLGE_DFX_IGU_BD_OFFSET,
+ HCLGE_DFX_RPU_0_BD_OFFSET,
+ HCLGE_DFX_RPU_1_BD_OFFSET,
+ HCLGE_DFX_NCSI_BD_OFFSET,
+ HCLGE_DFX_RTC_BD_OFFSET,
+ HCLGE_DFX_PPP_BD_OFFSET,
+ HCLGE_DFX_RCB_BD_OFFSET,
+ HCLGE_DFX_TQP_BD_OFFSET,
+ HCLGE_DFX_SSU_2_BD_OFFSET
+};
+
+static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
+ HCLGE_OPC_DFX_BIOS_COMMON_REG,
+ HCLGE_OPC_DFX_SSU_REG_0,
+ HCLGE_OPC_DFX_SSU_REG_1,
+ HCLGE_OPC_DFX_IGU_EGU_REG,
+ HCLGE_OPC_DFX_RPU_REG_0,
+ HCLGE_OPC_DFX_RPU_REG_1,
+ HCLGE_OPC_DFX_NCSI_REG,
+ HCLGE_OPC_DFX_RTC_REG,
+ HCLGE_OPC_DFX_PPP_REG,
+ HCLGE_OPC_DFX_RCB_REG,
+ HCLGE_OPC_DFX_TQP_REG,
+ HCLGE_OPC_DFX_SSU_REG_2
+};
+
+enum hclge_reg_tag {
+ HCLGE_REG_TAG_CMDQ = 0,
+ HCLGE_REG_TAG_COMMON,
+ HCLGE_REG_TAG_RING,
+ HCLGE_REG_TAG_TQP_INTR,
+ HCLGE_REG_TAG_QUERY_32_BIT,
+ HCLGE_REG_TAG_QUERY_64_BIT,
+ HCLGE_REG_TAG_DFX_BIOS_COMMON,
+ HCLGE_REG_TAG_DFX_SSU_0,
+ HCLGE_REG_TAG_DFX_SSU_1,
+ HCLGE_REG_TAG_DFX_IGU_EGU,
+ HCLGE_REG_TAG_DFX_RPU_0,
+ HCLGE_REG_TAG_DFX_RPU_1,
+ HCLGE_REG_TAG_DFX_NCSI,
+ HCLGE_REG_TAG_DFX_RTC,
+ HCLGE_REG_TAG_DFX_PPP,
+ HCLGE_REG_TAG_DFX_RCB,
+ HCLGE_REG_TAG_DFX_TQP,
+ HCLGE_REG_TAG_DFX_SSU_2,
+ HCLGE_REG_TAG_RPU_TNL,
+};
+
+#pragma pack(4)
+struct hclge_reg_tlv {
+ u16 tag;
+ u16 len;
+};
+
+struct hclge_reg_header {
+ u64 magic_number;
+ u8 is_vf;
+ u8 rsv[7];
+};
+
+#pragma pack()
+
+#define HCLGE_REG_TLV_SIZE sizeof(struct hclge_reg_tlv)
+#define HCLGE_REG_HEADER_SIZE sizeof(struct hclge_reg_header)
+#define HCLGE_REG_TLV_SPACE (sizeof(struct hclge_reg_tlv) / sizeof(u32))
+#define HCLGE_REG_HEADER_SPACE (sizeof(struct hclge_reg_header) / sizeof(u32))
+#define HCLGE_REG_MAGIC_NUMBER 0x686e733372656773 /* ASCII "hns3regs" */
+
+#define HCLGE_REG_RPU_TNL_ID_0 1
+
+static u32 hclge_reg_get_header(void *data)
+{
+ struct hclge_reg_header *header = data;
+
+ header->magic_number = HCLGE_REG_MAGIC_NUMBER;
+ header->is_vf = 0x0;
+
+ return HCLGE_REG_HEADER_SPACE;
+}
+
+static u32 hclge_reg_get_tlv(u32 tag, u32 regs_num, void *data)
+{
+ struct hclge_reg_tlv *tlv = data;
+
+ tlv->tag = tag;
+ tlv->len = regs_num * sizeof(u32) + HCLGE_REG_TLV_SIZE;
+
+ return HCLGE_REG_TLV_SPACE;
+}
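+
+/* The dump built below starts with a 16-byte header (the "hns3regs"
+ * magic number plus an is_vf flag), followed by one block per register
+ * group: a 4-byte TLV {tag, len} and then the raw u32 register values,
+ * where len counts the TLV itself plus the value bytes.
+ */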
+
+static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
+ void *data)
+{
+#define HCLGE_32_BIT_REG_RTN_DATANUM 8
+#define HCLGE_32_BIT_DESC_NODATA_LEN 2
+
+ struct hclge_desc *desc;
+ u32 *reg_val = data;
+ __le32 *desc_data;
+ int nodata_num;
+ int cmd_num;
+ int i, k, n;
+ int ret;
+
+ if (regs_num == 0)
+ return 0;
+
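+ /* The first BD's two header words carry no register data, so e.g.
+ * regs_num = 20 needs DIV_ROUND_UP(20 + 2, 8) = 3 BDs: 6 values come
+ * from desc[0] and up to 8 from each following BD.
+ */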
+ nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
+ cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
+ HCLGE_32_BIT_REG_RTN_DATANUM);
+ desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
+ ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Query 32 bit register cmd failed, ret = %d.\n", ret);
+ kfree(desc);
+ return ret;
+ }
+
+ for (i = 0; i < cmd_num; i++) {
+ if (i == 0) {
+ desc_data = (__le32 *)(&desc[i].data[0]);
+ n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
+ } else {
+ desc_data = (__le32 *)(&desc[i]);
+ n = HCLGE_32_BIT_REG_RTN_DATANUM;
+ }
+ for (k = 0; k < n; k++) {
+ *reg_val++ = le32_to_cpu(*desc_data++);
+
+ regs_num--;
+ if (!regs_num)
+ break;
+ }
+ }
+
+ kfree(desc);
+ return 0;
+}
+
+static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
+ void *data)
+{
+#define HCLGE_64_BIT_REG_RTN_DATANUM 4
+#define HCLGE_64_BIT_DESC_NODATA_LEN 1
+
+ struct hclge_desc *desc;
+ u64 *reg_val = data;
+ __le64 *desc_data;
+ int nodata_len;
+ int cmd_num;
+ int i, k, n;
+ int ret;
+
+ if (regs_num == 0)
+ return 0;
+
+ nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
+ cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
+ HCLGE_64_BIT_REG_RTN_DATANUM);
+ desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
+ ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Query 64 bit register cmd failed, ret = %d.\n", ret);
+ kfree(desc);
+ return ret;
+ }
+
+ for (i = 0; i < cmd_num; i++) {
+ if (i == 0) {
+ desc_data = (__le64 *)(&desc[i].data[0]);
+ n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
+ } else {
+ desc_data = (__le64 *)(&desc[i]);
+ n = HCLGE_64_BIT_REG_RTN_DATANUM;
+ }
+ for (k = 0; k < n; k++) {
+ *reg_val++ = le64_to_cpu(*desc_data++);
+
+ regs_num--;
+ if (!regs_num)
+ break;
+ }
+ }
+
+ kfree(desc);
+ return 0;
+}
+
+int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
+{
+ int i;
+
+ /* initialize the command BDs except the last one */
+ for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
+ hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
+ true);
+ desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ }
+
+ /* initialize the last command BD */
+ hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true);
+
+ return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT);
+}
+
+static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
+ int *bd_num_list,
+ u32 type_num)
+{
+ u32 entries_per_desc, desc_index, index, offset, i;
+ struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
+ int ret;
+
+ ret = hclge_query_bd_num_cmd_send(hdev, desc);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get dfx bd num fail, status is %d.\n", ret);
+ return ret;
+ }
+
+ entries_per_desc = ARRAY_SIZE(desc[0].data);
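+ /* Each descriptor carries 6 data words, so e.g. offset 7 (NCSI)
+ * resolves to desc[1].data[1] below.
+ */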
+ for (i = 0; i < type_num; i++) {
+ offset = hclge_dfx_bd_offset_list[i];
+ index = offset % entries_per_desc;
+ desc_index = offset / entries_per_desc;
+ bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
+ }
+
+ return ret;
+}
+
+static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
+ struct hclge_desc *desc_src, int bd_num,
+ enum hclge_opcode_type cmd)
+{
+ struct hclge_desc *desc = desc_src;
+ int i, ret;
+
+ hclge_cmd_setup_basic_desc(desc, cmd, true);
+ for (i = 0; i < bd_num - 1; i++) {
+ desc->flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ desc++;
+ hclge_cmd_setup_basic_desc(desc, cmd, true);
+ }
+
+ desc = desc_src;
+ ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "Query dfx reg cmd(0x%x) send fail, status is %d.\n",
+ cmd, ret);
+
+ return ret;
+}
+
+/* tnl_id = 0 means get the sum of all tnl registers' values */
+static int hclge_dfx_reg_rpu_tnl_cmd_send(struct hclge_dev *hdev, u32 tnl_id,
+ struct hclge_desc *desc, int bd_num)
+{
+ int i, ret;
+
+ for (i = 0; i < bd_num; i++) {
+ hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_RPU_REG_0,
+ true);
+ if (i != bd_num - 1)
+ desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ }
+
+ desc[0].data[0] = cpu_to_le32(tnl_id);
+ ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "failed to query dfx rpu tnl reg, ret = %d\n",
+ ret);
+ return ret;
+}
+
+static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
+ void *data)
+{
+ int entries_per_desc, reg_num, desc_index, index, i;
+ struct hclge_desc *desc = desc_src;
+ u32 *reg = data;
+
+ entries_per_desc = ARRAY_SIZE(desc->data);
+ reg_num = entries_per_desc * bd_num;
+ for (i = 0; i < reg_num; i++) {
+ index = i % entries_per_desc;
+ desc_index = i / entries_per_desc;
+ *reg++ = le32_to_cpu(desc[desc_index].data[index]);
+ }
+
+ return reg_num;
+}
+
+static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
+{
+ u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+ int data_len_per_desc;
+ int *bd_num_list;
+ int ret;
+ u32 i;
+
+ bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
+ if (!bd_num_list)
+ return -ENOMEM;
+
+ ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get dfx reg bd num fail, status is %d.\n", ret);
+ goto out;
+ }
+
+ data_len_per_desc = sizeof_field(struct hclge_desc, data);
+ *len = 0;
+ for (i = 0; i < dfx_reg_type_num; i++)
+ *len += bd_num_list[i] * data_len_per_desc + HCLGE_REG_TLV_SIZE;
+
+ /* The BD number of dfx_rpu_0 is reused for each dfx_rpu_tnl.
+ * HCLGE_DFX_BD_OFFSET starts at 1 while the array subscript starts
+ * at 0, so the offset needs '- 1'.
+ */
+ *len += (bd_num_list[HCLGE_DFX_RPU_0_BD_OFFSET - 1] * data_len_per_desc +
+ HCLGE_REG_TLV_SIZE) * ae_dev->dev_specs.tnl_num;
+
+out:
+ kfree(bd_num_list);
+ return ret;
+}
+
+static int hclge_get_dfx_rpu_tnl_reg(struct hclge_dev *hdev, u32 *reg,
+ struct hclge_desc *desc_src,
+ int bd_num)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+ int ret = 0;
+ u8 i;
+
+ for (i = HCLGE_REG_RPU_TNL_ID_0; i <= ae_dev->dev_specs.tnl_num; i++) {
+ ret = hclge_dfx_reg_rpu_tnl_cmd_send(hdev, i, desc_src, bd_num);
+ if (ret)
+ break;
+
+ reg += hclge_reg_get_tlv(HCLGE_REG_TAG_RPU_TNL,
+ ARRAY_SIZE(desc_src->data) * bd_num,
+ reg);
+ reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
+ }
+
+ return ret;
+}
+
+static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
+{
+ u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
+ int bd_num, bd_num_max, buf_len;
+ struct hclge_desc *desc_src;
+ int *bd_num_list;
+ u32 *reg = data;
+ int ret;
+ u32 i;
+
+ bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
+ if (!bd_num_list)
+ return -ENOMEM;
+
+ ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get dfx reg bd num fail, status is %d.\n", ret);
+ goto out;
+ }
+
+ bd_num_max = bd_num_list[0];
+ for (i = 1; i < dfx_reg_type_num; i++)
+ bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
+
+ buf_len = sizeof(*desc_src) * bd_num_max;
+ desc_src = kzalloc(buf_len, GFP_KERNEL);
+ if (!desc_src) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; i < dfx_reg_type_num; i++) {
+ bd_num = bd_num_list[i];
+ ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
+ hclge_dfx_reg_opcode_list[i]);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get dfx reg fail, status is %d.\n", ret);
+ goto free;
+ }
+
+ reg += hclge_reg_get_tlv(HCLGE_REG_TAG_DFX_BIOS_COMMON + i,
+ ARRAY_SIZE(desc_src->data) * bd_num,
+ reg);
+ reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
+ }
+
+ /* HCLGE_DFX_BD_OFFSET starts at 1 while the array subscript starts
+ * at 0, so the offset needs '- 1'.
+ */
+ bd_num = bd_num_list[HCLGE_DFX_RPU_0_BD_OFFSET - 1];
+ ret = hclge_get_dfx_rpu_tnl_reg(hdev, reg, desc_src, bd_num);
+
+free:
+ kfree(desc_src);
+out:
+ kfree(bd_num_list);
+ return ret;
+}
+
+static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
+ struct hnae3_knic_private_info *kinfo)
+{
+#define HCLGE_RING_REG_OFFSET 0x200
+#define HCLGE_RING_INT_REG_OFFSET 0x4
+
+ int i, j, reg_num;
+ int data_num_sum;
+ u32 *reg = data;
+
+ /* fetch per-PF register values from the PF PCIe register space */
+ reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
+ reg += hclge_reg_get_tlv(HCLGE_REG_TAG_CMDQ, reg_num, reg);
+ for (i = 0; i < reg_num; i++)
+ *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
+ data_num_sum = reg_num + HCLGE_REG_TLV_SPACE;
+
+ reg_num = ARRAY_SIZE(common_reg_addr_list);
+ reg += hclge_reg_get_tlv(HCLGE_REG_TAG_COMMON, reg_num, reg);
+ for (i = 0; i < reg_num; i++)
+ *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
+ data_num_sum += reg_num + HCLGE_REG_TLV_SPACE;
+
+ reg_num = ARRAY_SIZE(ring_reg_addr_list);
+ for (j = 0; j < kinfo->num_tqps; j++) {
+ reg += hclge_reg_get_tlv(HCLGE_REG_TAG_RING, reg_num, reg);
+ for (i = 0; i < reg_num; i++)
+ *reg++ = hclge_read_dev(&hdev->hw,
+ ring_reg_addr_list[i] +
+ HCLGE_RING_REG_OFFSET * j);
+ }
+ data_num_sum += (reg_num + HCLGE_REG_TLV_SPACE) * kinfo->num_tqps;
+
+ reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
+ for (j = 0; j < hdev->num_msi_used - 1; j++) {
+ reg += hclge_reg_get_tlv(HCLGE_REG_TAG_TQP_INTR, reg_num, reg);
+ for (i = 0; i < reg_num; i++)
+ *reg++ = hclge_read_dev(&hdev->hw,
+ tqp_intr_reg_addr_list[i] +
+ HCLGE_RING_INT_REG_OFFSET * j);
+ }
+ data_num_sum += (reg_num + HCLGE_REG_TLV_SPACE) *
+ (hdev->num_msi_used - 1);
+
+ return data_num_sum;
+}
+
+static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
+ u32 *regs_num_64_bit)
+{
+ struct hclge_desc desc;
+ u32 total_num;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Query register number cmd failed, ret = %d.\n", ret);
+ return ret;
+ }
+
+ *regs_num_32_bit = le32_to_cpu(desc.data[0]);
+ *regs_num_64_bit = le32_to_cpu(desc.data[1]);
+
+ total_num = *regs_num_32_bit + *regs_num_64_bit;
+ if (!total_num)
+ return -EINVAL;
+
+ return 0;
+}
+
+int hclge_get_regs_len(struct hnae3_handle *handle)
+{
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
+ int cmdq_len, common_len, ring_len, tqp_intr_len;
+ int regs_len_32_bit, regs_len_64_bit;
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+
+ ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get register number failed, ret = %d.\n", ret);
+ return ret;
+ }
+
+ ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get dfx reg len failed, ret = %d.\n", ret);
+ return ret;
+ }
+
+ cmdq_len = HCLGE_REG_TLV_SIZE + sizeof(cmdq_reg_addr_list);
+ common_len = HCLGE_REG_TLV_SIZE + sizeof(common_reg_addr_list);
+ ring_len = HCLGE_REG_TLV_SIZE + sizeof(ring_reg_addr_list);
+ tqp_intr_len = HCLGE_REG_TLV_SIZE + sizeof(tqp_intr_reg_addr_list);
+ regs_len_32_bit = HCLGE_REG_TLV_SIZE + regs_num_32_bit * sizeof(u32);
+ regs_len_64_bit = HCLGE_REG_TLV_SIZE + regs_num_64_bit * sizeof(u64);
+
+ /* return the total length of all register values */
+ return HCLGE_REG_HEADER_SIZE + cmdq_len + common_len + ring_len *
+ kinfo->num_tqps + tqp_intr_len * (hdev->num_msi_used - 1) +
+ regs_len_32_bit + regs_len_64_bit + dfx_regs_len;
+}
+
+void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
+ void *data)
+{
+#define HCLGE_REG_64_BIT_SPACE_MULTIPLE 2
+
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ u32 regs_num_32_bit, regs_num_64_bit;
+ u32 *reg = data;
+ int ret;
+
+ *version = hdev->fw_version;
+
+ ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get register number failed, ret = %d.\n", ret);
+ return;
+ }
+
+ reg += hclge_reg_get_header(reg);
+ reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
+
+ reg += hclge_reg_get_tlv(HCLGE_REG_TAG_QUERY_32_BIT,
+ regs_num_32_bit, reg);
+ ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get 32 bit register failed, ret = %d.\n", ret);
+ return;
+ }
+ reg += regs_num_32_bit;
+
+ reg += hclge_reg_get_tlv(HCLGE_REG_TAG_QUERY_64_BIT,
+ regs_num_64_bit *
+ HCLGE_REG_64_BIT_SPACE_MULTIPLE, reg);
+ ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get 64 bit register failed, ret = %d.\n", ret);
+ return;
+ }
+ reg += regs_num_64_bit * HCLGE_REG_64_BIT_SPACE_MULTIPLE;
+
+ ret = hclge_get_dfx_reg(hdev, reg);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "Get dfx register failed, ret = %d.\n", ret);
+}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.h
new file mode 100644
index 0000000000..b6bc1ecb80
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+// Copyright (c) 2023 Hisilicon Limited.
+
+#ifndef __HCLGE_REGS_H
+#define __HCLGE_REGS_H
+#include <linux/types.h>
+#include "hclge_comm_cmd.h"
+
+struct hnae3_handle;
+struct hclge_dev;
+
+int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev,
+ struct hclge_desc *desc);
+int hclge_get_regs_len(struct hnae3_handle *handle);
+void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
+ void *data);
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
new file mode 100644
index 0000000000..c58c312217
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -0,0 +1,2145 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
+
+#include <linux/etherdevice.h>
+
+#include "hclge_cmd.h"
+#include "hclge_main.h"
+#include "hclge_tm.h"
+
+enum hclge_shaper_level {
+ HCLGE_SHAPER_LVL_PRI = 0,
+ HCLGE_SHAPER_LVL_PG = 1,
+ HCLGE_SHAPER_LVL_PORT = 2,
+ HCLGE_SHAPER_LVL_QSET = 3,
+ HCLGE_SHAPER_LVL_CNT = 4,
+ HCLGE_SHAPER_LVL_VF = 0,
+ HCLGE_SHAPER_LVL_PF = 1,
+};
+
+#define HCLGE_TM_PFC_PKT_GET_CMD_NUM 3
+#define HCLGE_TM_PFC_NUM_GET_PER_CMD 3
+
+#define HCLGE_SHAPER_BS_U_DEF 5
+#define HCLGE_SHAPER_BS_S_DEF 20
+
+/* hclge_shaper_para_calc: calculate the ir parameters for the shaper
+ * @ir: rate to be configured, in Mbps
+ * @shaper_level: the shaper level, e.g. port, pg, priority, queue set
+ * @ir_para: parameters of the IR shaper
+ * @max_tm_rate: maximum configurable TM rate
+ *
+ * the formula:
+ *
+ * IR_b * (2 ^ IR_u) * 8
+ * IR(Mbps) = ------------------------- * CLOCK(1000Mbps)
+ * Tick * (2 ^ IR_s)
+ *
+ * @return: 0: calculation successful, negative: failure
+ */
+static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
+ struct hclge_shaper_ir_para *ir_para,
+ u32 max_tm_rate)
+{
+#define DEFAULT_SHAPER_IR_B 126
+#define DIVISOR_CLK (1000 * 8)
+#define DEFAULT_DIVISOR_IR_B (DEFAULT_SHAPER_IR_B * DIVISOR_CLK)
+
+ static const u16 tick_array[HCLGE_SHAPER_LVL_CNT] = {
+ 6 * 256, /* Priority level */
+ 6 * 32, /* Priority group level */
+ 6 * 8, /* Port level */
+ 6 * 256 /* Qset level */
+ };
+ u8 ir_u_calc = 0;
+ u8 ir_s_calc = 0;
+ u32 ir_calc;
+ u32 tick;
+
+ /* Calc tick */
+ if (shaper_level >= HCLGE_SHAPER_LVL_CNT ||
+ ir > max_tm_rate)
+ return -EINVAL;
+
+ tick = tick_array[shaper_level];
+
+ /* Calculate the speed with ir_b = 126, ir_u = 0 and ir_s = 0;
+ * the formula then reduces to:
+ * 126 * 1 * 8
+ * ir_calc = ---------------- * 1000
+ * tick * 1
+ */
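+ /* For example, at priority level tick = 6 * 256 = 1536, so this
+ * default works out to roughly 126 * 8 * 1000 / 1536 ~= 656 Mbps;
+ * ir_u and ir_s are then tuned below until ir_calc approaches ir.
+ */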
+ ir_calc = (DEFAULT_DIVISOR_IR_B + (tick >> 1) - 1) / tick;
+
+ if (ir_calc == ir) {
+ ir_para->ir_b = DEFAULT_SHAPER_IR_B;
+ ir_para->ir_u = 0;
+ ir_para->ir_s = 0;
+
+ return 0;
+ } else if (ir_calc > ir) {
+ /* Increase the denominator to select the ir_s value */
+ while (ir_calc >= ir && ir) {
+ ir_s_calc++;
+ ir_calc = DEFAULT_DIVISOR_IR_B /
+ (tick * (1 << ir_s_calc));
+ }
+
+ ir_para->ir_b = (ir * tick * (1 << ir_s_calc) +
+ (DIVISOR_CLK >> 1)) / DIVISOR_CLK;
+ } else {
+ /* Increase the numerator to select the ir_u value */
+ u32 numerator;
+
+ while (ir_calc < ir) {
+ ir_u_calc++;
+ numerator = DEFAULT_DIVISOR_IR_B * (1 << ir_u_calc);
+ ir_calc = (numerator + (tick >> 1)) / tick;
+ }
+
+ if (ir_calc == ir) {
+ ir_para->ir_b = DEFAULT_SHAPER_IR_B;
+ } else {
+ u32 denominator = DIVISOR_CLK * (1 << --ir_u_calc);
+ ir_para->ir_b = (ir * tick + (denominator >> 1)) /
+ denominator;
+ }
+ }
+
+ ir_para->ir_u = ir_u_calc;
+ ir_para->ir_s = ir_s_calc;
+
+ return 0;
+}
+
+static const u16 hclge_pfc_tx_stats_offset[] = {
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num),
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num),
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num),
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num),
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num),
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num),
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num),
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)
+};
+
+static const u16 hclge_pfc_rx_stats_offset[] = {
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num),
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num),
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num),
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num),
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num),
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num),
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num),
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)
+};
+
+static void hclge_pfc_stats_get(struct hclge_dev *hdev, bool tx, u64 *stats)
+{
+ const u16 *offset;
+ int i;
+
+ if (tx)
+ offset = hclge_pfc_tx_stats_offset;
+ else
+ offset = hclge_pfc_rx_stats_offset;
+
+ for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
+ stats[i] = HCLGE_STATS_READ(&hdev->mac_stats, offset[i]);
+}
+
+void hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats)
+{
+ hclge_pfc_stats_get(hdev, false, stats);
+}
+
+void hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats)
+{
+ hclge_pfc_stats_get(hdev, true, stats);
+}
+
+int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx)
+{
+ struct hclge_desc desc;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PAUSE_EN, false);
+
+ desc.data[0] = cpu_to_le32((tx ? HCLGE_TX_MAC_PAUSE_EN_MSK : 0) |
+ (rx ? HCLGE_RX_MAC_PAUSE_EN_MSK : 0));
+
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
+int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
+ u8 pfc_bitmap)
+{
+ struct hclge_desc desc;
+ struct hclge_pfc_en_cmd *pfc = (struct hclge_pfc_en_cmd *)desc.data;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PFC_PAUSE_EN, false);
+
+ pfc->tx_rx_en_bitmap = tx_rx_bitmap;
+ pfc->pri_en_bitmap = pfc_bitmap;
+
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
+static int hclge_pause_param_cfg(struct hclge_dev *hdev, const u8 *addr,
+ u8 pause_trans_gap, u16 pause_trans_time)
+{
+ struct hclge_cfg_pause_param_cmd *pause_param;
+ struct hclge_desc desc;
+
+ pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, false);
+
+ ether_addr_copy(pause_param->mac_addr, addr);
+ ether_addr_copy(pause_param->mac_addr_extra, addr);
+ pause_param->pause_trans_gap = pause_trans_gap;
+ pause_param->pause_trans_time = cpu_to_le16(pause_trans_time);
+
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
+int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr)
+{
+ struct hclge_cfg_pause_param_cmd *pause_param;
+ struct hclge_desc desc;
+ u16 trans_time;
+ u8 trans_gap;
+ int ret;
+
+ pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ return ret;
+
+ trans_gap = pause_param->pause_trans_gap;
+ trans_time = le16_to_cpu(pause_param->pause_trans_time);
+
+ return hclge_pause_param_cfg(hdev, mac_addr, trans_gap, trans_time);
+}
+
+static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)
+{
+ u8 tc;
+
+ tc = hdev->tm_info.prio_tc[pri_id];
+
+ if (tc >= hdev->tm_info.num_tc)
+ return -EINVAL;
+
+ /* The priority register has four bytes; the first byte holds
+ * priority0 and priority1, with the upper 4 bits standing for
+ * priority1 and the lower 4 bits for priority0, as below:
+ * first byte: | pri_1 | pri_0 |
+ * second byte: | pri_3 | pri_2 |
+ * third byte: | pri_5 | pri_4 |
+ * fourth byte: | pri_7 | pri_6 |
+ */
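+ /* e.g. pri_id 5 mapped to tc 3 sets the upper nibble of the third
+ * byte: pri[5 >> 1] |= 3 << ((5 & 1) * 4), i.e. pri[2] |= 0x30.
+ */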
+ pri[pri_id >> 1] |= tc << ((pri_id & 1) * 4);
+
+ return 0;
+}
+
+int hclge_up_to_tc_map(struct hclge_dev *hdev)
+{
+ struct hclge_desc desc;
+ u8 *pri = (u8 *)desc.data;
+ u8 pri_id;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, false);
+
+ for (pri_id = 0; pri_id < HNAE3_MAX_USER_PRIO; pri_id++) {
+ ret = hclge_fill_pri_array(hdev, pri, pri_id);
+ if (ret)
+ return ret;
+ }
+
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
+static void hclge_dscp_to_prio_map_init(struct hclge_dev *hdev)
+{
+ u8 i;
+
+ hdev->vport[0].nic.kinfo.tc_map_mode = HNAE3_TC_MAP_MODE_PRIO;
+ hdev->vport[0].nic.kinfo.dscp_app_cnt = 0;
+ for (i = 0; i < HNAE3_MAX_DSCP; i++)
+ hdev->vport[0].nic.kinfo.dscp_prio[i] = HNAE3_PRIO_ID_INVALID;
+}
+
+int hclge_dscp_to_tc_map(struct hclge_dev *hdev)
+{
+ struct hclge_desc desc[HCLGE_DSCP_MAP_TC_BD_NUM];
+ u8 *req0 = (u8 *)desc[0].data;
+ u8 *req1 = (u8 *)desc[1].data;
+ u8 pri_id, tc_id, i, j;
+
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QOS_MAP, false);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_QOS_MAP, false);
+
+ /* The low 32 DSCP settings use bd0, the high 32 use bd1 */
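+ /* With 64 DSCP values split across the two BDs, the loop walks
+ * DSCP 0..31; DSCP i and DSCP i + 32 land in the same byte and
+ * nibble position of bd0 and bd1 respectively.
+ */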
+ for (i = 0; i < HNAE3_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM; i++) {
+ pri_id = hdev->vport[0].nic.kinfo.dscp_prio[i];
+ pri_id = pri_id == HNAE3_PRIO_ID_INVALID ? 0 : pri_id;
+ tc_id = hdev->tm_info.prio_tc[pri_id];
+ /* Each DSCP setting takes 4 bits, so each byte holds two DSCP
+ * settings.
+ */
+ req0[i >> 1] |= tc_id << HCLGE_DSCP_TC_SHIFT(i);
+
+ j = i + HNAE3_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM;
+ pri_id = hdev->vport[0].nic.kinfo.dscp_prio[j];
+ pri_id = pri_id == HNAE3_PRIO_ID_INVALID ? 0 : pri_id;
+ tc_id = hdev->tm_info.prio_tc[pri_id];
+ req1[i >> 1] |= tc_id << HCLGE_DSCP_TC_SHIFT(i);
+ }
+
+ return hclge_cmd_send(&hdev->hw, desc, HCLGE_DSCP_MAP_TC_BD_NUM);
+}
+
+static int hclge_tm_pg_to_pri_map_cfg(struct hclge_dev *hdev,
+ u8 pg_id, u8 pri_bit_map)
+{
+ struct hclge_pg_to_pri_link_cmd *map;
+ struct hclge_desc desc;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_TO_PRI_LINK, false);
+
+ map = (struct hclge_pg_to_pri_link_cmd *)desc.data;
+
+ map->pg_id = pg_id;
+ map->pri_bit_map = pri_bit_map;
+
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
+static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev, u16 qs_id, u8 pri,
+ bool link_vld)
+{
+ struct hclge_qs_to_pri_link_cmd *map;
+ struct hclge_desc desc;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_TO_PRI_LINK, false);
+
+ map = (struct hclge_qs_to_pri_link_cmd *)desc.data;
+
+ map->qs_id = cpu_to_le16(qs_id);
+ map->priority = pri;
+ map->link_vld = link_vld ? HCLGE_TM_QS_PRI_LINK_VLD_MSK : 0;
+
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
+static int hclge_tm_q_to_qs_map_cfg(struct hclge_dev *hdev,
+ u16 q_id, u16 qs_id)
+{
+ struct hclge_nq_to_qs_link_cmd *map;
+ struct hclge_desc desc;
+ u16 qs_id_l;
+ u16 qs_id_h;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NQ_TO_QS_LINK, false);
+
+ map = (struct hclge_nq_to_qs_link_cmd *)desc.data;
+
+ map->nq_id = cpu_to_le16(q_id);
+
+ /* convert qs_id to the following format to support qset_id >= 1024
+ * qs_id: | 15 | 14 ~ 10 | 9 ~ 0 |
+ * / / \ \
+ * / / \ \
+ * qset_id: | 15 ~ 11 | 10 | 9 ~ 0 |
+ * | qs_id_h | vld | qs_id_l |
+ */
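+ /* e.g. qs_id 1280 (0x500): qs_id_l = 0x100 and qs_id_h = 1, so the
+ * value written is (1 << 11) | 0x100 plus the valid bit (bit 10 in
+ * the diagram above), i.e. 0xd00.
+ */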
+ qs_id_l = hnae3_get_field(qs_id, HCLGE_TM_QS_ID_L_MSK,
+ HCLGE_TM_QS_ID_L_S);
+ qs_id_h = hnae3_get_field(qs_id, HCLGE_TM_QS_ID_H_MSK,
+ HCLGE_TM_QS_ID_H_S);
+ hnae3_set_field(qs_id, HCLGE_TM_QS_ID_L_MSK, HCLGE_TM_QS_ID_L_S,
+ qs_id_l);
+ hnae3_set_field(qs_id, HCLGE_TM_QS_ID_H_EXT_MSK, HCLGE_TM_QS_ID_H_EXT_S,
+ qs_id_h);
+ map->qset_id = cpu_to_le16(qs_id | HCLGE_TM_Q_QS_LINK_VLD_MSK);
+
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
+static int hclge_tm_pg_weight_cfg(struct hclge_dev *hdev, u8 pg_id,
+ u8 dwrr)
+{
+ struct hclge_pg_weight_cmd *weight;
+ struct hclge_desc desc;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_WEIGHT, false);
+
+ weight = (struct hclge_pg_weight_cmd *)desc.data;
+
+ weight->pg_id = pg_id;
+ weight->dwrr = dwrr;
+
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
+static int hclge_tm_pri_weight_cfg(struct hclge_dev *hdev, u8 pri_id,
+ u8 dwrr)
+{
+ struct hclge_priority_weight_cmd *weight;
+ struct hclge_desc desc;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_WEIGHT, false);
+
+ weight = (struct hclge_priority_weight_cmd *)desc.data;
+
+ weight->pri_id = pri_id;
+ weight->dwrr = dwrr;
+
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
+static int hclge_tm_qs_weight_cfg(struct hclge_dev *hdev, u16 qs_id,
+ u8 dwrr)
+{
+ struct hclge_qs_weight_cmd *weight;
+ struct hclge_desc desc;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_WEIGHT, false);
+
+ weight = (struct hclge_qs_weight_cmd *)desc.data;
+
+ weight->qs_id = cpu_to_le16(qs_id);
+ weight->dwrr = dwrr;
+
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
+static u32 hclge_tm_get_shapping_para(u8 ir_b, u8 ir_u, u8 ir_s,
+ u8 bs_b, u8 bs_s)
+{
+ u32 shapping_para = 0;
+
+ hclge_tm_set_field(shapping_para, IR_B, ir_b);
+ hclge_tm_set_field(shapping_para, IR_U, ir_u);
+ hclge_tm_set_field(shapping_para, IR_S, ir_s);
+ hclge_tm_set_field(shapping_para, BS_B, bs_b);
+ hclge_tm_set_field(shapping_para, BS_S, bs_s);
+
+ return shapping_para;
+}
+
+static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
+ enum hclge_shap_bucket bucket, u8 pg_id,
+ u32 shapping_para, u32 rate)
+{
+ struct hclge_pg_shapping_cmd *shap_cfg_cmd;
+ enum hclge_opcode_type opcode;
+ struct hclge_desc desc;
+
+ opcode = bucket ? HCLGE_OPC_TM_PG_P_SHAPPING :
+ HCLGE_OPC_TM_PG_C_SHAPPING;
+ hclge_cmd_setup_basic_desc(&desc, opcode, false);
+
+ shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;
+
+ shap_cfg_cmd->pg_id = pg_id;
+
+ shap_cfg_cmd->pg_shapping_para = cpu_to_le32(shapping_para);
+
+ hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1);
+
+ shap_cfg_cmd->pg_rate = cpu_to_le32(rate);
+
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
+int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
+{
+ struct hclge_port_shapping_cmd *shap_cfg_cmd;
+ struct hclge_shaper_ir_para ir_para;
+ struct hclge_desc desc;
+ u32 shapping_para;
+ int ret;
+
+ ret = hclge_shaper_para_calc(hdev->hw.mac.speed, HCLGE_SHAPER_LVL_PORT,
+ &ir_para,
+ hdev->ae_dev->dev_specs.max_tm_rate);
+ if (ret)
+ return ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, false);
+ shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;
+
+ shapping_para = hclge_tm_get_shapping_para(ir_para.ir_b, ir_para.ir_u,
+ ir_para.ir_s,
+ HCLGE_SHAPER_BS_U_DEF,
+ HCLGE_SHAPER_BS_S_DEF);
+
+ shap_cfg_cmd->port_shapping_para = cpu_to_le32(shapping_para);
+
+ hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1);
+
+ shap_cfg_cmd->port_rate = cpu_to_le32(hdev->hw.mac.speed);
+
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
+static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev,
+ enum hclge_shap_bucket bucket, u8 pri_id,
+ u32 shapping_para, u32 rate)
+{
+ struct hclge_pri_shapping_cmd *shap_cfg_cmd;
+ enum hclge_opcode_type opcode;
+ struct hclge_desc desc;
+
+ opcode = bucket ? HCLGE_OPC_TM_PRI_P_SHAPPING :
+ HCLGE_OPC_TM_PRI_C_SHAPPING;
+
+ hclge_cmd_setup_basic_desc(&desc, opcode, false);
+
+ shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;
+
+ shap_cfg_cmd->pri_id = pri_id;
+
+ shap_cfg_cmd->pri_shapping_para = cpu_to_le32(shapping_para);
+
+ hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1);
+
+ shap_cfg_cmd->pri_rate = cpu_to_le32(rate);
+
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
+static int hclge_tm_pg_schd_mode_cfg(struct hclge_dev *hdev, u8 pg_id)
+{
+ struct hclge_desc desc;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_SCH_MODE_CFG, false);
+
+ if (hdev->tm_info.pg_info[pg_id].pg_sch_mode == HCLGE_SCH_MODE_DWRR)
+ desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
+ else
+ desc.data[1] = 0;
+
+ desc.data[0] = cpu_to_le32(pg_id);
+
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
+static int hclge_tm_pri_schd_mode_cfg(struct hclge_dev *hdev, u8 pri_id)
+{
+ struct hclge_desc desc;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_SCH_MODE_CFG, false);
+
+ if (hdev->tm_info.tc_info[pri_id].tc_sch_mode == HCLGE_SCH_MODE_DWRR)
+ desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
+ else
+ desc.data[1] = 0;
+
+ desc.data[0] = cpu_to_le32(pri_id);
+
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
+static int hclge_tm_qs_schd_mode_cfg(struct hclge_dev *hdev, u16 qs_id, u8 mode)
+{
+ struct hclge_desc desc;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_SCH_MODE_CFG, false);
+
+ if (mode == HCLGE_SCH_MODE_DWRR)
+ desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
+ else
+ desc.data[1] = 0;
+
+ desc.data[0] = cpu_to_le32(qs_id);
+
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
+static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc, u8 grp_id,
+ u32 bit_map)
+{
+ struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
+ struct hclge_desc desc;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_BP_TO_QSET_MAPPING,
+ false);
+
+ bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;
+
+ bp_to_qs_map_cmd->tc_id = tc;
+ bp_to_qs_map_cmd->qs_group_id = grp_id;
+ bp_to_qs_map_cmd->qs_bit_map = cpu_to_le32(bit_map);
+
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
+int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate)
+{
+ struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
+ struct hclge_qs_shapping_cmd *shap_cfg_cmd;
+ struct hclge_shaper_ir_para ir_para;
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_desc desc;
+ u32 shaper_para;
+ int ret, i;
+
+ if (!max_tx_rate)
+ max_tx_rate = hdev->ae_dev->dev_specs.max_tm_rate;
+
+ ret = hclge_shaper_para_calc(max_tx_rate, HCLGE_SHAPER_LVL_QSET,
+ &ir_para,
+ hdev->ae_dev->dev_specs.max_tm_rate);
+ if (ret)
+ return ret;
+
+ shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b, ir_para.ir_u,
+ ir_para.ir_s,
+ HCLGE_SHAPER_BS_U_DEF,
+ HCLGE_SHAPER_BS_S_DEF);
+
+ for (i = 0; i < kinfo->tc_info.num_tc; i++) {
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QCN_SHAPPING_CFG,
+ false);
+
+ shap_cfg_cmd = (struct hclge_qs_shapping_cmd *)desc.data;
+ shap_cfg_cmd->qs_id = cpu_to_le16(vport->qs_offset + i);
+ shap_cfg_cmd->qs_shapping_para = cpu_to_le32(shaper_para);
+
+ hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1);
+ shap_cfg_cmd->qs_rate = cpu_to_le32(max_tx_rate);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "vport%u, qs%u failed to set tx_rate:%d, ret=%d\n",
+ vport->vport_id, shap_cfg_cmd->qs_id,
+ max_tx_rate, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static u16 hclge_vport_get_max_rss_size(struct hclge_vport *vport)
+{
+ struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
+ struct hnae3_tc_info *tc_info = &kinfo->tc_info;
+ struct hclge_dev *hdev = vport->back;
+ u16 max_rss_size = 0;
+ int i;
+
+ if (!tc_info->mqprio_active)
+ return vport->alloc_tqps / tc_info->num_tc;
+
+ for (i = 0; i < HNAE3_MAX_TC; i++) {
+ if (!(hdev->hw_tc_map & BIT(i)) || i >= tc_info->num_tc)
+ continue;
+ if (max_rss_size < tc_info->tqp_count[i])
+ max_rss_size = tc_info->tqp_count[i];
+ }
+
+ return max_rss_size;
+}
+
+static u16 hclge_vport_get_tqp_num(struct hclge_vport *vport)
+{
+ struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
+ struct hnae3_tc_info *tc_info = &kinfo->tc_info;
+ struct hclge_dev *hdev = vport->back;
+ int sum = 0;
+ int i;
+
+ if (!tc_info->mqprio_active)
+ return kinfo->rss_size * tc_info->num_tc;
+
+ for (i = 0; i < HNAE3_MAX_TC; i++) {
+ if (hdev->hw_tc_map & BIT(i) && i < tc_info->num_tc)
+ sum += tc_info->tqp_count[i];
+ }
+
+ return sum;
+}
+
+static void hclge_tm_update_kinfo_rss_size(struct hclge_vport *vport)
+{
+ struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
+ struct hclge_dev *hdev = vport->back;
+ u16 vport_max_rss_size;
+ u16 max_rss_size;
+
+ /* TC configuration is shared by the PF and VFs on one port; for
+ * simplicity, only allow one TC for a VF. A VF's vport_id is non-zero.
+ */
+ if (vport->vport_id) {
+ kinfo->tc_info.max_tc = 1;
+ kinfo->tc_info.num_tc = 1;
+ vport->qs_offset = HNAE3_MAX_TC +
+ vport->vport_id - HCLGE_VF_VPORT_START_NUM;
+ vport_max_rss_size = hdev->vf_rss_size_max;
+ } else {
+ kinfo->tc_info.max_tc = hdev->tc_max;
+ kinfo->tc_info.num_tc =
+ min_t(u16, vport->alloc_tqps, hdev->tm_info.num_tc);
+ vport->qs_offset = 0;
+ vport_max_rss_size = hdev->pf_rss_size_max;
+ }
+
+ max_rss_size = min_t(u16, vport_max_rss_size,
+ hclge_vport_get_max_rss_size(vport));
+
+ /* Set to user value, no larger than max_rss_size. */
+ if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size &&
+ kinfo->req_rss_size <= max_rss_size) {
+ dev_info(&hdev->pdev->dev, "rss changes from %u to %u\n",
+ kinfo->rss_size, kinfo->req_rss_size);
+ kinfo->rss_size = kinfo->req_rss_size;
+ } else if (kinfo->rss_size > max_rss_size ||
+ (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) {
+ /* Set to the maximum specification value (max_rss_size). */
+ kinfo->rss_size = max_rss_size;
+ }
+}
+
+static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
+{
+ struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
+ struct hclge_dev *hdev = vport->back;
+ u8 i;
+
+ hclge_tm_update_kinfo_rss_size(vport);
+ kinfo->num_tqps = hclge_vport_get_tqp_num(vport);
+ vport->dwrr = 100; /* 100 percent as init */
+ vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit;
+
+ if (vport->vport_id == PF_VPORT_ID)
+ hdev->rss_cfg.rss_size = kinfo->rss_size;
+
+ /* when mqprio is enabled, tc_info has already been updated. */
+ if (kinfo->tc_info.mqprio_active)
+ return;
+
+ for (i = 0; i < HNAE3_MAX_TC; i++) {
+ if (hdev->hw_tc_map & BIT(i) && i < kinfo->tc_info.num_tc) {
+ kinfo->tc_info.tqp_offset[i] = i * kinfo->rss_size;
+ kinfo->tc_info.tqp_count[i] = kinfo->rss_size;
+ } else {
+ /* Set to default queue if TC is disabled */
+ kinfo->tc_info.tqp_offset[i] = 0;
+ kinfo->tc_info.tqp_count[i] = 1;
+ }
+ }
+
+ memcpy(kinfo->tc_info.prio_tc, hdev->tm_info.prio_tc,
+ sizeof_field(struct hnae3_tc_info, prio_tc));
+}
+
+static void hclge_tm_vport_info_update(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport = hdev->vport;
+ u32 i;
+
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ hclge_tm_vport_tc_info_update(vport);
+
+ vport++;
+ }
+}
+
+static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
+{
+ u8 i, tc_sch_mode;
+ u32 bw_limit;
+
+ for (i = 0; i < hdev->tc_max; i++) {
+ if (i < hdev->tm_info.num_tc) {
+ tc_sch_mode = HCLGE_SCH_MODE_DWRR;
+ bw_limit = hdev->tm_info.pg_info[0].bw_limit;
+ } else {
+ tc_sch_mode = HCLGE_SCH_MODE_SP;
+ bw_limit = 0;
+ }
+
+ hdev->tm_info.tc_info[i].tc_id = i;
+ hdev->tm_info.tc_info[i].tc_sch_mode = tc_sch_mode;
+ hdev->tm_info.tc_info[i].pgid = 0;
+ hdev->tm_info.tc_info[i].bw_limit = bw_limit;
+ }
+
+ for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
+ hdev->tm_info.prio_tc[i] =
+ (i >= hdev->tm_info.num_tc) ? 0 : i;
+}
+
+static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
+{
+#define BW_PERCENT 100
+#define DEFAULT_BW_WEIGHT 1
+
+ u8 i;
+
+ for (i = 0; i < hdev->tm_info.num_pg; i++) {
+ int k;
+
+ hdev->tm_info.pg_dwrr[i] = i ? 0 : BW_PERCENT;
+
+ hdev->tm_info.pg_info[i].pg_id = i;
+ hdev->tm_info.pg_info[i].pg_sch_mode = HCLGE_SCH_MODE_DWRR;
+
+ hdev->tm_info.pg_info[i].bw_limit =
+ hdev->ae_dev->dev_specs.max_tm_rate;
+
+ if (i != 0)
+ continue;
+
+ hdev->tm_info.pg_info[i].tc_bit_map = hdev->hw_tc_map;
+ for (k = 0; k < hdev->tm_info.num_tc; k++)
+ hdev->tm_info.pg_info[i].tc_dwrr[k] = BW_PERCENT;
+ for (; k < HNAE3_MAX_TC; k++)
+ hdev->tm_info.pg_info[i].tc_dwrr[k] = DEFAULT_BW_WEIGHT;
+ }
+}
+
+static void hclge_update_fc_mode_by_dcb_flag(struct hclge_dev *hdev)
+{
+ if (hdev->tm_info.num_tc == 1 && !hdev->tm_info.pfc_en) {
+ if (hdev->fc_mode_last_time == HCLGE_FC_PFC)
+ dev_warn(&hdev->pdev->dev,
+ "Only 1 tc used, but last mode is FC_PFC\n");
+
+ hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
+ } else if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
+ /* fc_mode_last_time records the last fc_mode when
+ * DCB is enabled, so that fc_mode can be set to
+ * the correct value when DCB is disabled.
+ */
+ hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
+ hdev->tm_info.fc_mode = HCLGE_FC_PFC;
+ }
+}
+
+static void hclge_update_fc_mode(struct hclge_dev *hdev)
+{
+ if (!hdev->tm_info.pfc_en) {
+ hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
+ return;
+ }
+
+ if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
+ hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
+ hdev->tm_info.fc_mode = HCLGE_FC_PFC;
+ }
+}
+
+void hclge_tm_pfc_info_update(struct hclge_dev *hdev)
+{
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
+ hclge_update_fc_mode(hdev);
+ else
+ hclge_update_fc_mode_by_dcb_flag(hdev);
+}
+
+static void hclge_tm_schd_info_init(struct hclge_dev *hdev)
+{
+ hclge_tm_pg_info_init(hdev);
+
+ hclge_tm_tc_info_init(hdev);
+
+ hclge_tm_vport_info_update(hdev);
+
+ hclge_tm_pfc_info_update(hdev);
+}
+
+static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev)
+{
+ int ret;
+ u32 i;
+
+ if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
+ return 0;
+
+ for (i = 0; i < hdev->tm_info.num_pg; i++) {
+ /* Cfg mapping */
+ ret = hclge_tm_pg_to_pri_map_cfg(
+ hdev, i, hdev->tm_info.pg_info[i].tc_bit_map);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev)
+{
+ u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate;
+ struct hclge_shaper_ir_para ir_para;
+ u32 shaper_para;
+ int ret;
+ u32 i;
+
+ /* Cfg pg schd */
+ if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
+ return 0;
+
+ /* Pg to pri */
+ for (i = 0; i < hdev->tm_info.num_pg; i++) {
+ u32 rate = hdev->tm_info.pg_info[i].bw_limit;
+
+ /* Calc shaper para */
+ ret = hclge_shaper_para_calc(rate, HCLGE_SHAPER_LVL_PG,
+ &ir_para, max_tm_rate);
+ if (ret)
+ return ret;
+
+ shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
+ HCLGE_SHAPER_BS_U_DEF,
+ HCLGE_SHAPER_BS_S_DEF);
+ ret = hclge_tm_pg_shapping_cfg(hdev,
+ HCLGE_TM_SHAP_C_BUCKET, i,
+ shaper_para, rate);
+ if (ret)
+ return ret;
+
+ shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b,
+ ir_para.ir_u,
+ ir_para.ir_s,
+ HCLGE_SHAPER_BS_U_DEF,
+ HCLGE_SHAPER_BS_S_DEF);
+ ret = hclge_tm_pg_shapping_cfg(hdev,
+ HCLGE_TM_SHAP_P_BUCKET, i,
+ shaper_para, rate);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hclge_tm_pg_dwrr_cfg(struct hclge_dev *hdev)
+{
+ int ret;
+ u32 i;
+
+ /* cfg pg schd */
+ if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
+ return 0;
+
+ /* pg to prio */
+ for (i = 0; i < hdev->tm_info.num_pg; i++) {
+ /* Cfg dwrr */
+ ret = hclge_tm_pg_weight_cfg(hdev, i, hdev->tm_info.pg_dwrr[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hclge_vport_q_to_qs_map(struct hclge_dev *hdev,
+ struct hclge_vport *vport)
+{
+ struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
+ struct hnae3_tc_info *tc_info = &kinfo->tc_info;
+ struct hnae3_queue **tqp = kinfo->tqp;
+ u32 i, j;
+ int ret;
+
+ for (i = 0; i < tc_info->num_tc; i++) {
+ for (j = 0; j < tc_info->tqp_count[i]; j++) {
+ struct hnae3_queue *q = tqp[tc_info->tqp_offset[i] + j];
+
+ ret = hclge_tm_q_to_qs_map_cfg(hdev,
+ hclge_get_queue_id(q),
+ vport->qs_offset + i);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int hclge_tm_pri_q_qs_cfg_tc_base(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport = hdev->vport;
+ u16 i, k;
+ int ret;
+
+ /* Cfg qs -> pri mapping, one-to-one mapping */
+ for (k = 0; k < hdev->num_alloc_vport; k++) {
+ struct hnae3_knic_private_info *kinfo = &vport[k].nic.kinfo;
+
+ for (i = 0; i < kinfo->tc_info.max_tc; i++) {
+ u8 pri = i < kinfo->tc_info.num_tc ? i : 0;
+ bool link_vld = i < kinfo->tc_info.num_tc;
+
+ ret = hclge_tm_qs_to_pri_map_cfg(hdev,
+ vport[k].qs_offset + i,
+ pri, link_vld);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int hclge_tm_pri_q_qs_cfg_vnet_base(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport = hdev->vport;
+ u16 i, k;
+ int ret;
+
+ /* Cfg qs -> pri mapping, qs = tc, pri = vf, 8 qs -> 1 pri */
+ for (k = 0; k < hdev->num_alloc_vport; k++)
+ for (i = 0; i < HNAE3_MAX_TC; i++) {
+ ret = hclge_tm_qs_to_pri_map_cfg(hdev,
+ vport[k].qs_offset + i,
+ k, true);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport = hdev->vport;
+ int ret;
+ u32 i;
+
+ if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE)
+ ret = hclge_tm_pri_q_qs_cfg_tc_base(hdev);
+ else if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE)
+ ret = hclge_tm_pri_q_qs_cfg_vnet_base(hdev);
+ else
+ return -EINVAL;
+
+ if (ret)
+ return ret;
+
+ /* Cfg q -> qs mapping */
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ ret = hclge_vport_q_to_qs_map(hdev, vport);
+ if (ret)
+ return ret;
+
+ vport++;
+ }
+
+ return 0;
+}
+
+static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev)
+{
+ u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate;
+ struct hclge_shaper_ir_para ir_para;
+ u32 shaper_para_c, shaper_para_p;
+ int ret;
+ u32 i;
+
+ for (i = 0; i < hdev->tc_max; i++) {
+ u32 rate = hdev->tm_info.tc_info[i].bw_limit;
+
+ if (rate) {
+ ret = hclge_shaper_para_calc(rate, HCLGE_SHAPER_LVL_PRI,
+ &ir_para, max_tm_rate);
+ if (ret)
+ return ret;
+
+ shaper_para_c = hclge_tm_get_shapping_para(0, 0, 0,
+ HCLGE_SHAPER_BS_U_DEF,
+ HCLGE_SHAPER_BS_S_DEF);
+ shaper_para_p = hclge_tm_get_shapping_para(ir_para.ir_b,
+ ir_para.ir_u,
+ ir_para.ir_s,
+ HCLGE_SHAPER_BS_U_DEF,
+ HCLGE_SHAPER_BS_S_DEF);
+ } else {
+ shaper_para_c = 0;
+ shaper_para_p = 0;
+ }
+
+ ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET, i,
+ shaper_para_c, rate);
+ if (ret)
+ return ret;
+
+ ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET, i,
+ shaper_para_p, rate);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport)
+{
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_shaper_ir_para ir_para;
+ u32 shaper_para;
+ int ret;
+
+ ret = hclge_shaper_para_calc(vport->bw_limit, HCLGE_SHAPER_LVL_VF,
+ &ir_para,
+ hdev->ae_dev->dev_specs.max_tm_rate);
+ if (ret)
+ return ret;
+
+ shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
+ HCLGE_SHAPER_BS_U_DEF,
+ HCLGE_SHAPER_BS_S_DEF);
+ ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET,
+ vport->vport_id, shaper_para,
+ vport->bw_limit);
+ if (ret)
+ return ret;
+
+ shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b, ir_para.ir_u,
+ ir_para.ir_s,
+ HCLGE_SHAPER_BS_U_DEF,
+ HCLGE_SHAPER_BS_S_DEF);
+ ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET,
+ vport->vport_id, shaper_para,
+ vport->bw_limit);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int hclge_tm_pri_vnet_base_shaper_qs_cfg(struct hclge_vport *vport)
+{
+ struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
+ struct hclge_dev *hdev = vport->back;
+ u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate;
+ struct hclge_shaper_ir_para ir_para;
+ u32 i;
+ int ret;
+
+ for (i = 0; i < kinfo->tc_info.num_tc; i++) {
+ ret = hclge_shaper_para_calc(hdev->tm_info.tc_info[i].bw_limit,
+ HCLGE_SHAPER_LVL_QSET,
+ &ir_para, max_tm_rate);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hclge_tm_pri_vnet_base_shaper_cfg(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport = hdev->vport;
+ int ret;
+ u32 i;
+
+ /* Need to configure the vport shaper */
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ ret = hclge_tm_pri_vnet_base_shaper_pri_cfg(vport);
+ if (ret)
+ return ret;
+
+ ret = hclge_tm_pri_vnet_base_shaper_qs_cfg(vport);
+ if (ret)
+ return ret;
+
+ vport++;
+ }
+
+ return 0;
+}
+
+static int hclge_tm_pri_shaper_cfg(struct hclge_dev *hdev)
+{
+ int ret;
+
+ if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
+ ret = hclge_tm_pri_tc_base_shaper_cfg(hdev);
+ if (ret)
+ return ret;
+ } else {
+ ret = hclge_tm_pri_vnet_base_shaper_cfg(hdev);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport = hdev->vport;
+ struct hclge_pg_info *pg_info;
+ u8 dwrr;
+ int ret;
+ u32 i, k;
+
+ for (i = 0; i < hdev->tc_max; i++) {
+ pg_info =
+ &hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
+ dwrr = pg_info->tc_dwrr[i];
+
+ ret = hclge_tm_pri_weight_cfg(hdev, i, dwrr);
+ if (ret)
+ return ret;
+
+ for (k = 0; k < hdev->num_alloc_vport; k++) {
+ struct hnae3_knic_private_info *kinfo = &vport[k].nic.kinfo;
+
+ if (i >= kinfo->tc_info.max_tc)
+ continue;
+
+ dwrr = i < kinfo->tc_info.num_tc ? vport[k].dwrr : 0;
+ ret = hclge_tm_qs_weight_cfg(
+ hdev, vport[k].qs_offset + i,
+ dwrr);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int hclge_tm_ets_tc_dwrr_cfg(struct hclge_dev *hdev)
+{
+#define DEFAULT_TC_OFFSET 14
+
+ struct hclge_ets_tc_weight_cmd *ets_weight;
+ struct hclge_desc desc;
+ unsigned int i;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, false);
+ ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data;
+
+ for (i = 0; i < HNAE3_MAX_TC; i++) {
+ struct hclge_pg_info *pg_info;
+
+ pg_info = &hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
+ ets_weight->tc_weight[i] = pg_info->tc_dwrr[i];
+ }
+
+ ets_weight->weight_offset = DEFAULT_TC_OFFSET;
+
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
+static int hclge_tm_pri_vnet_base_dwrr_pri_cfg(struct hclge_vport *vport)
+{
+ struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+ u8 i;
+
+ /* Vf dwrr */
+ ret = hclge_tm_pri_weight_cfg(hdev, vport->vport_id, vport->dwrr);
+ if (ret)
+ return ret;
+
+ /* Qset dwrr */
+ for (i = 0; i < kinfo->tc_info.num_tc; i++) {
+ ret = hclge_tm_qs_weight_cfg(
+ hdev, vport->qs_offset + i,
+ hdev->tm_info.pg_info[0].tc_dwrr[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hclge_tm_pri_vnet_base_dwrr_cfg(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport = hdev->vport;
+ int ret;
+ u32 i;
+
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ ret = hclge_tm_pri_vnet_base_dwrr_pri_cfg(vport);
+ if (ret)
+ return ret;
+
+ vport++;
+ }
+
+ return 0;
+}
+
+static int hclge_tm_pri_dwrr_cfg(struct hclge_dev *hdev)
+{
+ int ret;
+
+ if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
+ ret = hclge_tm_pri_tc_base_dwrr_cfg(hdev);
+ if (ret)
+ return ret;
+
+ if (!hnae3_dev_dcb_supported(hdev))
+ return 0;
+
+ ret = hclge_tm_ets_tc_dwrr_cfg(hdev);
+ if (ret == -EOPNOTSUPP) {
+ dev_warn(&hdev->pdev->dev,
+ "fw %08x doesn't support ets tc weight cmd\n",
+ hdev->fw_version);
+ ret = 0;
+ }
+
+ return ret;
+ } else {
+ ret = hclge_tm_pri_vnet_base_dwrr_cfg(hdev);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hclge_tm_map_cfg(struct hclge_dev *hdev)
+{
+ int ret;
+
+ ret = hclge_up_to_tc_map(hdev);
+ if (ret)
+ return ret;
+
+ if (hdev->vport[0].nic.kinfo.tc_map_mode == HNAE3_TC_MAP_MODE_DSCP) {
+ ret = hclge_dscp_to_tc_map(hdev);
+ if (ret)
+ return ret;
+ }
+
+ ret = hclge_tm_pg_to_pri_map(hdev);
+ if (ret)
+ return ret;
+
+ return hclge_tm_pri_q_qs_cfg(hdev);
+}
+
+static int hclge_tm_shaper_cfg(struct hclge_dev *hdev)
+{
+ int ret;
+
+ ret = hclge_tm_port_shaper_cfg(hdev);
+ if (ret)
+ return ret;
+
+ ret = hclge_tm_pg_shaper_cfg(hdev);
+ if (ret)
+ return ret;
+
+ return hclge_tm_pri_shaper_cfg(hdev);
+}
+
+int hclge_tm_dwrr_cfg(struct hclge_dev *hdev)
+{
+ int ret;
+
+ ret = hclge_tm_pg_dwrr_cfg(hdev);
+ if (ret)
+ return ret;
+
+ return hclge_tm_pri_dwrr_cfg(hdev);
+}
+
+static int hclge_tm_lvl2_schd_mode_cfg(struct hclge_dev *hdev)
+{
+ int ret;
+ u8 i;
+
+ /* Only configured in TC-based scheduler mode */
+ if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE)
+ return 0;
+
+ for (i = 0; i < hdev->tm_info.num_pg; i++) {
+ ret = hclge_tm_pg_schd_mode_cfg(hdev, i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hclge_tm_schd_mode_tc_base_cfg(struct hclge_dev *hdev, u8 pri_id)
+{
+ struct hclge_vport *vport = hdev->vport;
+ int ret;
+ u8 mode;
+ u16 i;
+
+ ret = hclge_tm_pri_schd_mode_cfg(hdev, pri_id);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ struct hnae3_knic_private_info *kinfo = &vport[i].nic.kinfo;
+
+ if (pri_id >= kinfo->tc_info.max_tc)
+ continue;
+
+ mode = pri_id < kinfo->tc_info.num_tc ? HCLGE_SCH_MODE_DWRR :
+ HCLGE_SCH_MODE_SP;
+ ret = hclge_tm_qs_schd_mode_cfg(hdev,
+ vport[i].qs_offset + pri_id,
+ mode);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hclge_tm_schd_mode_vnet_base_cfg(struct hclge_vport *vport)
+{
+ struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+ u8 i;
+
+ if (vport->vport_id >= HNAE3_MAX_TC)
+ return -EINVAL;
+
+ ret = hclge_tm_pri_schd_mode_cfg(hdev, vport->vport_id);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < kinfo->tc_info.num_tc; i++) {
+ u8 sch_mode = hdev->tm_info.tc_info[i].tc_sch_mode;
+
+ ret = hclge_tm_qs_schd_mode_cfg(hdev, vport->qs_offset + i,
+ sch_mode);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hclge_tm_lvl34_schd_mode_cfg(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport = hdev->vport;
+ int ret;
+ u8 i;
+
+ if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
+ for (i = 0; i < hdev->tc_max; i++) {
+ ret = hclge_tm_schd_mode_tc_base_cfg(hdev, i);
+ if (ret)
+ return ret;
+ }
+ } else {
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ ret = hclge_tm_schd_mode_vnet_base_cfg(vport);
+ if (ret)
+ return ret;
+
+ vport++;
+ }
+ }
+
+ return 0;
+}
+
+static int hclge_tm_schd_mode_hw(struct hclge_dev *hdev)
+{
+ int ret;
+
+ ret = hclge_tm_lvl2_schd_mode_cfg(hdev);
+ if (ret)
+ return ret;
+
+ return hclge_tm_lvl34_schd_mode_cfg(hdev);
+}
+
+int hclge_tm_schd_setup_hw(struct hclge_dev *hdev)
+{
+ int ret;
+
+ /* Cfg tm mapping */
+ ret = hclge_tm_map_cfg(hdev);
+ if (ret)
+ return ret;
+
+ /* Cfg tm shaper */
+ ret = hclge_tm_shaper_cfg(hdev);
+ if (ret)
+ return ret;
+
+ /* Cfg dwrr */
+ ret = hclge_tm_dwrr_cfg(hdev);
+ if (ret)
+ return ret;
+
+ /* Cfg schd mode for each level schd */
+ ret = hclge_tm_schd_mode_hw(hdev);
+ if (ret)
+ return ret;
+
+ return hclge_tm_flush_cfg(hdev, false);
+}
+
+static int hclge_pause_param_setup_hw(struct hclge_dev *hdev)
+{
+ struct hclge_mac *mac = &hdev->hw.mac;
+
+ return hclge_pause_param_cfg(hdev, mac->mac_addr,
+ HCLGE_DEFAULT_PAUSE_TRANS_GAP,
+ HCLGE_DEFAULT_PAUSE_TRANS_TIME);
+}
+
+static int hclge_pfc_setup_hw(struct hclge_dev *hdev)
+{
+ u8 enable_bitmap = 0;
+
+ if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
+ enable_bitmap = HCLGE_TX_MAC_PAUSE_EN_MSK |
+ HCLGE_RX_MAC_PAUSE_EN_MSK;
+
+ return hclge_pfc_pause_en_cfg(hdev, enable_bitmap,
+ hdev->tm_info.pfc_en);
+}
+
+/* The queues used for backpressure are divided into several groups;
+ * each group contains 32 queue sets, which can be represented by a u32 bitmap.
+ */
+static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc)
+{
+ u16 grp_id_shift = HCLGE_BP_GRP_ID_S;
+ u16 grp_id_mask = HCLGE_BP_GRP_ID_M;
+ u8 grp_num = HCLGE_BP_GRP_NUM;
+ int i;
+
+ if (hdev->num_tqps > HCLGE_TQP_MAX_SIZE_DEV_V2) {
+ grp_num = HCLGE_BP_EXT_GRP_NUM;
+ grp_id_mask = HCLGE_BP_EXT_GRP_ID_M;
+ grp_id_shift = HCLGE_BP_EXT_GRP_ID_S;
+ }
+
+ for (i = 0; i < grp_num; i++) {
+ u32 qs_bitmap = 0;
+ int k, ret;
+
+ for (k = 0; k < hdev->num_alloc_vport; k++) {
+ struct hclge_vport *vport = &hdev->vport[k];
+ u16 qs_id = vport->qs_offset + tc;
+ u8 grp, sub_grp;
+
+ grp = hnae3_get_field(qs_id, grp_id_mask, grp_id_shift);
+ sub_grp = hnae3_get_field(qs_id, HCLGE_BP_SUB_GRP_ID_M,
+ HCLGE_BP_SUB_GRP_ID_S);
+ if (i == grp)
+ qs_bitmap |= (1 << sub_grp);
+ }
+
+ ret = hclge_tm_qs_bp_cfg(hdev, tc, i, qs_bitmap);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int hclge_mac_pause_setup_hw(struct hclge_dev *hdev)
+{
+ bool tx_en, rx_en;
+
+ switch (hdev->tm_info.fc_mode) {
+ case HCLGE_FC_NONE:
+ tx_en = false;
+ rx_en = false;
+ break;
+ case HCLGE_FC_RX_PAUSE:
+ tx_en = false;
+ rx_en = true;
+ break;
+ case HCLGE_FC_TX_PAUSE:
+ tx_en = true;
+ rx_en = false;
+ break;
+ case HCLGE_FC_FULL:
+ tx_en = true;
+ rx_en = true;
+ break;
+ case HCLGE_FC_PFC:
+ tx_en = false;
+ rx_en = false;
+ break;
+ default:
+ tx_en = true;
+ rx_en = true;
+ }
+
+ return hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
+}
+
+static int hclge_tm_bp_setup(struct hclge_dev *hdev)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < hdev->tm_info.num_tc; i++) {
+ ret = hclge_bp_setup_hw(hdev, i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int hclge_pause_setup_hw(struct hclge_dev *hdev, bool init)
+{
+ int ret;
+
+ ret = hclge_pause_param_setup_hw(hdev);
+ if (ret)
+ return ret;
+
+ ret = hclge_mac_pause_setup_hw(hdev);
+ if (ret)
+ return ret;
+
+ /* Only DCB-supported dev supports qset back pressure and pfc cmd */
+ if (!hnae3_dev_dcb_supported(hdev))
+ return 0;
+
+ /* GE MAC does not support PFC. When the driver is initializing and
+ * the MAC is in GE mode, ignore the error here; otherwise
+ * initialization will fail.
+ */
+ ret = hclge_pfc_setup_hw(hdev);
+ if (init && ret == -EOPNOTSUPP)
+ dev_warn(&hdev->pdev->dev, "GE MAC does not support pfc\n");
+ else if (ret) {
+ dev_err(&hdev->pdev->dev, "config pfc failed! ret = %d\n",
+ ret);
+ return ret;
+ }
+
+ return hclge_tm_bp_setup(hdev);
+}
+
+void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc)
+{
+ struct hclge_vport *vport = hdev->vport;
+ struct hnae3_knic_private_info *kinfo;
+ u32 i, k;
+
+ for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
+ hdev->tm_info.prio_tc[i] = prio_tc[i];
+
+ for (k = 0; k < hdev->num_alloc_vport; k++) {
+ kinfo = &vport[k].nic.kinfo;
+ kinfo->tc_info.prio_tc[i] = prio_tc[i];
+ }
+ }
+}
+
+void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
+{
+ u8 bit_map = 0;
+ u8 i;
+
+ hdev->tm_info.num_tc = num_tc;
+
+ for (i = 0; i < hdev->tm_info.num_tc; i++)
+ bit_map |= BIT(i);
+
+ if (!bit_map) {
+ bit_map = 1;
+ hdev->tm_info.num_tc = 1;
+ }
+
+ hdev->hw_tc_map = bit_map;
+
+ hclge_tm_schd_info_init(hdev);
+}
+
+int hclge_tm_init_hw(struct hclge_dev *hdev, bool init)
+{
+ int ret;
+
+ if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) &&
+ (hdev->tx_sch_mode != HCLGE_FLAG_VNET_BASE_SCH_MODE))
+ return -ENOTSUPP;
+
+ ret = hclge_tm_schd_setup_hw(hdev);
+ if (ret)
+ return ret;
+
+ ret = hclge_pause_setup_hw(hdev, init);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int hclge_tm_schd_init(struct hclge_dev *hdev)
+{
+ /* fc_mode is HCLGE_FC_FULL on reset */
+ hdev->tm_info.fc_mode = HCLGE_FC_FULL;
+ hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
+
+ if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE &&
+ hdev->tm_info.num_pg != 1)
+ return -EINVAL;
+
+ hclge_tm_schd_info_init(hdev);
+ hclge_dscp_to_prio_map_init(hdev);
+
+ return hclge_tm_init_hw(hdev, true);
+}
+
+int hclge_tm_vport_map_update(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport = hdev->vport;
+ int ret;
+
+ hclge_tm_vport_tc_info_update(vport);
+
+ ret = hclge_vport_q_to_qs_map(hdev, vport);
+ if (ret)
+ return ret;
+
+ if (hdev->tm_info.num_tc == 1 && !hdev->tm_info.pfc_en)
+ return 0;
+
+ return hclge_tm_bp_setup(hdev);
+}
+
+int hclge_tm_get_qset_num(struct hclge_dev *hdev, u16 *qset_num)
+{
+ struct hclge_tm_nodes_cmd *nodes;
+ struct hclge_desc desc;
+ int ret;
+
+ if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) {
+ /* Each PF has 8 qsets and each VF has 1 qset */
+ *qset_num = HCLGE_TM_PF_MAX_QSET_NUM + pci_num_vf(hdev->pdev);
+ return 0;
+ }
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NODES, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get qset num, ret = %d\n", ret);
+ return ret;
+ }
+
+ nodes = (struct hclge_tm_nodes_cmd *)desc.data;
+ *qset_num = le16_to_cpu(nodes->qset_num);
+ return 0;
+}
+
+int hclge_tm_get_pri_num(struct hclge_dev *hdev, u8 *pri_num)
+{
+ struct hclge_tm_nodes_cmd *nodes;
+ struct hclge_desc desc;
+ int ret;
+
+ if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) {
+ *pri_num = HCLGE_TM_PF_MAX_PRI_NUM;
+ return 0;
+ }
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NODES, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get pri num, ret = %d\n", ret);
+ return ret;
+ }
+
+ nodes = (struct hclge_tm_nodes_cmd *)desc.data;
+ *pri_num = nodes->pri_num;
+ return 0;
+}
+
+int hclge_tm_get_qset_map_pri(struct hclge_dev *hdev, u16 qset_id, u8 *priority,
+ u8 *link_vld)
+{
+ struct hclge_qs_to_pri_link_cmd *map;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_TO_PRI_LINK, true);
+ map = (struct hclge_qs_to_pri_link_cmd *)desc.data;
+ map->qs_id = cpu_to_le16(qset_id);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get qset map priority, ret = %d\n", ret);
+ return ret;
+ }
+
+ *priority = map->priority;
+ *link_vld = map->link_vld;
+ return 0;
+}
+
+int hclge_tm_get_qset_sch_mode(struct hclge_dev *hdev, u16 qset_id, u8 *mode)
+{
+ struct hclge_qs_sch_mode_cfg_cmd *qs_sch_mode;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_SCH_MODE_CFG, true);
+ qs_sch_mode = (struct hclge_qs_sch_mode_cfg_cmd *)desc.data;
+ qs_sch_mode->qs_id = cpu_to_le16(qset_id);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get qset sch mode, ret = %d\n", ret);
+ return ret;
+ }
+
+ *mode = qs_sch_mode->sch_mode;
+ return 0;
+}
+
+int hclge_tm_get_qset_weight(struct hclge_dev *hdev, u16 qset_id, u8 *weight)
+{
+ struct hclge_qs_weight_cmd *qs_weight;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_WEIGHT, true);
+ qs_weight = (struct hclge_qs_weight_cmd *)desc.data;
+ qs_weight->qs_id = cpu_to_le16(qset_id);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get qset weight, ret = %d\n", ret);
+ return ret;
+ }
+
+ *weight = qs_weight->dwrr;
+ return 0;
+}
+
+int hclge_tm_get_qset_shaper(struct hclge_dev *hdev, u16 qset_id,
+ struct hclge_tm_shaper_para *para)
+{
+ struct hclge_qs_shapping_cmd *shap_cfg_cmd;
+ struct hclge_desc desc;
+ u32 shapping_para;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QCN_SHAPPING_CFG, true);
+ shap_cfg_cmd = (struct hclge_qs_shapping_cmd *)desc.data;
+ shap_cfg_cmd->qs_id = cpu_to_le16(qset_id);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get qset %u shaper, ret = %d\n", qset_id,
+ ret);
+ return ret;
+ }
+
+ shapping_para = le32_to_cpu(shap_cfg_cmd->qs_shapping_para);
+ para->ir_b = hclge_tm_get_field(shapping_para, IR_B);
+ para->ir_u = hclge_tm_get_field(shapping_para, IR_U);
+ para->ir_s = hclge_tm_get_field(shapping_para, IR_S);
+ para->bs_b = hclge_tm_get_field(shapping_para, BS_B);
+ para->bs_s = hclge_tm_get_field(shapping_para, BS_S);
+ para->flag = shap_cfg_cmd->flag;
+ para->rate = le32_to_cpu(shap_cfg_cmd->qs_rate);
+ return 0;
+}
+
+int hclge_tm_get_pri_sch_mode(struct hclge_dev *hdev, u8 pri_id, u8 *mode)
+{
+ struct hclge_pri_sch_mode_cfg_cmd *pri_sch_mode;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_SCH_MODE_CFG, true);
+ pri_sch_mode = (struct hclge_pri_sch_mode_cfg_cmd *)desc.data;
+ pri_sch_mode->pri_id = pri_id;
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get priority sch mode, ret = %d\n", ret);
+ return ret;
+ }
+
+ *mode = pri_sch_mode->sch_mode;
+ return 0;
+}
+
+int hclge_tm_get_pri_weight(struct hclge_dev *hdev, u8 pri_id, u8 *weight)
+{
+ struct hclge_priority_weight_cmd *priority_weight;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_WEIGHT, true);
+ priority_weight = (struct hclge_priority_weight_cmd *)desc.data;
+ priority_weight->pri_id = pri_id;
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get priority weight, ret = %d\n", ret);
+ return ret;
+ }
+
+ *weight = priority_weight->dwrr;
+ return 0;
+}
+
+int hclge_tm_get_pri_shaper(struct hclge_dev *hdev, u8 pri_id,
+ enum hclge_opcode_type cmd,
+ struct hclge_tm_shaper_para *para)
+{
+ struct hclge_pri_shapping_cmd *shap_cfg_cmd;
+ struct hclge_desc desc;
+ u32 shapping_para;
+ int ret;
+
+ if (cmd != HCLGE_OPC_TM_PRI_C_SHAPPING &&
+ cmd != HCLGE_OPC_TM_PRI_P_SHAPPING)
+ return -EINVAL;
+
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;
+ shap_cfg_cmd->pri_id = pri_id;
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get priority shaper(%#x), ret = %d\n",
+ cmd, ret);
+ return ret;
+ }
+
+ shapping_para = le32_to_cpu(shap_cfg_cmd->pri_shapping_para);
+ para->ir_b = hclge_tm_get_field(shapping_para, IR_B);
+ para->ir_u = hclge_tm_get_field(shapping_para, IR_U);
+ para->ir_s = hclge_tm_get_field(shapping_para, IR_S);
+ para->bs_b = hclge_tm_get_field(shapping_para, BS_B);
+ para->bs_s = hclge_tm_get_field(shapping_para, BS_S);
+ para->flag = shap_cfg_cmd->flag;
+ para->rate = le32_to_cpu(shap_cfg_cmd->pri_rate);
+ return 0;
+}
+
+int hclge_tm_get_q_to_qs_map(struct hclge_dev *hdev, u16 q_id, u16 *qset_id)
+{
+ struct hclge_nq_to_qs_link_cmd *map;
+ struct hclge_desc desc;
+ u16 qs_id_l;
+ u16 qs_id_h;
+ int ret;
+
+ map = (struct hclge_nq_to_qs_link_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NQ_TO_QS_LINK, true);
+ map->nq_id = cpu_to_le16(q_id);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get queue to qset map, ret = %d\n", ret);
+ return ret;
+ }
+ *qset_id = le16_to_cpu(map->qset_id);
+
+ /* convert qset_id to the following format, drop the vld bit
+ * | qs_id_h | vld | qs_id_l |
+ * qset_id: | 15 ~ 11 | 10 | 9 ~ 0 |
+ * \ \ / /
+ * \ \ / /
+ * qset_id: | 15 | 14 ~ 10 | 9 ~ 0 |
+ */
+ qs_id_l = hnae3_get_field(*qset_id, HCLGE_TM_QS_ID_L_MSK,
+ HCLGE_TM_QS_ID_L_S);
+ qs_id_h = hnae3_get_field(*qset_id, HCLGE_TM_QS_ID_H_EXT_MSK,
+ HCLGE_TM_QS_ID_H_EXT_S);
+ *qset_id = 0;
+ hnae3_set_field(*qset_id, HCLGE_TM_QS_ID_L_MSK, HCLGE_TM_QS_ID_L_S,
+ qs_id_l);
+ hnae3_set_field(*qset_id, HCLGE_TM_QS_ID_H_MSK, HCLGE_TM_QS_ID_H_S,
+ qs_id_h);
+ return 0;
+}
+
+int hclge_tm_get_q_to_tc(struct hclge_dev *hdev, u16 q_id, u8 *tc_id)
+{
+#define HCLGE_TM_TC_MASK 0x7
+
+ struct hclge_tqp_tx_queue_tc_cmd *tc;
+ struct hclge_desc desc;
+ int ret;
+
+ tc = (struct hclge_tqp_tx_queue_tc_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TQP_TX_QUEUE_TC, true);
+ tc->queue_id = cpu_to_le16(q_id);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get queue to tc map, ret = %d\n", ret);
+ return ret;
+ }
+
+ *tc_id = tc->tc_id & HCLGE_TM_TC_MASK;
+ return 0;
+}
+
+int hclge_tm_get_pg_to_pri_map(struct hclge_dev *hdev, u8 pg_id,
+ u8 *pri_bit_map)
+{
+ struct hclge_pg_to_pri_link_cmd *map;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_TO_PRI_LINK, true);
+ map = (struct hclge_pg_to_pri_link_cmd *)desc.data;
+ map->pg_id = pg_id;
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get pg to pri map, ret = %d\n", ret);
+ return ret;
+ }
+
+ *pri_bit_map = map->pri_bit_map;
+ return 0;
+}
+
+int hclge_tm_get_pg_weight(struct hclge_dev *hdev, u8 pg_id, u8 *weight)
+{
+ struct hclge_pg_weight_cmd *pg_weight_cmd;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_WEIGHT, true);
+ pg_weight_cmd = (struct hclge_pg_weight_cmd *)desc.data;
+ pg_weight_cmd->pg_id = pg_id;
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get pg weight, ret = %d\n", ret);
+ return ret;
+ }
+
+ *weight = pg_weight_cmd->dwrr;
+ return 0;
+}
+
+int hclge_tm_get_pg_sch_mode(struct hclge_dev *hdev, u8 pg_id, u8 *mode)
+{
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_SCH_MODE_CFG, true);
+ desc.data[0] = cpu_to_le32(pg_id);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get pg sch mode, ret = %d\n", ret);
+ return ret;
+ }
+
+ *mode = (u8)le32_to_cpu(desc.data[1]);
+ return 0;
+}
+
+int hclge_tm_get_pg_shaper(struct hclge_dev *hdev, u8 pg_id,
+ enum hclge_opcode_type cmd,
+ struct hclge_tm_shaper_para *para)
+{
+ struct hclge_pg_shapping_cmd *shap_cfg_cmd;
+ struct hclge_desc desc;
+ u32 shapping_para;
+ int ret;
+
+ if (cmd != HCLGE_OPC_TM_PG_C_SHAPPING &&
+ cmd != HCLGE_OPC_TM_PG_P_SHAPPING)
+ return -EINVAL;
+
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;
+ shap_cfg_cmd->pg_id = pg_id;
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get pg shaper(%#x), ret = %d\n",
+ cmd, ret);
+ return ret;
+ }
+
+ shapping_para = le32_to_cpu(shap_cfg_cmd->pg_shapping_para);
+ para->ir_b = hclge_tm_get_field(shapping_para, IR_B);
+ para->ir_u = hclge_tm_get_field(shapping_para, IR_U);
+ para->ir_s = hclge_tm_get_field(shapping_para, IR_S);
+ para->bs_b = hclge_tm_get_field(shapping_para, BS_B);
+ para->bs_s = hclge_tm_get_field(shapping_para, BS_S);
+ para->flag = shap_cfg_cmd->flag;
+ para->rate = le32_to_cpu(shap_cfg_cmd->pg_rate);
+ return 0;
+}
+
+int hclge_tm_get_port_shaper(struct hclge_dev *hdev,
+ struct hclge_tm_shaper_para *para)
+{
+ struct hclge_port_shapping_cmd *port_shap_cfg_cmd;
+ struct hclge_desc desc;
+ u32 shapping_para;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get port shaper, ret = %d\n", ret);
+ return ret;
+ }
+
+ port_shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;
+ shapping_para = le32_to_cpu(port_shap_cfg_cmd->port_shapping_para);
+ para->ir_b = hclge_tm_get_field(shapping_para, IR_B);
+ para->ir_u = hclge_tm_get_field(shapping_para, IR_U);
+ para->ir_s = hclge_tm_get_field(shapping_para, IR_S);
+ para->bs_b = hclge_tm_get_field(shapping_para, BS_B);
+ para->bs_s = hclge_tm_get_field(shapping_para, BS_S);
+ para->flag = port_shap_cfg_cmd->flag;
+ para->rate = le32_to_cpu(port_shap_cfg_cmd->port_rate);
+
+ return 0;
+}
+
+int hclge_tm_flush_cfg(struct hclge_dev *hdev, bool enable)
+{
+ struct hclge_desc desc;
+ int ret;
+
+ if (!hnae3_ae_dev_tm_flush_supported(hdev))
+ return 0;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_FLUSH, false);
+
+ desc.data[0] = cpu_to_le32(enable ? HCLGE_TM_FLUSH_EN_MSK : 0);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to config tm flush, ret = %d\n", ret);
+ return ret;
+ }
+
+ if (enable)
+ msleep(HCLGE_TM_FLUSH_TIME_MS);
+
+ return ret;
+}
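
A side note on the backpressure configuration above: hclge_bp_setup_hw() splits each qset id into a group index (bits 9:5, or bits 10:5 on devices with more than HCLGE_TQP_MAX_SIZE_DEV_V2 queues) and a bit position within that group's 32-bit map (bits 4:0). The following standalone sketch is illustrative only; the qset id is an arbitrary example value, and the shifts simply mirror the HCLGE_BP_SUB_GRP_ID_M and HCLGE_BP_GRP_ID_M definitions in hclge_tm.h below.

/* Illustrative-only sketch of the qs_id -> (group, bit) split performed by
 * hclge_bp_setup_hw(); the shifts mirror HCLGE_BP_SUB_GRP_ID_M (bits 4:0)
 * and HCLGE_BP_GRP_ID_M (bits 9:5) from hclge_tm.h. The qset id is an
 * arbitrary example value.
 */
#include <stdio.h>

int main(void)
{
        unsigned int qs_id = 70;                 /* e.g. vport qs_offset + tc */
        unsigned int grp = (qs_id >> 5) & 0x1f;  /* which 32-qset group */
        unsigned int sub_grp = qs_id & 0x1f;     /* bit inside that group */
        unsigned int qs_bitmap = 1u << sub_grp;  /* as sent in hclge_bp_to_qs_map_cmd */

        printf("qs_id %u -> group %u, qs_bitmap 0x%08x\n", qs_id, grp, qs_bitmap);
        return 0;
}
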
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
new file mode 100644
index 0000000000..53eec6df51
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
@@ -0,0 +1,280 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+// Copyright (c) 2016-2017 Hisilicon Limited.
+
+#ifndef __HCLGE_TM_H
+#define __HCLGE_TM_H
+
+#include <linux/types.h>
+
+#include "hnae3.h"
+
+struct hclge_dev;
+struct hclge_vport;
+enum hclge_opcode_type;
+
+/* MAC Pause */
+#define HCLGE_TX_MAC_PAUSE_EN_MSK BIT(0)
+#define HCLGE_RX_MAC_PAUSE_EN_MSK BIT(1)
+
+#define HCLGE_TM_PORT_BASE_MODE_MSK BIT(0)
+
+#define HCLGE_DEFAULT_PAUSE_TRANS_GAP 0x7F
+#define HCLGE_DEFAULT_PAUSE_TRANS_TIME 0xFFFF
+
+/* SP or DWRR */
+#define HCLGE_TM_TX_SCHD_DWRR_MSK BIT(0)
+#define HCLGE_TM_TX_SCHD_SP_MSK 0xFE
+
+#define HCLGE_ETHER_MAX_RATE 100000
+
+#define HCLGE_TM_PF_MAX_PRI_NUM 8
+#define HCLGE_TM_PF_MAX_QSET_NUM 8
+
+#define HCLGE_DSCP_MAP_TC_BD_NUM 2
+#define HCLGE_DSCP_TC_SHIFT(n) (((n) & 1) * 4)
+
+#define HCLGE_TM_FLUSH_TIME_MS 10
+#define HCLGE_TM_FLUSH_EN_MSK BIT(0)
+
+struct hclge_pg_to_pri_link_cmd {
+ u8 pg_id;
+ u8 rsvd1[3];
+ u8 pri_bit_map;
+};
+
+struct hclge_qs_to_pri_link_cmd {
+ __le16 qs_id;
+ __le16 rsvd;
+ u8 priority;
+#define HCLGE_TM_QS_PRI_LINK_VLD_MSK BIT(0)
+ u8 link_vld;
+};
+
+struct hclge_nq_to_qs_link_cmd {
+ __le16 nq_id;
+ __le16 rsvd;
+#define HCLGE_TM_Q_QS_LINK_VLD_MSK BIT(10)
+#define HCLGE_TM_QS_ID_L_MSK GENMASK(9, 0)
+#define HCLGE_TM_QS_ID_L_S 0
+#define HCLGE_TM_QS_ID_H_MSK GENMASK(14, 10)
+#define HCLGE_TM_QS_ID_H_S 10
+#define HCLGE_TM_QS_ID_H_EXT_S 11
+#define HCLGE_TM_QS_ID_H_EXT_MSK GENMASK(15, 11)
+ __le16 qset_id;
+};
+
+struct hclge_tqp_tx_queue_tc_cmd {
+ __le16 queue_id;
+ __le16 rsvd;
+ u8 tc_id;
+ u8 rev[3];
+};
+
+struct hclge_pg_weight_cmd {
+ u8 pg_id;
+ u8 dwrr;
+};
+
+struct hclge_priority_weight_cmd {
+ u8 pri_id;
+ u8 dwrr;
+};
+
+struct hclge_pri_sch_mode_cfg_cmd {
+ u8 pri_id;
+ u8 rsvd[3];
+ u8 sch_mode;
+};
+
+struct hclge_qs_sch_mode_cfg_cmd {
+ __le16 qs_id;
+ u8 rsvd[2];
+ u8 sch_mode;
+};
+
+struct hclge_qs_weight_cmd {
+ __le16 qs_id;
+ u8 dwrr;
+};
+
+struct hclge_ets_tc_weight_cmd {
+ u8 tc_weight[HNAE3_MAX_TC];
+ u8 weight_offset;
+ u8 rsvd[15];
+};
+
+#define HCLGE_TM_SHAP_IR_B_MSK GENMASK(7, 0)
+#define HCLGE_TM_SHAP_IR_B_LSH 0
+#define HCLGE_TM_SHAP_IR_U_MSK GENMASK(11, 8)
+#define HCLGE_TM_SHAP_IR_U_LSH 8
+#define HCLGE_TM_SHAP_IR_S_MSK GENMASK(15, 12)
+#define HCLGE_TM_SHAP_IR_S_LSH 12
+#define HCLGE_TM_SHAP_BS_B_MSK GENMASK(20, 16)
+#define HCLGE_TM_SHAP_BS_B_LSH 16
+#define HCLGE_TM_SHAP_BS_S_MSK GENMASK(25, 21)
+#define HCLGE_TM_SHAP_BS_S_LSH 21
+
+enum hclge_shap_bucket {
+ HCLGE_TM_SHAP_C_BUCKET = 0,
+ HCLGE_TM_SHAP_P_BUCKET,
+};
+
+/* setting bit HCLGE_TM_RATE_VLD to 1 means 'rate' is used to config shaping */
+#define HCLGE_TM_RATE_VLD 0
+
+struct hclge_pri_shapping_cmd {
+ u8 pri_id;
+ u8 rsvd[3];
+ __le32 pri_shapping_para;
+ u8 flag;
+ u8 rsvd1[3];
+ __le32 pri_rate;
+};
+
+struct hclge_pg_shapping_cmd {
+ u8 pg_id;
+ u8 rsvd[3];
+ __le32 pg_shapping_para;
+ u8 flag;
+ u8 rsvd1[3];
+ __le32 pg_rate;
+};
+
+struct hclge_qs_shapping_cmd {
+ __le16 qs_id;
+ u8 rsvd[2];
+ __le32 qs_shapping_para;
+ u8 flag;
+ u8 rsvd1[3];
+ __le32 qs_rate;
+};
+
+#define HCLGE_BP_GRP_NUM 32
+#define HCLGE_BP_SUB_GRP_ID_S 0
+#define HCLGE_BP_SUB_GRP_ID_M GENMASK(4, 0)
+#define HCLGE_BP_GRP_ID_S 5
+#define HCLGE_BP_GRP_ID_M GENMASK(9, 5)
+
+#define HCLGE_BP_EXT_GRP_NUM 40
+#define HCLGE_BP_EXT_GRP_ID_S 5
+#define HCLGE_BP_EXT_GRP_ID_M GENMASK(10, 5)
+
+struct hclge_bp_to_qs_map_cmd {
+ u8 tc_id;
+ u8 rsvd[2];
+ u8 qs_group_id;
+ __le32 qs_bit_map;
+ u32 rsvd1;
+};
+
+#define HCLGE_PFC_DISABLE 0
+#define HCLGE_PFC_TX_RX_DISABLE 0
+
+struct hclge_pfc_en_cmd {
+ u8 tx_rx_en_bitmap;
+ u8 pri_en_bitmap;
+};
+
+struct hclge_cfg_pause_param_cmd {
+ u8 mac_addr[ETH_ALEN];
+ u8 pause_trans_gap;
+ u8 rsvd;
+ __le16 pause_trans_time;
+ u8 rsvd1[6];
+ /* extra mac address used to double-check pause frames */
+ u8 mac_addr_extra[ETH_ALEN];
+ u16 rsvd2;
+};
+
+struct hclge_pfc_stats_cmd {
+ __le64 pkt_num[3];
+};
+
+struct hclge_port_shapping_cmd {
+ __le32 port_shapping_para;
+ u8 flag;
+ u8 rsvd[3];
+ __le32 port_rate;
+};
+
+struct hclge_shaper_ir_para {
+ u8 ir_b; /* IR_B parameter of IR shaper */
+ u8 ir_u; /* IR_U parameter of IR shaper */
+ u8 ir_s; /* IR_S parameter of IR shaper */
+};
+
+struct hclge_tm_nodes_cmd {
+ u8 pg_base_id;
+ u8 pri_base_id;
+ __le16 qset_base_id;
+ __le16 queue_base_id;
+ u8 pg_num;
+ u8 pri_num;
+ __le16 qset_num;
+ __le16 queue_num;
+};
+
+struct hclge_tm_shaper_para {
+ u32 rate;
+ u8 ir_b;
+ u8 ir_u;
+ u8 ir_s;
+ u8 bs_b;
+ u8 bs_s;
+ u8 flag;
+};
+
+#define hclge_tm_set_field(dest, string, val) \
+ hnae3_set_field((dest), \
+ (HCLGE_TM_SHAP_##string##_MSK), \
+ (HCLGE_TM_SHAP_##string##_LSH), val)
+#define hclge_tm_get_field(src, string) \
+ hnae3_get_field((src), HCLGE_TM_SHAP_##string##_MSK, \
+ HCLGE_TM_SHAP_##string##_LSH)
+
+int hclge_tm_schd_init(struct hclge_dev *hdev);
+int hclge_tm_vport_map_update(struct hclge_dev *hdev);
+int hclge_pause_setup_hw(struct hclge_dev *hdev, bool init);
+int hclge_tm_schd_setup_hw(struct hclge_dev *hdev);
+void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc);
+void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc);
+void hclge_tm_pfc_info_update(struct hclge_dev *hdev);
+int hclge_tm_dwrr_cfg(struct hclge_dev *hdev);
+int hclge_tm_init_hw(struct hclge_dev *hdev, bool init);
+int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
+ u8 pfc_bitmap);
+int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx);
+int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr);
+int hclge_mac_pause_setup_hw(struct hclge_dev *hdev);
+void hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats);
+void hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats);
+int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate);
+int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev);
+int hclge_tm_get_qset_num(struct hclge_dev *hdev, u16 *qset_num);
+int hclge_tm_get_pri_num(struct hclge_dev *hdev, u8 *pri_num);
+int hclge_tm_get_qset_map_pri(struct hclge_dev *hdev, u16 qset_id, u8 *priority,
+ u8 *link_vld);
+int hclge_tm_get_qset_sch_mode(struct hclge_dev *hdev, u16 qset_id, u8 *mode);
+int hclge_tm_get_qset_weight(struct hclge_dev *hdev, u16 qset_id, u8 *weight);
+int hclge_tm_get_qset_shaper(struct hclge_dev *hdev, u16 qset_id,
+ struct hclge_tm_shaper_para *para);
+int hclge_tm_get_pri_sch_mode(struct hclge_dev *hdev, u8 pri_id, u8 *mode);
+int hclge_tm_get_pri_weight(struct hclge_dev *hdev, u8 pri_id, u8 *weight);
+int hclge_tm_get_pri_shaper(struct hclge_dev *hdev, u8 pri_id,
+ enum hclge_opcode_type cmd,
+ struct hclge_tm_shaper_para *para);
+int hclge_tm_get_q_to_qs_map(struct hclge_dev *hdev, u16 q_id, u16 *qset_id);
+int hclge_tm_get_q_to_tc(struct hclge_dev *hdev, u16 q_id, u8 *tc_id);
+int hclge_tm_get_pg_to_pri_map(struct hclge_dev *hdev, u8 pg_id,
+ u8 *pri_bit_map);
+int hclge_tm_get_pg_weight(struct hclge_dev *hdev, u8 pg_id, u8 *weight);
+int hclge_tm_get_pg_sch_mode(struct hclge_dev *hdev, u8 pg_id, u8 *mode);
+int hclge_tm_get_pg_shaper(struct hclge_dev *hdev, u8 pg_id,
+ enum hclge_opcode_type cmd,
+ struct hclge_tm_shaper_para *para);
+int hclge_tm_get_port_shaper(struct hclge_dev *hdev,
+ struct hclge_tm_shaper_para *para);
+int hclge_up_to_tc_map(struct hclge_dev *hdev);
+int hclge_dscp_to_tc_map(struct hclge_dev *hdev);
+int hclge_tm_flush_cfg(struct hclge_dev *hdev, bool enable);
+#endif
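
To make the shaper word layout above concrete: hclge_tm_set_field()/hclge_tm_get_field() pack and unpack five fields in one 32-bit word (IR_B in bits 7:0, IR_U in 11:8, IR_S in 15:12, BS_B in 20:16, BS_S in 25:21), which is what hclge_tm_get_shapping_para() builds in hclge_tm.c. Below is a minimal standalone sketch of that packing; the field values are arbitrary examples, not real shaper settings.

/* Illustrative-only packing of the 32-bit shaping word; the shifts and
 * widths mirror the HCLGE_TM_SHAP_*_MSK/_LSH definitions above. The field
 * values used in main() are arbitrary examples, not real shaper settings.
 */
#include <stdio.h>

static unsigned int pack_shaping_para(unsigned int ir_b, unsigned int ir_u,
                                      unsigned int ir_s, unsigned int bs_b,
                                      unsigned int bs_s)
{
        return (ir_b & 0xff) |           /* IR_B: bits 7:0   */
               ((ir_u & 0xf) << 8) |     /* IR_U: bits 11:8  */
               ((ir_s & 0xf) << 12) |    /* IR_S: bits 15:12 */
               ((bs_b & 0x1f) << 16) |   /* BS_B: bits 20:16 */
               ((bs_s & 0x1f) << 21);    /* BS_S: bits 25:21 */
}

int main(void)
{
        /* same argument order as hclge_tm_get_shapping_para() */
        printf("0x%08x\n", pack_shaping_para(10, 2, 1, 20, 16));
        return 0;
}
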
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h
new file mode 100644
index 0000000000..8510b88d49
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2018-2020 Hisilicon Limited. */
+
+/* This must be outside ifdef _HCLGE_TRACE_H */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hns3
+
+#if !defined(_HCLGE_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _HCLGE_TRACE_H_
+
+#include <linux/tracepoint.h>
+
+#define PF_GET_MBX_LEN (sizeof(struct hclge_mbx_vf_to_pf_cmd) / sizeof(u32))
+#define PF_SEND_MBX_LEN (sizeof(struct hclge_mbx_pf_to_vf_cmd) / sizeof(u32))
+
+TRACE_EVENT(hclge_pf_mbx_get,
+ TP_PROTO(
+ struct hclge_dev *hdev,
+ struct hclge_mbx_vf_to_pf_cmd *req),
+ TP_ARGS(hdev, req),
+
+ TP_STRUCT__entry(
+ __field(u8, vfid)
+ __field(u8, code)
+ __field(u8, subcode)
+ __string(pciname, pci_name(hdev->pdev))
+ __string(devname, &hdev->vport[0].nic.kinfo.netdev->name)
+ __array(u32, mbx_data, PF_GET_MBX_LEN)
+ ),
+
+ TP_fast_assign(
+ __entry->vfid = req->mbx_src_vfid;
+ __entry->code = req->msg.code;
+ __entry->subcode = req->msg.subcode;
+ __assign_str(pciname, pci_name(hdev->pdev));
+ __assign_str(devname, &hdev->vport[0].nic.kinfo.netdev->name);
+ memcpy(__entry->mbx_data, req,
+ sizeof(struct hclge_mbx_vf_to_pf_cmd));
+ ),
+
+ TP_printk(
+ "%s %s vfid:%u code:%u subcode:%u data:%s",
+ __get_str(pciname), __get_str(devname), __entry->vfid,
+ __entry->code, __entry->subcode,
+ __print_array(__entry->mbx_data, PF_GET_MBX_LEN, sizeof(u32))
+ )
+);
+
+TRACE_EVENT(hclge_pf_mbx_send,
+ TP_PROTO(
+ struct hclge_dev *hdev,
+ struct hclge_mbx_pf_to_vf_cmd *req),
+ TP_ARGS(hdev, req),
+
+ TP_STRUCT__entry(
+ __field(u8, vfid)
+ __field(u16, code)
+ __string(pciname, pci_name(hdev->pdev))
+ __string(devname, &hdev->vport[0].nic.kinfo.netdev->name)
+ __array(u32, mbx_data, PF_SEND_MBX_LEN)
+ ),
+
+ TP_fast_assign(
+ __entry->vfid = req->dest_vfid;
+ __entry->code = le16_to_cpu(req->msg.code);
+ __assign_str(pciname, pci_name(hdev->pdev));
+ __assign_str(devname, &hdev->vport[0].nic.kinfo.netdev->name);
+ memcpy(__entry->mbx_data, req,
+ sizeof(struct hclge_mbx_pf_to_vf_cmd));
+ ),
+
+ TP_printk(
+ "%s %s vfid:%u code:%u data:%s",
+ __get_str(pciname), __get_str(devname), __entry->vfid,
+ __entry->code,
+ __print_array(__entry->mbx_data, PF_SEND_MBX_LEN, sizeof(u32))
+ )
+);
+
+#endif /* _HCLGE_TRACE_H_ */
+
+/* This must be outside ifdef _HCLGE_TRACE_H */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE hclge_trace
+#include <trace/define_trace.h>
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
new file mode 100644
index 0000000000..537b887fa0
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2016-2017 Hisilicon Limited. */
+
+#ifndef __HCLGEVF_CMD_H
+#define __HCLGEVF_CMD_H
+#include <linux/io.h>
+#include <linux/types.h>
+#include "hnae3.h"
+#include "hclge_comm_cmd.h"
+
+#define HCLGEVF_CMDQ_RX_INVLD_B 0
+#define HCLGEVF_CMDQ_RX_OUTVLD_B 1
+
+struct hclgevf_hw;
+struct hclgevf_dev;
+
+#define HCLGEVF_SYNC_RX_RING_HEAD_EN_B 4
+
+#define HCLGEVF_TQP_REG_OFFSET 0x80000
+#define HCLGEVF_TQP_REG_SIZE 0x200
+
+#define HCLGEVF_TQP_MAX_SIZE_DEV_V2 1024
+#define HCLGEVF_TQP_EXT_REG_OFFSET 0x100
+
+struct hclgevf_tqp_map {
+ __le16 tqp_id; /* Absolute tqp id in this pf */
+ u8 tqp_vf; /* VF id */
+#define HCLGEVF_TQP_MAP_TYPE_PF 0
+#define HCLGEVF_TQP_MAP_TYPE_VF 1
+#define HCLGEVF_TQP_MAP_TYPE_B 0
+#define HCLGEVF_TQP_MAP_EN_B 1
+ u8 tqp_flag; /* Indicate it's pf or vf tqp */
+ __le16 tqp_vid; /* Virtual id in this pf/vf */
+ u8 rsv[18];
+};
+
+#define HCLGEVF_VECTOR_ELEMENTS_PER_CMD 10
+
+enum hclgevf_int_type {
+ HCLGEVF_INT_TX = 0,
+ HCLGEVF_INT_RX,
+ HCLGEVF_INT_EVENT,
+};
+
+struct hclgevf_ctrl_vector_chain {
+ u8 int_vector_id;
+ u8 int_cause_num;
+#define HCLGEVF_INT_TYPE_S 0
+#define HCLGEVF_INT_TYPE_M 0x3
+#define HCLGEVF_TQP_ID_S 2
+#define HCLGEVF_TQP_ID_M (0x3fff << HCLGEVF_TQP_ID_S)
+ __le16 tqp_type_and_id[HCLGEVF_VECTOR_ELEMENTS_PER_CMD];
+ u8 vfid;
+ u8 resv;
+};
+
+#define HCLGEVF_MSIX_OFT_ROCEE_S 0
+#define HCLGEVF_MSIX_OFT_ROCEE_M (0xffff << HCLGEVF_MSIX_OFT_ROCEE_S)
+#define HCLGEVF_VEC_NUM_S 0
+#define HCLGEVF_VEC_NUM_M (0xff << HCLGEVF_VEC_NUM_S)
+struct hclgevf_query_res_cmd {
+ __le16 tqp_num;
+ __le16 reserved;
+ __le16 msixcap_localid_ba_nic;
+ __le16 msixcap_localid_ba_rocee;
+ __le16 vf_intr_vector_number;
+ __le16 rsv[7];
+};
+
+#define HCLGEVF_GRO_EN_B 0
+struct hclgevf_cfg_gro_status_cmd {
+ u8 gro_en;
+ u8 rsv[23];
+};
+
+#define HCLGEVF_LINK_STS_B 0
+#define HCLGEVF_LINK_STATUS BIT(HCLGEVF_LINK_STS_B)
+struct hclgevf_link_status_cmd {
+ u8 status;
+ u8 rsv[23];
+};
+
+#define HCLGEVF_RING_ID_MASK 0x3ff
+#define HCLGEVF_TQP_ENABLE_B 0
+
+struct hclgevf_cfg_com_tqp_queue_cmd {
+ __le16 tqp_id;
+ __le16 stream_id;
+ u8 enable;
+ u8 rsv[19];
+};
+
+struct hclgevf_cfg_tx_queue_pointer_cmd {
+ __le16 tqp_id;
+ __le16 tx_tail;
+ __le16 tx_head;
+ __le16 fbd_num;
+ __le16 ring_offset;
+ u8 rsv[14];
+};
+
+/* this bit indicates that the driver is ready for hardware reset */
+#define HCLGEVF_NIC_SW_RST_RDY_B 16
+#define HCLGEVF_NIC_SW_RST_RDY BIT(HCLGEVF_NIC_SW_RST_RDY_B)
+
+#define HCLGEVF_NIC_CMQ_DESC_NUM 1024
+#define HCLGEVF_NIC_CMQ_DESC_NUM_S 3
+
+#define HCLGEVF_QUERY_DEV_SPECS_BD_NUM 4
+
+#define hclgevf_cmd_setup_basic_desc(desc, opcode, is_read) \
+ hclge_comm_cmd_setup_basic_desc(desc, opcode, is_read)
+
+struct hclgevf_dev_specs_0_cmd {
+ __le32 rsv0;
+ __le32 mac_entry_num;
+ __le32 mng_entry_num;
+ __le16 rss_ind_tbl_size;
+ __le16 rss_key_size;
+ __le16 int_ql_max;
+ u8 max_non_tso_bd_num;
+ u8 rsv1[5];
+};
+
+#define HCLGEVF_DEF_MAX_INT_GL 0x1FE0U
+
+struct hclgevf_dev_specs_1_cmd {
+ __le16 max_frm_size;
+ __le16 rsv0;
+ __le16 max_int_gl;
+ u8 rsv1[18];
+};
+
+int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclge_desc *desc, int num);
+void hclgevf_arq_init(struct hclgevf_dev *hdev);
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c
new file mode 100644
index 0000000000..1b535142c6
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) 2021 Hisilicon Limited. */
+
+#include <net/devlink.h>
+
+#include "hclgevf_devlink.h"
+
+static int hclgevf_devlink_info_get(struct devlink *devlink,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+#define HCLGEVF_DEVLINK_FW_STRING_LEN 32
+ struct hclgevf_devlink_priv *priv = devlink_priv(devlink);
+ char version_str[HCLGEVF_DEVLINK_FW_STRING_LEN];
+ struct hclgevf_dev *hdev = priv->hdev;
+
+ snprintf(version_str, sizeof(version_str), "%lu.%lu.%lu.%lu",
+ hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE3_MASK,
+ HNAE3_FW_VERSION_BYTE3_SHIFT),
+ hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE2_MASK,
+ HNAE3_FW_VERSION_BYTE2_SHIFT),
+ hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE1_MASK,
+ HNAE3_FW_VERSION_BYTE1_SHIFT),
+ hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE0_MASK,
+ HNAE3_FW_VERSION_BYTE0_SHIFT));
+
+ return devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW,
+ version_str);
+}
+
+static int hclgevf_devlink_reload_down(struct devlink *devlink,
+ bool netns_change,
+ enum devlink_reload_action action,
+ enum devlink_reload_limit limit,
+ struct netlink_ext_ack *extack)
+{
+ struct hclgevf_devlink_priv *priv = devlink_priv(devlink);
+ struct hclgevf_dev *hdev = priv->hdev;
+ struct hnae3_handle *h = &hdev->nic;
+ struct pci_dev *pdev = hdev->pdev;
+ int ret;
+
+ if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) {
+ dev_err(&pdev->dev, "reset is handling\n");
+ return -EBUSY;
+ }
+
+ switch (action) {
+ case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
+ rtnl_lock();
+ ret = hdev->nic_client->ops->reset_notify(h, HNAE3_DOWN_CLIENT);
+ if (ret) {
+ rtnl_unlock();
+ return ret;
+ }
+
+ ret = hdev->nic_client->ops->reset_notify(h,
+ HNAE3_UNINIT_CLIENT);
+ rtnl_unlock();
+ return ret;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int hclgevf_devlink_reload_up(struct devlink *devlink,
+ enum devlink_reload_action action,
+ enum devlink_reload_limit limit,
+ u32 *actions_performed,
+ struct netlink_ext_ack *extack)
+{
+ struct hclgevf_devlink_priv *priv = devlink_priv(devlink);
+ struct hclgevf_dev *hdev = priv->hdev;
+ struct hnae3_handle *h = &hdev->nic;
+ int ret;
+
+ *actions_performed = BIT(action);
+ switch (action) {
+ case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
+ rtnl_lock();
+ ret = hdev->nic_client->ops->reset_notify(h, HNAE3_INIT_CLIENT);
+ if (ret) {
+ rtnl_unlock();
+ return ret;
+ }
+
+ ret = hdev->nic_client->ops->reset_notify(h, HNAE3_UP_CLIENT);
+ rtnl_unlock();
+ return ret;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct devlink_ops hclgevf_devlink_ops = {
+ .info_get = hclgevf_devlink_info_get,
+ .reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT),
+ .reload_down = hclgevf_devlink_reload_down,
+ .reload_up = hclgevf_devlink_reload_up,
+};
+
+int hclgevf_devlink_init(struct hclgevf_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ struct hclgevf_devlink_priv *priv;
+ struct devlink *devlink;
+
+ devlink =
+ devlink_alloc(&hclgevf_devlink_ops,
+ sizeof(struct hclgevf_devlink_priv), &pdev->dev);
+ if (!devlink)
+ return -ENOMEM;
+
+ priv = devlink_priv(devlink);
+ priv->hdev = hdev;
+ hdev->devlink = devlink;
+
+ devlink_register(devlink);
+ return 0;
+}
+
+void hclgevf_devlink_uninit(struct hclgevf_dev *hdev)
+{
+ struct devlink *devlink = hdev->devlink;
+
+ devlink_unregister(devlink);
+
+ devlink_free(devlink);
+}
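hclgevf_devlink_init() relies on devlink_alloc() reserving the requested private area alongside the devlink object, which is why devlink_priv() can hand back the struct hclgevf_devlink_priv that stores the hdev back-pointer used by the callbacks above. A minimal userspace sketch of that ownership pattern, with purely hypothetical names and plain malloc standing in for the devlink core, is:

/* Userspace sketch of the devlink_alloc()/devlink_priv() ownership
 * pattern; every name here is hypothetical and malloc stands in for
 * the devlink core.
 */
#include <stdio.h>
#include <stdlib.h>

struct fake_devlink {		/* stands in for struct devlink */
	const char *name;
	/* the driver's private area is allocated right behind this header */
};

struct fake_priv {		/* stands in for struct hclgevf_devlink_priv */
	void *hdev;		/* back-pointer to the driver object */
};

static struct fake_devlink *fake_devlink_alloc(size_t priv_size)
{
	/* one allocation: header plus trailing private area */
	return calloc(1, sizeof(struct fake_devlink) + priv_size);
}

static void *fake_devlink_priv(struct fake_devlink *dl)
{
	return dl + 1;		/* private data starts right after the header */
}

int main(void)
{
	int dummy_hdev = 42;
	struct fake_devlink *dl = fake_devlink_alloc(sizeof(struct fake_priv));
	struct fake_priv *priv;

	if (!dl)
		return 1;

	priv = fake_devlink_priv(dl);
	priv->hdev = &dummy_hdev;	/* same idea as priv->hdev = hdev above */
	printf("priv sits %zu bytes after the devlink header\n",
	       (size_t)((char *)priv - (char *)dl));
	free(dl);
	return 0;
}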
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.h
new file mode 100644
index 0000000000..e09ea3d8a9
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2021 Hisilicon Limited. */
+
+#ifndef __HCLGEVF_DEVLINK_H
+#define __HCLGEVF_DEVLINK_H
+
+#include "hclgevf_main.h"
+
+struct hclgevf_devlink_priv {
+ struct hclgevf_dev *hdev;
+};
+
+int hclgevf_devlink_init(struct hclgevf_dev *hdev);
+void hclgevf_devlink_uninit(struct hclgevf_dev *hdev);
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
new file mode 100644
index 0000000000..0aa9beefd1
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -0,0 +1,3359 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
+
+#include <linux/etherdevice.h>
+#include <linux/iopoll.h>
+#include <net/rtnetlink.h>
+#include "hclgevf_cmd.h"
+#include "hclgevf_main.h"
+#include "hclgevf_regs.h"
+#include "hclge_mbx.h"
+#include "hnae3.h"
+#include "hclgevf_devlink.h"
+#include "hclge_comm_rss.h"
+
+#define HCLGEVF_NAME "hclgevf"
+
+#define HCLGEVF_RESET_MAX_FAIL_CNT 5
+
+static int hclgevf_reset_hdev(struct hclgevf_dev *hdev);
+static void hclgevf_task_schedule(struct hclgevf_dev *hdev,
+ unsigned long delay);
+
+static struct hnae3_ae_algo ae_algovf;
+
+static struct workqueue_struct *hclgevf_wq;
+
+static const struct pci_device_id ae_algovf_pci_tbl[] = {
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_VF), 0},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_RDMA_DCB_PFC_VF),
+ HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
+ /* required last entry */
+ {0, }
+};
+
+MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl);
+
+/* hclgevf_cmd_send - send command to command queue
+ * @hw: pointer to the hw struct
+ * @desc: prefilled descriptor for describing the command
+ * @num: the number of descriptors to be sent
+ *
+ * This is the main command-send routine for the command queue: it posts
+ * the descriptors, cleans up the queue afterwards, etc.
+ */
+int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclge_desc *desc, int num)
+{
+ return hclge_comm_cmd_send(&hw->hw, desc, num);
+}
+
+void hclgevf_arq_init(struct hclgevf_dev *hdev)
+{
+ struct hclge_comm_cmq *cmdq = &hdev->hw.hw.cmq;
+
+ spin_lock(&cmdq->crq.lock);
+	/* initialize the pointers of the mailbox's async rx queue */
+ hdev->arq.hdev = hdev;
+ hdev->arq.head = 0;
+ hdev->arq.tail = 0;
+ atomic_set(&hdev->arq.count, 0);
+ spin_unlock(&cmdq->crq.lock);
+}
+
+struct hclgevf_dev *hclgevf_ae_get_hdev(struct hnae3_handle *handle)
+{
+ if (!handle->client)
+ return container_of(handle, struct hclgevf_dev, nic);
+ else if (handle->client->type == HNAE3_CLIENT_ROCE)
+ return container_of(handle, struct hclgevf_dev, roce);
+ else
+ return container_of(handle, struct hclgevf_dev, nic);
+}
+
+static void hclgevf_update_stats(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ int status;
+
+ status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw);
+ if (status)
+ dev_err(&hdev->pdev->dev,
+ "VF update of TQPS stats fail, status = %d.\n",
+ status);
+}
+
+static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset)
+{
+ if (strset == ETH_SS_TEST)
+ return -EOPNOTSUPP;
+ else if (strset == ETH_SS_STATS)
+ return hclge_comm_tqps_get_sset_count(handle);
+
+ return 0;
+}
+
+static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset,
+ u8 *data)
+{
+ u8 *p = (char *)data;
+
+ if (strset == ETH_SS_STATS)
+ p = hclge_comm_tqps_get_strings(handle, p);
+}
+
+static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data)
+{
+ hclge_comm_tqps_get_stats(handle, data);
+}
+
+static void hclgevf_build_send_msg(struct hclge_vf_to_pf_msg *msg, u8 code,
+ u8 subcode)
+{
+ if (msg) {
+ memset(msg, 0, sizeof(struct hclge_vf_to_pf_msg));
+ msg->code = code;
+ msg->subcode = subcode;
+ }
+}
+
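hclgevf_build_send_msg() plus hclgevf_send_mbx_msg() form the request/response idiom used for nearly every PF interaction in this file: zero the message, fill in code/subcode (and any payload), then send, passing a response buffer only when the caller must wait for the PF's answer. The condensed userspace mock below sketches that calling pattern; the message layout and send routine are simplified stand-ins, not the real struct hclge_vf_to_pf_msg or hclgevf_send_mbx_msg().

/* Userspace mock of the build-then-send mailbox idiom; all structures
 * and values are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct mock_vf_msg {
	uint8_t code;
	uint8_t subcode;
	uint8_t data[14];
};

static void mock_build_msg(struct mock_vf_msg *msg, uint8_t code,
			   uint8_t subcode)
{
	memset(msg, 0, sizeof(*msg));	/* same "zero first" rule as the driver */
	msg->code = code;
	msg->subcode = subcode;
}

static int mock_send(const struct mock_vf_msg *msg, int need_resp,
		     uint8_t *resp, size_t resp_len)
{
	printf("send code=%u subcode=%u need_resp=%d\n",
	       (unsigned int)msg->code, (unsigned int)msg->subcode, need_resp);
	if (need_resp && resp)
		memset(resp, 0xab, resp_len);	/* pretend the PF answered */
	return 0;
}

int main(void)
{
	struct mock_vf_msg msg;
	uint8_t resp[8];

	mock_build_msg(&msg, 11, 0);	/* arbitrary example code */
	return mock_send(&msg, 1, resp, sizeof(resp));
}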
+static int hclgevf_get_basic_info(struct hclgevf_dev *hdev)
+{
+ struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
+ u8 resp_msg[HCLGE_MBX_MAX_RESP_DATA_SIZE];
+ struct hclge_basic_info *basic_info;
+ struct hclge_vf_to_pf_msg send_msg;
+ unsigned long caps;
+ int status;
+
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_BASIC_INFO, 0);
+ status = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
+ sizeof(resp_msg));
+ if (status) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get basic info from pf, ret = %d", status);
+ return status;
+ }
+
+ basic_info = (struct hclge_basic_info *)resp_msg;
+
+ hdev->hw_tc_map = basic_info->hw_tc_map;
+ hdev->mbx_api_version = le16_to_cpu(basic_info->mbx_api_version);
+ caps = le32_to_cpu(basic_info->pf_caps);
+ if (test_bit(HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B, &caps))
+ set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);
+
+ return 0;
+}
+
+static int hclgevf_get_port_base_vlan_filter_state(struct hclgevf_dev *hdev)
+{
+ struct hnae3_handle *nic = &hdev->nic;
+ struct hclge_vf_to_pf_msg send_msg;
+ u8 resp_msg;
+ int ret;
+
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
+ HCLGE_MBX_GET_PORT_BASE_VLAN_STATE);
+ ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, &resp_msg,
+ sizeof(u8));
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "VF request to get port based vlan state failed %d",
+ ret);
+ return ret;
+ }
+
+ nic->port_base_vlan_state = resp_msg;
+
+ return 0;
+}
+
+static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
+{
+#define HCLGEVF_TQPS_RSS_INFO_LEN 6
+
+ struct hclge_mbx_vf_queue_info *queue_info;
+ u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN];
+ struct hclge_vf_to_pf_msg send_msg;
+ int status;
+
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QINFO, 0);
+ status = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
+ HCLGEVF_TQPS_RSS_INFO_LEN);
+ if (status) {
+ dev_err(&hdev->pdev->dev,
+ "VF request to get tqp info from PF failed %d",
+ status);
+ return status;
+ }
+
+ queue_info = (struct hclge_mbx_vf_queue_info *)resp_msg;
+ hdev->num_tqps = le16_to_cpu(queue_info->num_tqps);
+ hdev->rss_size_max = le16_to_cpu(queue_info->rss_size);
+ hdev->rx_buf_len = le16_to_cpu(queue_info->rx_buf_len);
+
+ return 0;
+}
+
+static int hclgevf_get_queue_depth(struct hclgevf_dev *hdev)
+{
+#define HCLGEVF_TQPS_DEPTH_INFO_LEN 4
+
+ struct hclge_mbx_vf_queue_depth *queue_depth;
+ u8 resp_msg[HCLGEVF_TQPS_DEPTH_INFO_LEN];
+ struct hclge_vf_to_pf_msg send_msg;
+ int ret;
+
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QDEPTH, 0);
+ ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
+ HCLGEVF_TQPS_DEPTH_INFO_LEN);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "VF request to get tqp depth info from PF failed %d",
+ ret);
+ return ret;
+ }
+
+ queue_depth = (struct hclge_mbx_vf_queue_depth *)resp_msg;
+ hdev->num_tx_desc = le16_to_cpu(queue_depth->num_tx_desc);
+ hdev->num_rx_desc = le16_to_cpu(queue_depth->num_rx_desc);
+
+ return 0;
+}
+
+static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hclge_vf_to_pf_msg send_msg;
+ u16 qid_in_pf = 0;
+ u8 resp_data[2];
+ int ret;
+
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QID_IN_PF, 0);
+ *(__le16 *)send_msg.data = cpu_to_le16(queue_id);
+ ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_data,
+ sizeof(resp_data));
+ if (!ret)
+ qid_in_pf = le16_to_cpu(*(__le16 *)resp_data);
+
+ return qid_in_pf;
+}
+
+static int hclgevf_get_pf_media_type(struct hclgevf_dev *hdev)
+{
+ struct hclge_vf_to_pf_msg send_msg;
+ u8 resp_msg[2];
+ int ret;
+
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MEDIA_TYPE, 0);
+ ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
+ sizeof(resp_msg));
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "VF request to get the pf port media type failed %d",
+ ret);
+ return ret;
+ }
+
+ hdev->hw.mac.media_type = resp_msg[0];
+ hdev->hw.mac.module_type = resp_msg[1];
+
+ return 0;
+}
+
+static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+ struct hclge_comm_tqp *tqp;
+ int i;
+
+ hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
+ sizeof(struct hclge_comm_tqp), GFP_KERNEL);
+ if (!hdev->htqp)
+ return -ENOMEM;
+
+ tqp = hdev->htqp;
+
+ for (i = 0; i < hdev->num_tqps; i++) {
+ tqp->dev = &hdev->pdev->dev;
+ tqp->index = i;
+
+ tqp->q.ae_algo = &ae_algovf;
+ tqp->q.buf_size = hdev->rx_buf_len;
+ tqp->q.tx_desc_num = hdev->num_tx_desc;
+ tqp->q.rx_desc_num = hdev->num_rx_desc;
+
+ /* need an extended offset to configure queues >=
+ * HCLGEVF_TQP_MAX_SIZE_DEV_V2.
+ */
+ if (i < HCLGEVF_TQP_MAX_SIZE_DEV_V2)
+ tqp->q.io_base = hdev->hw.hw.io_base +
+ HCLGEVF_TQP_REG_OFFSET +
+ i * HCLGEVF_TQP_REG_SIZE;
+ else
+ tqp->q.io_base = hdev->hw.hw.io_base +
+ HCLGEVF_TQP_REG_OFFSET +
+ HCLGEVF_TQP_EXT_REG_OFFSET +
+ (i - HCLGEVF_TQP_MAX_SIZE_DEV_V2) *
+ HCLGEVF_TQP_REG_SIZE;
+
+ /* when device supports tx push and has device memory,
+ * the queue can execute push mode or doorbell mode on
+ * device memory.
+ */
+ if (test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, ae_dev->caps))
+ tqp->q.mem_base = hdev->hw.hw.mem_base +
+ HCLGEVF_TQP_MEM_OFFSET(hdev, i);
+
+ tqp++;
+ }
+
+ return 0;
+}
+
+static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
+{
+ struct hnae3_handle *nic = &hdev->nic;
+ struct hnae3_knic_private_info *kinfo;
+ u16 new_tqps = hdev->num_tqps;
+ unsigned int i;
+ u8 num_tc = 0;
+
+ kinfo = &nic->kinfo;
+ kinfo->num_tx_desc = hdev->num_tx_desc;
+ kinfo->num_rx_desc = hdev->num_rx_desc;
+ kinfo->rx_buf_len = hdev->rx_buf_len;
+ for (i = 0; i < HCLGE_COMM_MAX_TC_NUM; i++)
+ if (hdev->hw_tc_map & BIT(i))
+ num_tc++;
+
+ num_tc = num_tc ? num_tc : 1;
+ kinfo->tc_info.num_tc = num_tc;
+ kinfo->rss_size = min_t(u16, hdev->rss_size_max, new_tqps / num_tc);
+ new_tqps = kinfo->rss_size * num_tc;
+ kinfo->num_tqps = min(new_tqps, hdev->num_tqps);
+
+ kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
+ sizeof(struct hnae3_queue *), GFP_KERNEL);
+ if (!kinfo->tqp)
+ return -ENOMEM;
+
+ for (i = 0; i < kinfo->num_tqps; i++) {
+ hdev->htqp[i].q.handle = &hdev->nic;
+ hdev->htqp[i].q.tqp_index = i;
+ kinfo->tqp[i] = &hdev->htqp[i].q;
+ }
+
+	/* after initializing the max rss_size and tqps, adjust the default
+	 * tqp number and rss size to match the actual number of vectors
+	 */
+ kinfo->num_tqps = min_t(u16, hdev->num_nic_msix - 1, kinfo->num_tqps);
+ kinfo->rss_size = min_t(u16, kinfo->num_tqps / num_tc,
+ kinfo->rss_size);
+
+ return 0;
+}
+
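The sizing in hclgevf_knic_setup() is easiest to follow with concrete numbers: rss_size is capped by both rss_size_max and the per-TC share of the TQPs, and both values are then clamped again by the NIC MSI-X budget (one vector held back for misc). A standalone walk-through using example figures only:

/* Standalone walk-through of the queue sizing done in
 * hclgevf_knic_setup(); all input values below are examples.
 */
#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned int num_tqps = 16, rss_size_max = 8, num_nic_msix = 10;
	unsigned int hw_tc_map = 0x3;			/* two TCs enabled */
	unsigned int num_tc = 0, rss_size, used_tqps;

	for (unsigned int i = 0; i < 8; i++)
		if (hw_tc_map & (1U << i))
			num_tc++;
	if (!num_tc)
		num_tc = 1;

	rss_size = min_u(rss_size_max, num_tqps / num_tc);	/* 8 */
	used_tqps = min_u(rss_size * num_tc, num_tqps);		/* 16 */

	/* leave one vector for misc, as the driver does */
	used_tqps = min_u(num_nic_msix - 1, used_tqps);		/* 9 */
	rss_size = min_u(used_tqps / num_tc, rss_size);		/* 4 */

	printf("num_tc=%u rss_size=%u num_tqps=%u\n",
	       num_tc, rss_size, used_tqps);
	return 0;
}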
+static void hclgevf_request_link_info(struct hclgevf_dev *hdev)
+{
+ struct hclge_vf_to_pf_msg send_msg;
+ int status;
+
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_STATUS, 0);
+ status = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
+ if (status)
+ dev_err(&hdev->pdev->dev,
+ "VF failed to fetch link status(%d) from PF", status);
+}
+
+void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
+{
+ struct hnae3_handle *rhandle = &hdev->roce;
+ struct hnae3_handle *handle = &hdev->nic;
+ struct hnae3_client *rclient;
+ struct hnae3_client *client;
+
+ if (test_and_set_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state))
+ return;
+
+ client = handle->client;
+ rclient = hdev->roce_client;
+
+ link_state =
+ test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state;
+ if (link_state != hdev->hw.mac.link) {
+ hdev->hw.mac.link = link_state;
+ client->ops->link_status_change(handle, !!link_state);
+ if (rclient && rclient->ops->link_status_change)
+ rclient->ops->link_status_change(rhandle, !!link_state);
+ }
+
+ clear_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state);
+}
+
+static void hclgevf_update_link_mode(struct hclgevf_dev *hdev)
+{
+#define HCLGEVF_ADVERTISING 0
+#define HCLGEVF_SUPPORTED 1
+
+ struct hclge_vf_to_pf_msg send_msg;
+
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_MODE, 0);
+ send_msg.data[0] = HCLGEVF_ADVERTISING;
+ hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
+ send_msg.data[0] = HCLGEVF_SUPPORTED;
+ hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
+}
+
+static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
+{
+ struct hnae3_handle *nic = &hdev->nic;
+ int ret;
+
+ nic->ae_algo = &ae_algovf;
+ nic->pdev = hdev->pdev;
+ nic->numa_node_mask = hdev->numa_node_mask;
+ nic->flags |= HNAE3_SUPPORT_VF;
+ nic->kinfo.io_base = hdev->hw.hw.io_base;
+
+ ret = hclgevf_knic_setup(hdev);
+ if (ret)
+ dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n",
+ ret);
+ return ret;
+}
+
+static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id)
+{
+ if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) {
+ dev_warn(&hdev->pdev->dev,
+ "vector(vector_id %d) has been freed.\n", vector_id);
+ return;
+ }
+
+ hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT;
+ hdev->num_msi_left += 1;
+ hdev->num_msi_used -= 1;
+}
+
+static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num,
+ struct hnae3_vector_info *vector_info)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hnae3_vector_info *vector = vector_info;
+ int alloc = 0;
+ int i, j;
+
+ vector_num = min_t(u16, hdev->num_nic_msix - 1, vector_num);
+ vector_num = min(hdev->num_msi_left, vector_num);
+
+ for (j = 0; j < vector_num; j++) {
+ for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) {
+ if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) {
+ vector->vector = pci_irq_vector(hdev->pdev, i);
+ vector->io_addr = hdev->hw.hw.io_base +
+ HCLGEVF_VECTOR_REG_BASE +
+ (i - 1) * HCLGEVF_VECTOR_REG_OFFSET;
+ hdev->vector_status[i] = 0;
+ hdev->vector_irq[i] = vector->vector;
+
+ vector++;
+ alloc++;
+
+ break;
+ }
+ }
+ }
+ hdev->num_msi_left -= alloc;
+ hdev->num_msi_used += alloc;
+
+ return alloc;
+}
+
+static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector)
+{
+ int i;
+
+ for (i = 0; i < hdev->num_msi; i++)
+ if (vector == hdev->vector_irq[i])
+ return i;
+
+ return -EINVAL;
+}
+
+/* for revision 0x20, the VF shares the same rss config with the PF */
+static int hclgevf_get_rss_hash_key(struct hclgevf_dev *hdev)
+{
+#define HCLGEVF_RSS_MBX_RESP_LEN 8
+ struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
+ u8 resp_msg[HCLGEVF_RSS_MBX_RESP_LEN];
+ struct hclge_vf_to_pf_msg send_msg;
+ u16 msg_num, hash_key_index;
+ u8 index;
+ int ret;
+
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_RSS_KEY, 0);
+ msg_num = (HCLGE_COMM_RSS_KEY_SIZE + HCLGEVF_RSS_MBX_RESP_LEN - 1) /
+ HCLGEVF_RSS_MBX_RESP_LEN;
+ for (index = 0; index < msg_num; index++) {
+ send_msg.data[0] = index;
+ ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
+ HCLGEVF_RSS_MBX_RESP_LEN);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "VF get rss hash key from PF failed, ret=%d",
+ ret);
+ return ret;
+ }
+
+ hash_key_index = HCLGEVF_RSS_MBX_RESP_LEN * index;
+ if (index == msg_num - 1)
+ memcpy(&rss_cfg->rss_hash_key[hash_key_index],
+ &resp_msg[0],
+ HCLGE_COMM_RSS_KEY_SIZE - hash_key_index);
+ else
+ memcpy(&rss_cfg->rss_hash_key[hash_key_index],
+ &resp_msg[0], HCLGEVF_RSS_MBX_RESP_LEN);
+ }
+
+ return 0;
+}
+
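Since each mailbox response carries at most HCLGEVF_RSS_MBX_RESP_LEN bytes, hclgevf_get_rss_hash_key() pulls the key over in 8-byte chunks and copies only the remainder on the last message. Assuming a 40-byte hash key (the usual HCLGE_COMM_RSS_KEY_SIZE, stated here as an assumption), the chunk arithmetic can be checked in isolation:

/* Standalone check of the chunked RSS-key copy in
 * hclgevf_get_rss_hash_key(); the 40-byte key size is an assumption.
 * With a key size that is not a multiple of RESP_LEN, the final copy
 * simply shrinks to the remainder.
 */
#include <stdio.h>

#define RESP_LEN 8
#define KEY_SIZE 40

int main(void)
{
	unsigned int msg_num = (KEY_SIZE + RESP_LEN - 1) / RESP_LEN;	/* 5 */

	for (unsigned int index = 0; index < msg_num; index++) {
		unsigned int offset = RESP_LEN * index;
		unsigned int copy = (index == msg_num - 1) ?
				    KEY_SIZE - offset : RESP_LEN;

		printf("msg %u: copy %u bytes at offset %u\n",
		       index, copy, offset);
	}
	return 0;
}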
+static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
+ u8 *hfunc)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
+ int ret;
+
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
+ hclge_comm_get_rss_hash_info(rss_cfg, key, hfunc);
+ } else {
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+ if (key) {
+ ret = hclgevf_get_rss_hash_key(hdev);
+ if (ret)
+ return ret;
+ memcpy(key, rss_cfg->rss_hash_key,
+ HCLGE_COMM_RSS_KEY_SIZE);
+ }
+ }
+
+ hclge_comm_get_rss_indir_tbl(rss_cfg, indir,
+ hdev->ae_dev->dev_specs.rss_ind_tbl_size);
+
+ return 0;
+}
+
+static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
+ int ret, i;
+
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
+ ret = hclge_comm_set_rss_hash_key(rss_cfg, &hdev->hw.hw, key,
+ hfunc);
+ if (ret)
+ return ret;
+ }
+
+ /* update the shadow RSS table with user specified qids */
+ for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
+ rss_cfg->rss_indirection_tbl[i] = indir[i];
+
+ /* update the hardware */
+ return hclge_comm_set_rss_indir_table(hdev->ae_dev, &hdev->hw.hw,
+ rss_cfg->rss_indirection_tbl);
+}
+
+static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *nfc)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ int ret;
+
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
+ return -EOPNOTSUPP;
+
+ ret = hclge_comm_set_rss_tuple(hdev->ae_dev, &hdev->hw.hw,
+ &hdev->rss_cfg, nfc);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "failed to set rss tuple, ret = %d.\n", ret);
+
+ return ret;
+}
+
+static int hclgevf_get_rss_tuple(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *nfc)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ u8 tuple_sets;
+ int ret;
+
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
+ return -EOPNOTSUPP;
+
+ nfc->data = 0;
+
+ ret = hclge_comm_get_rss_tuple(&hdev->rss_cfg, nfc->flow_type,
+ &tuple_sets);
+ if (ret || !tuple_sets)
+ return ret;
+
+ nfc->data = hclge_comm_convert_rss_tuple(tuple_sets);
+
+ return 0;
+}
+
+static int hclgevf_get_tc_size(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
+
+ return rss_cfg->rss_size;
+}
+
+static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
+ int vector_id,
+ struct hnae3_ring_chain_node *ring_chain)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hclge_vf_to_pf_msg send_msg;
+ struct hnae3_ring_chain_node *node;
+ int status;
+ int i = 0;
+
+ memset(&send_msg, 0, sizeof(send_msg));
+ send_msg.code = en ? HCLGE_MBX_MAP_RING_TO_VECTOR :
+ HCLGE_MBX_UNMAP_RING_TO_VECTOR;
+ send_msg.vector_id = vector_id;
+
+ for (node = ring_chain; node; node = node->next) {
+ send_msg.param[i].ring_type =
+ hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B);
+
+ send_msg.param[i].tqp_index = node->tqp_index;
+ send_msg.param[i].int_gl_index =
+ hnae3_get_field(node->int_gl_idx,
+ HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S);
+
+ i++;
+ if (i == HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM || !node->next) {
+ send_msg.ring_num = i;
+
+ status = hclgevf_send_mbx_msg(hdev, &send_msg, false,
+ NULL, 0);
+ if (status) {
+ dev_err(&hdev->pdev->dev,
+ "Map TQP fail, status is %d.\n",
+ status);
+ return status;
+ }
+ i = 0;
+ }
+ }
+
+ return 0;
+}
+
+static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector,
+ struct hnae3_ring_chain_node *ring_chain)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ int vector_id;
+
+ vector_id = hclgevf_get_vector_index(hdev, vector);
+ if (vector_id < 0) {
+ dev_err(&handle->pdev->dev,
+ "Get vector index fail. ret =%d\n", vector_id);
+ return vector_id;
+ }
+
+ return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain);
+}
+
+static int hclgevf_unmap_ring_from_vector(
+ struct hnae3_handle *handle,
+ int vector,
+ struct hnae3_ring_chain_node *ring_chain)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ int ret, vector_id;
+
+ if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
+ return 0;
+
+ vector_id = hclgevf_get_vector_index(hdev, vector);
+ if (vector_id < 0) {
+ dev_err(&handle->pdev->dev,
+ "Get vector index fail. ret =%d\n", vector_id);
+ return vector_id;
+ }
+
+ ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain);
+ if (ret)
+ dev_err(&handle->pdev->dev,
+ "Unmap ring from vector fail. vector=%d, ret =%d\n",
+ vector_id,
+ ret);
+
+ return ret;
+}
+
+static int hclgevf_put_vector(struct hnae3_handle *handle, int vector)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ int vector_id;
+
+ vector_id = hclgevf_get_vector_index(hdev, vector);
+ if (vector_id < 0) {
+ dev_err(&handle->pdev->dev,
+ "hclgevf_put_vector get vector index fail. ret =%d\n",
+ vector_id);
+ return vector_id;
+ }
+
+ hclgevf_free_vector(hdev, vector_id);
+
+ return 0;
+}
+
+static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
+ bool en_uc_pmc, bool en_mc_pmc,
+ bool en_bc_pmc)
+{
+ struct hnae3_handle *handle = &hdev->nic;
+ struct hclge_vf_to_pf_msg send_msg;
+ int ret;
+
+ memset(&send_msg, 0, sizeof(send_msg));
+ send_msg.code = HCLGE_MBX_SET_PROMISC_MODE;
+ send_msg.en_bc = en_bc_pmc ? 1 : 0;
+ send_msg.en_uc = en_uc_pmc ? 1 : 0;
+ send_msg.en_mc = en_mc_pmc ? 1 : 0;
+ send_msg.en_limit_promisc = test_bit(HNAE3_PFLAG_LIMIT_PROMISC,
+ &handle->priv_flags) ? 1 : 0;
+
+ ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "Set promisc mode fail, status is %d.\n", ret);
+
+ return ret;
+}
+
+static int hclgevf_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
+ bool en_mc_pmc)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ bool en_bc_pmc;
+
+ en_bc_pmc = hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2;
+
+ return hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc,
+ en_bc_pmc);
+}
+
+static void hclgevf_request_update_promisc_mode(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);
+ hclgevf_task_schedule(hdev, 0);
+}
+
+static void hclgevf_sync_promisc_mode(struct hclgevf_dev *hdev)
+{
+ struct hnae3_handle *handle = &hdev->nic;
+ bool en_uc_pmc = handle->netdev_flags & HNAE3_UPE;
+ bool en_mc_pmc = handle->netdev_flags & HNAE3_MPE;
+ int ret;
+
+ if (test_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state)) {
+ ret = hclgevf_set_promisc_mode(handle, en_uc_pmc, en_mc_pmc);
+ if (!ret)
+ clear_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);
+ }
+}
+
+static int hclgevf_tqp_enable_cmd_send(struct hclgevf_dev *hdev, u16 tqp_id,
+ u16 stream_id, bool enable)
+{
+ struct hclgevf_cfg_com_tqp_queue_cmd *req;
+ struct hclge_desc desc;
+
+ req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data;
+
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
+ req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK);
+ req->stream_id = cpu_to_le16(stream_id);
+ if (enable)
+ req->enable |= 1U << HCLGEVF_TQP_ENABLE_B;
+
+ return hclgevf_cmd_send(&hdev->hw, &desc, 1);
+}
+
+static int hclgevf_tqp_enable(struct hnae3_handle *handle, bool enable)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ int ret;
+ u16 i;
+
+ for (i = 0; i < handle->kinfo.num_tqps; i++) {
+ ret = hclgevf_tqp_enable_cmd_send(hdev, i, 0, enable);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hclgevf_get_host_mac_addr(struct hclgevf_dev *hdev, u8 *p)
+{
+ struct hclge_vf_to_pf_msg send_msg;
+ u8 host_mac[ETH_ALEN];
+ int status;
+
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MAC_ADDR, 0);
+ status = hclgevf_send_mbx_msg(hdev, &send_msg, true, host_mac,
+ ETH_ALEN);
+ if (status) {
+ dev_err(&hdev->pdev->dev,
+ "fail to get VF MAC from host %d", status);
+ return status;
+ }
+
+ ether_addr_copy(p, host_mac);
+
+ return 0;
+}
+
+static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ u8 host_mac_addr[ETH_ALEN];
+
+ if (hclgevf_get_host_mac_addr(hdev, host_mac_addr))
+ return;
+
+ hdev->has_pf_mac = !is_zero_ether_addr(host_mac_addr);
+ if (hdev->has_pf_mac)
+ ether_addr_copy(p, host_mac_addr);
+ else
+ ether_addr_copy(p, hdev->hw.mac.mac_addr);
+}
+
+static int hclgevf_set_mac_addr(struct hnae3_handle *handle, const void *p,
+ bool is_first)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr;
+ struct hclge_vf_to_pf_msg send_msg;
+ u8 *new_mac_addr = (u8 *)p;
+ int status;
+
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_UNICAST, 0);
+ send_msg.subcode = HCLGE_MBX_MAC_VLAN_UC_MODIFY;
+ ether_addr_copy(send_msg.data, new_mac_addr);
+ if (is_first && !hdev->has_pf_mac)
+ eth_zero_addr(&send_msg.data[ETH_ALEN]);
+ else
+ ether_addr_copy(&send_msg.data[ETH_ALEN], old_mac_addr);
+ status = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
+ if (!status)
+ ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr);
+
+ return status;
+}
+
+static struct hclgevf_mac_addr_node *
+hclgevf_find_mac_node(struct list_head *list, const u8 *mac_addr)
+{
+ struct hclgevf_mac_addr_node *mac_node, *tmp;
+
+ list_for_each_entry_safe(mac_node, tmp, list, node)
+ if (ether_addr_equal(mac_addr, mac_node->mac_addr))
+ return mac_node;
+
+ return NULL;
+}
+
+static void hclgevf_update_mac_node(struct hclgevf_mac_addr_node *mac_node,
+ enum HCLGEVF_MAC_NODE_STATE state)
+{
+ switch (state) {
+ /* from set_rx_mode or tmp_add_list */
+ case HCLGEVF_MAC_TO_ADD:
+ if (mac_node->state == HCLGEVF_MAC_TO_DEL)
+ mac_node->state = HCLGEVF_MAC_ACTIVE;
+ break;
+ /* only from set_rx_mode */
+ case HCLGEVF_MAC_TO_DEL:
+ if (mac_node->state == HCLGEVF_MAC_TO_ADD) {
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ } else {
+ mac_node->state = HCLGEVF_MAC_TO_DEL;
+ }
+ break;
+ /* only from tmp_add_list, the mac_node->state won't be
+ * HCLGEVF_MAC_ACTIVE
+ */
+ case HCLGEVF_MAC_ACTIVE:
+ if (mac_node->state == HCLGEVF_MAC_TO_ADD)
+ mac_node->state = HCLGEVF_MAC_ACTIVE;
+ break;
+ }
+}
+
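hclgevf_update_mac_node() is easier to read as a transition table: a TO_DEL node that gets re-added becomes ACTIVE, a still-pending TO_ADD node that is deleted is freed outright, and anything else asked to be deleted becomes TO_DEL. The standalone restatement below mirrors those rules; it is illustrative only, not a replacement for the list handling in the driver.

/* Standalone restatement of the hclgevf_update_mac_node() transitions. */
#include <stdio.h>

enum mac_state { MAC_TO_ADD, MAC_TO_DEL, MAC_ACTIVE, MAC_FREED };

static enum mac_state update(enum mac_state cur, enum mac_state req)
{
	switch (req) {
	case MAC_TO_ADD:		/* from set_rx_mode or tmp_add_list */
		return cur == MAC_TO_DEL ? MAC_ACTIVE : cur;
	case MAC_TO_DEL:		/* only from set_rx_mode */
		return cur == MAC_TO_ADD ? MAC_FREED : MAC_TO_DEL;
	case MAC_ACTIVE:		/* only from tmp_add_list */
		return cur == MAC_TO_ADD ? MAC_ACTIVE : cur;
	default:
		return cur;
	}
}

int main(void)
{
	static const char * const name[] = {
		"TO_ADD", "TO_DEL", "ACTIVE", "FREED (node released)"
	};

	for (int cur = MAC_TO_ADD; cur <= MAC_ACTIVE; cur++)
		for (int req = MAC_TO_ADD; req <= MAC_ACTIVE; req++)
			printf("state %-6s + request %-6s -> %s\n",
			       name[cur], name[req], name[update(cur, req)]);
	return 0;
}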
+static int hclgevf_update_mac_list(struct hnae3_handle *handle,
+ enum HCLGEVF_MAC_NODE_STATE state,
+ enum HCLGEVF_MAC_ADDR_TYPE mac_type,
+ const unsigned char *addr)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hclgevf_mac_addr_node *mac_node;
+ struct list_head *list;
+
+ list = (mac_type == HCLGEVF_MAC_ADDR_UC) ?
+ &hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list;
+
+ spin_lock_bh(&hdev->mac_table.mac_list_lock);
+
+	/* if the mac addr is already in the mac list, there is no need to add
+	 * a new entry; just update the existing node's state: convert it to a
+	 * new state, remove it, or leave it unchanged.
+	 */
+ mac_node = hclgevf_find_mac_node(list, addr);
+ if (mac_node) {
+ hclgevf_update_mac_node(mac_node, state);
+ spin_unlock_bh(&hdev->mac_table.mac_list_lock);
+ return 0;
+ }
+	/* if this address was never added, there is nothing to delete */
+ if (state == HCLGEVF_MAC_TO_DEL) {
+ spin_unlock_bh(&hdev->mac_table.mac_list_lock);
+ return -ENOENT;
+ }
+
+ mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
+ if (!mac_node) {
+ spin_unlock_bh(&hdev->mac_table.mac_list_lock);
+ return -ENOMEM;
+ }
+
+ mac_node->state = state;
+ ether_addr_copy(mac_node->mac_addr, addr);
+ list_add_tail(&mac_node->node, list);
+
+ spin_unlock_bh(&hdev->mac_table.mac_list_lock);
+ return 0;
+}
+
+static int hclgevf_add_uc_addr(struct hnae3_handle *handle,
+ const unsigned char *addr)
+{
+ return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD,
+ HCLGEVF_MAC_ADDR_UC, addr);
+}
+
+static int hclgevf_rm_uc_addr(struct hnae3_handle *handle,
+ const unsigned char *addr)
+{
+ return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL,
+ HCLGEVF_MAC_ADDR_UC, addr);
+}
+
+static int hclgevf_add_mc_addr(struct hnae3_handle *handle,
+ const unsigned char *addr)
+{
+ return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD,
+ HCLGEVF_MAC_ADDR_MC, addr);
+}
+
+static int hclgevf_rm_mc_addr(struct hnae3_handle *handle,
+ const unsigned char *addr)
+{
+ return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL,
+ HCLGEVF_MAC_ADDR_MC, addr);
+}
+
+static int hclgevf_add_del_mac_addr(struct hclgevf_dev *hdev,
+ struct hclgevf_mac_addr_node *mac_node,
+ enum HCLGEVF_MAC_ADDR_TYPE mac_type)
+{
+ struct hclge_vf_to_pf_msg send_msg;
+ u8 code, subcode;
+
+ if (mac_type == HCLGEVF_MAC_ADDR_UC) {
+ code = HCLGE_MBX_SET_UNICAST;
+ if (mac_node->state == HCLGEVF_MAC_TO_ADD)
+ subcode = HCLGE_MBX_MAC_VLAN_UC_ADD;
+ else
+ subcode = HCLGE_MBX_MAC_VLAN_UC_REMOVE;
+ } else {
+ code = HCLGE_MBX_SET_MULTICAST;
+ if (mac_node->state == HCLGEVF_MAC_TO_ADD)
+ subcode = HCLGE_MBX_MAC_VLAN_MC_ADD;
+ else
+ subcode = HCLGE_MBX_MAC_VLAN_MC_REMOVE;
+ }
+
+ hclgevf_build_send_msg(&send_msg, code, subcode);
+ ether_addr_copy(send_msg.data, mac_node->mac_addr);
+ return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
+}
+
+static void hclgevf_config_mac_list(struct hclgevf_dev *hdev,
+ struct list_head *list,
+ enum HCLGEVF_MAC_ADDR_TYPE mac_type)
+{
+ char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
+ struct hclgevf_mac_addr_node *mac_node, *tmp;
+ int ret;
+
+ list_for_each_entry_safe(mac_node, tmp, list, node) {
+ ret = hclgevf_add_del_mac_addr(hdev, mac_node, mac_type);
+ if (ret) {
+ hnae3_format_mac_addr(format_mac_addr,
+ mac_node->mac_addr);
+ dev_err(&hdev->pdev->dev,
+ "failed to configure mac %s, state = %d, ret = %d\n",
+ format_mac_addr, mac_node->state, ret);
+ return;
+ }
+ if (mac_node->state == HCLGEVF_MAC_TO_ADD) {
+ mac_node->state = HCLGEVF_MAC_ACTIVE;
+ } else {
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ }
+ }
+}
+
+static void hclgevf_sync_from_add_list(struct list_head *add_list,
+ struct list_head *mac_list)
+{
+ struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;
+
+ list_for_each_entry_safe(mac_node, tmp, add_list, node) {
+		/* if the mac address from tmp_add_list is not in the
+		 * uc/mc_mac_list, a TO_DEL request was received for it during
+		 * the time window of sending the mac config request to the PF.
+		 * If the mac_node state is ACTIVE, change it to TO_DEL so it
+		 * gets removed next time; if it is TO_ADD, the TO_ADD request
+		 * failed, so just remove the mac node.
+		 */
+ new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr);
+ if (new_node) {
+ hclgevf_update_mac_node(new_node, mac_node->state);
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ } else if (mac_node->state == HCLGEVF_MAC_ACTIVE) {
+ mac_node->state = HCLGEVF_MAC_TO_DEL;
+ list_move_tail(&mac_node->node, mac_list);
+ } else {
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ }
+ }
+}
+
+static void hclgevf_sync_from_del_list(struct list_head *del_list,
+ struct list_head *mac_list)
+{
+ struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;
+
+ list_for_each_entry_safe(mac_node, tmp, del_list, node) {
+ new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr);
+ if (new_node) {
+			/* If the mac addr exists in the mac list, a new TO_ADD
+			 * request was received during the time window of
+			 * sending the mac addr config request to the PF, so
+			 * just change the mac state to ACTIVE.
+			 */
+ new_node->state = HCLGEVF_MAC_ACTIVE;
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ } else {
+ list_move_tail(&mac_node->node, mac_list);
+ }
+ }
+}
+
+static void hclgevf_clear_list(struct list_head *list)
+{
+ struct hclgevf_mac_addr_node *mac_node, *tmp;
+
+ list_for_each_entry_safe(mac_node, tmp, list, node) {
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ }
+}
+
+static void hclgevf_sync_mac_list(struct hclgevf_dev *hdev,
+ enum HCLGEVF_MAC_ADDR_TYPE mac_type)
+{
+ struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;
+ struct list_head tmp_add_list, tmp_del_list;
+ struct list_head *list;
+
+ INIT_LIST_HEAD(&tmp_add_list);
+ INIT_LIST_HEAD(&tmp_del_list);
+
+	/* move the mac addrs to the tmp_add_list and tmp_del_list, so
+	 * we can add/delete them outside the spin lock
+	 */
+ list = (mac_type == HCLGEVF_MAC_ADDR_UC) ?
+ &hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list;
+
+ spin_lock_bh(&hdev->mac_table.mac_list_lock);
+
+ list_for_each_entry_safe(mac_node, tmp, list, node) {
+ switch (mac_node->state) {
+ case HCLGEVF_MAC_TO_DEL:
+ list_move_tail(&mac_node->node, &tmp_del_list);
+ break;
+ case HCLGEVF_MAC_TO_ADD:
+ new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
+ if (!new_node)
+ goto stop_traverse;
+
+ ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
+ new_node->state = mac_node->state;
+ list_add_tail(&new_node->node, &tmp_add_list);
+ break;
+ default:
+ break;
+ }
+ }
+
+stop_traverse:
+ spin_unlock_bh(&hdev->mac_table.mac_list_lock);
+
+ /* delete first, in order to get max mac table space for adding */
+ hclgevf_config_mac_list(hdev, &tmp_del_list, mac_type);
+ hclgevf_config_mac_list(hdev, &tmp_add_list, mac_type);
+
+	/* if adding/deleting some mac addresses failed, move them back to the
+	 * mac_list and retry next time.
+	 */
+ spin_lock_bh(&hdev->mac_table.mac_list_lock);
+
+ hclgevf_sync_from_del_list(&tmp_del_list, list);
+ hclgevf_sync_from_add_list(&tmp_add_list, list);
+
+ spin_unlock_bh(&hdev->mac_table.mac_list_lock);
+}
+
+static void hclgevf_sync_mac_table(struct hclgevf_dev *hdev)
+{
+ hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_UC);
+ hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_MC);
+}
+
+static void hclgevf_uninit_mac_list(struct hclgevf_dev *hdev)
+{
+ spin_lock_bh(&hdev->mac_table.mac_list_lock);
+
+ hclgevf_clear_list(&hdev->mac_table.uc_mac_list);
+ hclgevf_clear_list(&hdev->mac_table.mc_mac_list);
+
+ spin_unlock_bh(&hdev->mac_table.mac_list_lock);
+}
+
+static int hclgevf_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
+ struct hclge_vf_to_pf_msg send_msg;
+
+ if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps))
+ return -EOPNOTSUPP;
+
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
+ HCLGE_MBX_ENABLE_VLAN_FILTER);
+ send_msg.data[0] = enable ? 1 : 0;
+
+ return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
+}
+
+static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
+ __be16 proto, u16 vlan_id,
+ bool is_kill)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hclge_mbx_vlan_filter *vlan_filter;
+ struct hclge_vf_to_pf_msg send_msg;
+ int ret;
+
+ if (vlan_id > HCLGEVF_MAX_VLAN_ID)
+ return -EINVAL;
+
+ if (proto != htons(ETH_P_8021Q))
+ return -EPROTONOSUPPORT;
+
+	/* While the device is resetting or a reset has failed, the firmware
+	 * cannot handle mailbox messages. Just record the vlan id and remove
+	 * it after the reset has finished.
+	 */
+ if ((test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
+ test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) && is_kill) {
+ set_bit(vlan_id, hdev->vlan_del_fail_bmap);
+ return -EBUSY;
+ } else if (!is_kill && test_bit(vlan_id, hdev->vlan_del_fail_bmap)) {
+ clear_bit(vlan_id, hdev->vlan_del_fail_bmap);
+ }
+
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
+ HCLGE_MBX_VLAN_FILTER);
+ vlan_filter = (struct hclge_mbx_vlan_filter *)send_msg.data;
+ vlan_filter->is_kill = is_kill;
+ vlan_filter->vlan_id = cpu_to_le16(vlan_id);
+ vlan_filter->proto = cpu_to_le16(be16_to_cpu(proto));
+
+	/* if removing the hw vlan filter failed, record the vlan id and try
+	 * to remove it from hw later, to stay consistent with the stack.
+	 */
+ ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
+ if (is_kill && ret)
+ set_bit(vlan_id, hdev->vlan_del_fail_bmap);
+
+ return ret;
+}
+
+static void hclgevf_sync_vlan_filter(struct hclgevf_dev *hdev)
+{
+#define HCLGEVF_MAX_SYNC_COUNT 60
+ struct hnae3_handle *handle = &hdev->nic;
+ int ret, sync_cnt = 0;
+ u16 vlan_id;
+
+ if (bitmap_empty(hdev->vlan_del_fail_bmap, VLAN_N_VID))
+ return;
+
+ rtnl_lock();
+ vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
+ while (vlan_id != VLAN_N_VID) {
+ ret = hclgevf_set_vlan_filter(handle, htons(ETH_P_8021Q),
+ vlan_id, true);
+ if (ret)
+ break;
+
+ clear_bit(vlan_id, hdev->vlan_del_fail_bmap);
+ sync_cnt++;
+ if (sync_cnt >= HCLGEVF_MAX_SYNC_COUNT)
+ break;
+
+ vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
+ }
+ rtnl_unlock();
+}
+
+static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hclge_vf_to_pf_msg send_msg;
+
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
+ HCLGE_MBX_VLAN_RX_OFF_CFG);
+ send_msg.data[0] = enable ? 1 : 0;
+ return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
+}
+
+static int hclgevf_reset_tqp(struct hnae3_handle *handle)
+{
+#define HCLGEVF_RESET_ALL_QUEUE_DONE 1U
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hclge_vf_to_pf_msg send_msg;
+ u8 return_status = 0;
+ int ret;
+ u16 i;
+
+ /* disable vf queue before send queue reset msg to PF */
+ ret = hclgevf_tqp_enable(handle, false);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "failed to disable tqp, ret = %d\n",
+ ret);
+ return ret;
+ }
+
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_QUEUE_RESET, 0);
+
+ ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, &return_status,
+ sizeof(return_status));
+ if (ret || return_status == HCLGEVF_RESET_ALL_QUEUE_DONE)
+ return ret;
+
+ for (i = 1; i < handle->kinfo.num_tqps; i++) {
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_QUEUE_RESET, 0);
+ *(__le16 *)send_msg.data = cpu_to_le16(i);
+ ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hclge_mbx_mtu_info *mtu_info;
+ struct hclge_vf_to_pf_msg send_msg;
+
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_MTU, 0);
+ mtu_info = (struct hclge_mbx_mtu_info *)send_msg.data;
+ mtu_info->mtu = cpu_to_le32(new_mtu);
+
+ return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
+}
+
+static int hclgevf_notify_client(struct hclgevf_dev *hdev,
+ enum hnae3_reset_notify_type type)
+{
+ struct hnae3_client *client = hdev->nic_client;
+ struct hnae3_handle *handle = &hdev->nic;
+ int ret;
+
+ if (!test_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state) ||
+ !client)
+ return 0;
+
+ if (!client->ops->reset_notify)
+ return -EOPNOTSUPP;
+
+ ret = client->ops->reset_notify(handle, type);
+ if (ret)
+ dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
+ type, ret);
+
+ return ret;
+}
+
+static int hclgevf_notify_roce_client(struct hclgevf_dev *hdev,
+ enum hnae3_reset_notify_type type)
+{
+ struct hnae3_client *client = hdev->roce_client;
+ struct hnae3_handle *handle = &hdev->roce;
+ int ret;
+
+ if (!test_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state) || !client)
+ return 0;
+
+ if (!client->ops->reset_notify)
+ return -EOPNOTSUPP;
+
+ ret = client->ops->reset_notify(handle, type);
+ if (ret)
+ dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)",
+ type, ret);
+ return ret;
+}
+
+static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
+{
+#define HCLGEVF_RESET_WAIT_US 20000
+#define HCLGEVF_RESET_WAIT_CNT 2000
+#define HCLGEVF_RESET_WAIT_TIMEOUT_US \
+ (HCLGEVF_RESET_WAIT_US * HCLGEVF_RESET_WAIT_CNT)
+
+ u32 val;
+ int ret;
+
+ if (hdev->reset_type == HNAE3_VF_RESET)
+ ret = readl_poll_timeout(hdev->hw.hw.io_base +
+ HCLGEVF_VF_RST_ING, val,
+ !(val & HCLGEVF_VF_RST_ING_BIT),
+ HCLGEVF_RESET_WAIT_US,
+ HCLGEVF_RESET_WAIT_TIMEOUT_US);
+ else
+ ret = readl_poll_timeout(hdev->hw.hw.io_base +
+ HCLGEVF_RST_ING, val,
+ !(val & HCLGEVF_RST_ING_BITS),
+ HCLGEVF_RESET_WAIT_US,
+ HCLGEVF_RESET_WAIT_TIMEOUT_US);
+
+ /* hardware completion status should be available by this time */
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "couldn't get reset done status from h/w, timeout!\n");
+ return ret;
+ }
+
+	/* wait a bit longer to let the reset of the stack complete. This may
+	 * be needed when the reset was asserted by the PF, and it also means
+	 * we may end up waiting a bit longer even for a VF reset.
+	 */
+ if (hdev->reset_type == HNAE3_VF_FULL_RESET)
+ msleep(5000);
+ else
+ msleep(500);
+
+ return 0;
+}
+
+static void hclgevf_reset_handshake(struct hclgevf_dev *hdev, bool enable)
+{
+ u32 reg_val;
+
+ reg_val = hclgevf_read_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG);
+ if (enable)
+ reg_val |= HCLGEVF_NIC_SW_RST_RDY;
+ else
+ reg_val &= ~HCLGEVF_NIC_SW_RST_RDY;
+
+ hclgevf_write_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG,
+ reg_val);
+}
+
+static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
+{
+ int ret;
+
+ /* uninitialize the nic client */
+ ret = hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);
+ if (ret)
+ return ret;
+
+ /* re-initialize the hclge device */
+ ret = hclgevf_reset_hdev(hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "hclge device re-init failed, VF is disabled!\n");
+ return ret;
+ }
+
+ /* bring up the nic client again */
+ ret = hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT);
+ if (ret)
+ return ret;
+
+ /* clear handshake status with IMP */
+ hclgevf_reset_handshake(hdev, false);
+
+ /* bring up the nic to enable TX/RX again */
+ return hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
+}
+
+static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
+{
+#define HCLGEVF_RESET_SYNC_TIME 100
+
+ if (hdev->reset_type == HNAE3_VF_FUNC_RESET) {
+ struct hclge_vf_to_pf_msg send_msg;
+ int ret;
+
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_RESET, 0);
+ ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to assert VF reset, ret = %d\n", ret);
+ return ret;
+ }
+ hdev->rst_stats.vf_func_rst_cnt++;
+ }
+
+ set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
+ /* inform hardware that preparatory work is done */
+ msleep(HCLGEVF_RESET_SYNC_TIME);
+ hclgevf_reset_handshake(hdev, true);
+ dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done\n",
+ hdev->reset_type);
+
+ return 0;
+}
+
+static void hclgevf_dump_rst_info(struct hclgevf_dev *hdev)
+{
+ dev_info(&hdev->pdev->dev, "VF function reset count: %u\n",
+ hdev->rst_stats.vf_func_rst_cnt);
+ dev_info(&hdev->pdev->dev, "FLR reset count: %u\n",
+ hdev->rst_stats.flr_rst_cnt);
+ dev_info(&hdev->pdev->dev, "VF reset count: %u\n",
+ hdev->rst_stats.vf_rst_cnt);
+ dev_info(&hdev->pdev->dev, "reset done count: %u\n",
+ hdev->rst_stats.rst_done_cnt);
+ dev_info(&hdev->pdev->dev, "HW reset done count: %u\n",
+ hdev->rst_stats.hw_rst_done_cnt);
+ dev_info(&hdev->pdev->dev, "reset count: %u\n",
+ hdev->rst_stats.rst_cnt);
+ dev_info(&hdev->pdev->dev, "reset fail count: %u\n",
+ hdev->rst_stats.rst_fail_cnt);
+ dev_info(&hdev->pdev->dev, "vector0 interrupt enable status: 0x%x\n",
+ hclgevf_read_dev(&hdev->hw, HCLGEVF_MISC_VECTOR_REG_BASE));
+ dev_info(&hdev->pdev->dev, "vector0 interrupt status: 0x%x\n",
+ hclgevf_read_dev(&hdev->hw, HCLGE_COMM_VECTOR0_CMDQ_STATE_REG));
+ dev_info(&hdev->pdev->dev, "handshake status: 0x%x\n",
+ hclgevf_read_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG));
+ dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n",
+ hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING));
+ dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state);
+}
+
+static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev)
+{
+	/* recover the handshake status with IMP when the reset fails */
+ hclgevf_reset_handshake(hdev, true);
+ hdev->rst_stats.rst_fail_cnt++;
+ dev_err(&hdev->pdev->dev, "failed to reset VF(%u)\n",
+ hdev->rst_stats.rst_fail_cnt);
+
+ if (hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT)
+ set_bit(hdev->reset_type, &hdev->reset_pending);
+
+ if (hclgevf_is_reset_pending(hdev)) {
+ set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
+ hclgevf_reset_task_schedule(hdev);
+ } else {
+ set_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);
+ hclgevf_dump_rst_info(hdev);
+ }
+}
+
+static int hclgevf_reset_prepare(struct hclgevf_dev *hdev)
+{
+ int ret;
+
+ hdev->rst_stats.rst_cnt++;
+
+ /* perform reset of the stack & ae device for a client */
+ ret = hclgevf_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
+ if (ret)
+ return ret;
+
+ rtnl_lock();
+ /* bring down the nic to stop any ongoing TX/RX */
+ ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
+ rtnl_unlock();
+ if (ret)
+ return ret;
+
+ return hclgevf_reset_prepare_wait(hdev);
+}
+
+static int hclgevf_reset_rebuild(struct hclgevf_dev *hdev)
+{
+ int ret;
+
+ hdev->rst_stats.hw_rst_done_cnt++;
+ ret = hclgevf_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
+ if (ret)
+ return ret;
+
+ rtnl_lock();
+ /* now, re-initialize the nic client and ae device */
+ ret = hclgevf_reset_stack(hdev);
+ rtnl_unlock();
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "failed to reset VF stack\n");
+ return ret;
+ }
+
+ ret = hclgevf_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
+	/* only ignore the RoCE notify error once it has already failed
+	 * HCLGEVF_RESET_MAX_FAIL_CNT - 1 times
+	 */
+ if (ret &&
+ hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT - 1)
+ return ret;
+
+ ret = hclgevf_notify_roce_client(hdev, HNAE3_UP_CLIENT);
+ if (ret)
+ return ret;
+
+ hdev->last_reset_time = jiffies;
+ hdev->rst_stats.rst_done_cnt++;
+ hdev->rst_stats.rst_fail_cnt = 0;
+ clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);
+
+ return 0;
+}
+
+static void hclgevf_reset(struct hclgevf_dev *hdev)
+{
+ if (hclgevf_reset_prepare(hdev))
+ goto err_reset;
+
+	/* check if the VF could successfully fetch the reset completion
+	 * status from the hardware
+	 */
+ if (hclgevf_reset_wait(hdev)) {
+ /* can't do much in this situation, will disable VF */
+ dev_err(&hdev->pdev->dev,
+ "failed to fetch H/W reset completion status\n");
+ goto err_reset;
+ }
+
+ if (hclgevf_reset_rebuild(hdev))
+ goto err_reset;
+
+ return;
+
+err_reset:
+ hclgevf_reset_err_handle(hdev);
+}
+
+static enum hnae3_reset_type hclgevf_get_reset_level(unsigned long *addr)
+{
+ enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
+
+ /* return the highest priority reset level amongst all */
+ if (test_bit(HNAE3_VF_RESET, addr)) {
+ rst_level = HNAE3_VF_RESET;
+ clear_bit(HNAE3_VF_RESET, addr);
+ clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
+ clear_bit(HNAE3_VF_FUNC_RESET, addr);
+ } else if (test_bit(HNAE3_VF_FULL_RESET, addr)) {
+ rst_level = HNAE3_VF_FULL_RESET;
+ clear_bit(HNAE3_VF_FULL_RESET, addr);
+ clear_bit(HNAE3_VF_FUNC_RESET, addr);
+ } else if (test_bit(HNAE3_VF_PF_FUNC_RESET, addr)) {
+ rst_level = HNAE3_VF_PF_FUNC_RESET;
+ clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
+ clear_bit(HNAE3_VF_FUNC_RESET, addr);
+ } else if (test_bit(HNAE3_VF_FUNC_RESET, addr)) {
+ rst_level = HNAE3_VF_FUNC_RESET;
+ clear_bit(HNAE3_VF_FUNC_RESET, addr);
+ } else if (test_bit(HNAE3_FLR_RESET, addr)) {
+ rst_level = HNAE3_FLR_RESET;
+ clear_bit(HNAE3_FLR_RESET, addr);
+ }
+
+ return rst_level;
+}
+
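hclgevf_get_reset_level() always returns the most severe pending reset and clears every lower level that the chosen reset subsumes, so repeated calls drain the bitmap in priority order (VF reset, then full, PF-func, func, FLR). A standalone sketch of that "highest priority wins" selection, with arbitrary bit numbering used purely for illustration:

/* Standalone sketch of highest-priority reset selection; the bit
 * numbering below is arbitrary and only for illustration.
 */
#include <stdio.h>

enum { RST_FLR, RST_FUNC, RST_PF_FUNC, RST_FULL, RST_VF, RST_NONE };

static int pick_reset(unsigned long *pending)
{
	/* scan from most to least severe, clearing what the choice covers */
	static const struct { int level; unsigned long clears; } order[] = {
		{ RST_VF,      (1UL << RST_VF) | (1UL << RST_PF_FUNC) |
			       (1UL << RST_FUNC) },
		{ RST_FULL,    (1UL << RST_FULL) | (1UL << RST_FUNC) },
		{ RST_PF_FUNC, (1UL << RST_PF_FUNC) | (1UL << RST_FUNC) },
		{ RST_FUNC,    (1UL << RST_FUNC) },
		{ RST_FLR,     (1UL << RST_FLR) },
	};

	for (unsigned int i = 0; i < sizeof(order) / sizeof(order[0]); i++) {
		if (*pending & (1UL << order[i].level)) {
			*pending &= ~order[i].clears;
			return order[i].level;
		}
	}
	return RST_NONE;
}

int main(void)
{
	unsigned long pending = (1UL << RST_FUNC) | (1UL << RST_VF);

	printf("picked level %d, pending now 0x%lx\n",
	       pick_reset(&pending), pending);	/* picks RST_VF, clears FUNC too */
	return 0;
}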
+static void hclgevf_reset_event(struct pci_dev *pdev,
+ struct hnae3_handle *handle)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
+ struct hclgevf_dev *hdev = ae_dev->priv;
+
+ dev_info(&hdev->pdev->dev, "received reset request from VF enet\n");
+
+ if (hdev->default_reset_request)
+ hdev->reset_level =
+ hclgevf_get_reset_level(&hdev->default_reset_request);
+ else
+ hdev->reset_level = HNAE3_VF_FUNC_RESET;
+
+ /* reset of this VF requested */
+ set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state);
+ hclgevf_reset_task_schedule(hdev);
+
+ hdev->last_reset_time = jiffies;
+}
+
+static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
+ enum hnae3_reset_type rst_type)
+{
+ struct hclgevf_dev *hdev = ae_dev->priv;
+
+ set_bit(rst_type, &hdev->default_reset_request);
+}
+
+static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
+{
+ writel(en ? 1 : 0, vector->addr);
+}
+
+static void hclgevf_reset_prepare_general(struct hnae3_ae_dev *ae_dev,
+ enum hnae3_reset_type rst_type)
+{
+#define HCLGEVF_RESET_RETRY_WAIT_MS 500
+#define HCLGEVF_RESET_RETRY_CNT 5
+
+ struct hclgevf_dev *hdev = ae_dev->priv;
+ int retry_cnt = 0;
+ int ret;
+
+ while (retry_cnt++ < HCLGEVF_RESET_RETRY_CNT) {
+ down(&hdev->reset_sem);
+ set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
+ hdev->reset_type = rst_type;
+ ret = hclgevf_reset_prepare(hdev);
+ if (!ret && !hdev->reset_pending)
+ break;
+
+ dev_err(&hdev->pdev->dev,
+ "failed to prepare to reset, ret=%d, reset_pending:0x%lx, retry_cnt:%d\n",
+ ret, hdev->reset_pending, retry_cnt);
+ clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
+ up(&hdev->reset_sem);
+ msleep(HCLGEVF_RESET_RETRY_WAIT_MS);
+ }
+
+ /* disable misc vector before reset done */
+ hclgevf_enable_vector(&hdev->misc_vector, false);
+
+ if (hdev->reset_type == HNAE3_FLR_RESET)
+ hdev->rst_stats.flr_rst_cnt++;
+}
+
+static void hclgevf_reset_done(struct hnae3_ae_dev *ae_dev)
+{
+ struct hclgevf_dev *hdev = ae_dev->priv;
+ int ret;
+
+ hclgevf_enable_vector(&hdev->misc_vector, true);
+
+ ret = hclgevf_reset_rebuild(hdev);
+ if (ret)
+ dev_warn(&hdev->pdev->dev, "fail to rebuild, ret=%d\n",
+ ret);
+
+ hdev->reset_type = HNAE3_NONE_RESET;
+ clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
+ up(&hdev->reset_sem);
+}
+
+static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ return hdev->fw_version;
+}
+
+static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
+{
+ struct hclgevf_misc_vector *vector = &hdev->misc_vector;
+
+ vector->vector_irq = pci_irq_vector(hdev->pdev,
+ HCLGEVF_MISC_VECTOR_NUM);
+ vector->addr = hdev->hw.hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE;
+ /* vector status always valid for Vector 0 */
+ hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0;
+ hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq;
+
+ hdev->num_msi_left -= 1;
+ hdev->num_msi_used += 1;
+}
+
+void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev)
+{
+ if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
+ test_bit(HCLGEVF_STATE_SERVICE_INITED, &hdev->state) &&
+ !test_and_set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED,
+ &hdev->state))
+ mod_delayed_work(hclgevf_wq, &hdev->service_task, 0);
+}
+
+void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev)
+{
+ if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
+ !test_and_set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED,
+ &hdev->state))
+ mod_delayed_work(hclgevf_wq, &hdev->service_task, 0);
+}
+
+static void hclgevf_task_schedule(struct hclgevf_dev *hdev,
+ unsigned long delay)
+{
+ if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
+ !test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state))
+ mod_delayed_work(hclgevf_wq, &hdev->service_task, delay);
+}
+
+static void hclgevf_reset_service_task(struct hclgevf_dev *hdev)
+{
+#define HCLGEVF_MAX_RESET_ATTEMPTS_CNT 3
+
+ if (!test_and_clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state))
+ return;
+
+ down(&hdev->reset_sem);
+ set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
+
+ if (test_and_clear_bit(HCLGEVF_RESET_PENDING,
+ &hdev->reset_state)) {
+ /* PF has intimated that it is about to reset the hardware.
+ * We now have to poll & check if hardware has actually
+ * completed the reset sequence. On hardware reset completion,
+ * VF needs to reset the client and ae device.
+ */
+ hdev->reset_attempts = 0;
+
+ hdev->last_reset_time = jiffies;
+ hdev->reset_type =
+ hclgevf_get_reset_level(&hdev->reset_pending);
+ if (hdev->reset_type != HNAE3_NONE_RESET)
+ hclgevf_reset(hdev);
+ } else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
+ &hdev->reset_state)) {
+		/* we could be here when either of the below happens:
+		 * 1. reset was initiated due to a watchdog timeout caused by
+		 *    a. the IMP being reset earlier, which choked our TX and
+		 *       made the watchdog react and induce a VF reset. This
+		 *       also means our cmdq would be unreliable.
+		 *    b. a TX problem in some other lower layer (for example,
+		 *       the link layer not functioning properly).
+		 * 2. a VF reset might have been initiated due to some config
+		 *    change.
+		 *
+		 * NOTE: there is no clearer way to detect the above cases than
+		 * to react to the PF's response to this reset request. The PF
+		 * will ack cases 1b and 2, but we will get no indication of 1a
+		 * from the PF since the cmdq would be unreliable, i.e. mailbox
+		 * communication between PF and VF would be broken.
+		 *
+		 * if we never get into the pending state, it means either:
+		 * 1. the PF is not receiving our request, which could be due
+		 *    to an IMP reset
+		 * 2. the PF is in a bad state
+		 * We cannot do much for case 2, but as a first check we can
+		 * try resetting our PCIe + stack and see if that alleviates
+		 * the problem.
+		 */
+ if (hdev->reset_attempts > HCLGEVF_MAX_RESET_ATTEMPTS_CNT) {
+ /* prepare for full reset of stack + pcie interface */
+ set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending);
+
+ /* "defer" schedule the reset task again */
+ set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
+ } else {
+ hdev->reset_attempts++;
+
+ set_bit(hdev->reset_level, &hdev->reset_pending);
+ set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
+ }
+ hclgevf_reset_task_schedule(hdev);
+ }
+
+ hdev->reset_type = HNAE3_NONE_RESET;
+ clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
+ up(&hdev->reset_sem);
+}
+
+static void hclgevf_mailbox_service_task(struct hclgevf_dev *hdev)
+{
+ if (!test_and_clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state))
+ return;
+
+ if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state))
+ return;
+
+ hclgevf_mbx_async_handler(hdev);
+
+ clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
+}
+
+static void hclgevf_keep_alive(struct hclgevf_dev *hdev)
+{
+ struct hclge_vf_to_pf_msg send_msg;
+ int ret;
+
+ if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state))
+ return;
+
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_KEEP_ALIVE, 0);
+ ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "VF sends keep alive cmd failed(=%d)\n", ret);
+}
+
+static void hclgevf_periodic_service_task(struct hclgevf_dev *hdev)
+{
+ unsigned long delta = round_jiffies_relative(HZ);
+ struct hnae3_handle *handle = &hdev->nic;
+
+ if (test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state) ||
+ test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state))
+ return;
+
+ if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
+ delta = jiffies - hdev->last_serv_processed;
+
+ if (delta < round_jiffies_relative(HZ)) {
+ delta = round_jiffies_relative(HZ) - delta;
+ goto out;
+ }
+ }
+
+ hdev->serv_processed_cnt++;
+ if (!(hdev->serv_processed_cnt % HCLGEVF_KEEP_ALIVE_TASK_INTERVAL))
+ hclgevf_keep_alive(hdev);
+
+ if (test_bit(HCLGEVF_STATE_DOWN, &hdev->state)) {
+ hdev->last_serv_processed = jiffies;
+ goto out;
+ }
+
+ if (!(hdev->serv_processed_cnt % HCLGEVF_STATS_TIMER_INTERVAL))
+ hclge_comm_tqps_update_stats(handle, &hdev->hw.hw);
+
+	/* The VF does not need to request the link status when this bit is
+	 * set, because the PF pushes its link status to the VFs whenever the
+	 * link status changes.
+	 */
+ if (!test_bit(HCLGEVF_STATE_PF_PUSH_LINK_STATUS, &hdev->state))
+ hclgevf_request_link_info(hdev);
+
+ hclgevf_update_link_mode(hdev);
+
+ hclgevf_sync_vlan_filter(hdev);
+
+ hclgevf_sync_mac_table(hdev);
+
+ hclgevf_sync_promisc_mode(hdev);
+
+ hdev->last_serv_processed = jiffies;
+
+out:
+ hclgevf_task_schedule(hdev, delta);
+}
+
+static void hclgevf_service_task(struct work_struct *work)
+{
+ struct hclgevf_dev *hdev = container_of(work, struct hclgevf_dev,
+ service_task.work);
+
+ hclgevf_reset_service_task(hdev);
+ hclgevf_mailbox_service_task(hdev);
+ hclgevf_periodic_service_task(hdev);
+
+ /* Handle reset and mbx again in case periodical task delays the
+ * handling by calling hclgevf_task_schedule() in
+ * hclgevf_periodic_service_task()
+ */
+ hclgevf_reset_service_task(hdev);
+ hclgevf_mailbox_service_task(hdev);
+}
+
+static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
+{
+ hclgevf_write_dev(&hdev->hw, HCLGE_COMM_VECTOR0_CMDQ_SRC_REG, regclr);
+}
+
+static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
+ u32 *clearval)
+{
+ u32 val, cmdq_stat_reg, rst_ing_reg;
+
+ /* fetch the events from their corresponding regs */
+ cmdq_stat_reg = hclgevf_read_dev(&hdev->hw,
+ HCLGE_COMM_VECTOR0_CMDQ_STATE_REG);
+ if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_stat_reg) {
+ rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
+ dev_info(&hdev->pdev->dev,
+ "receive reset interrupt 0x%x!\n", rst_ing_reg);
+ set_bit(HNAE3_VF_RESET, &hdev->reset_pending);
+ set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
+ set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
+ *clearval = ~(1U << HCLGEVF_VECTOR0_RST_INT_B);
+ hdev->rst_stats.vf_rst_cnt++;
+		/* set the VF hardware reset status; the PF will clear this
+		 * status once the PF has finished initializing.
+		 */
+ val = hclgevf_read_dev(&hdev->hw, HCLGEVF_VF_RST_ING);
+ hclgevf_write_dev(&hdev->hw, HCLGEVF_VF_RST_ING,
+ val | HCLGEVF_VF_RST_ING_BIT);
+ return HCLGEVF_VECTOR0_EVENT_RST;
+ }
+
+ /* check for vector0 mailbox(=CMDQ RX) event source */
+ if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_stat_reg) {
+		/* for revision 0x21, an interrupt is cleared by writing 0 to
+		 * its bit in the clear register, while writing 1 keeps the
+		 * old value.
+		 * for revision 0x20, the clear register is a read & write
+		 * register, so we should write 0 only to the bit we are
+		 * handling and keep the other bits as in cmdq_stat_reg.
+		 */
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
+ *clearval = ~(1U << HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
+ else
+ *clearval = cmdq_stat_reg &
+ ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
+
+ return HCLGEVF_VECTOR0_EVENT_MBX;
+ }
+
+ /* print other vector0 event source */
+ dev_info(&hdev->pdev->dev,
+ "vector 0 interrupt from unknown source, cmdq_src = %#x\n",
+ cmdq_stat_reg);
+
+ return HCLGEVF_VECTOR0_EVENT_OTHER;
+}
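+
+/* Worked example for hclgevf_check_evt_cause() above: for the CMDQ RX event,
+ * HCLGEVF_VECTOR0_RX_CMDQ_INT_B is 1, so on a V2 or later device *clearval
+ * becomes ~(1U << 1) = 0xfffffffd, i.e. bit 1 is written back as 0 (clear)
+ * and every other bit as 1 (keep). On a revision 0x20 device the same event
+ * yields the value read from cmdq_stat_reg with bit 1 masked off, so only
+ * the handled bit is cleared on write-back.
+ */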
+
+static void hclgevf_reset_timer(struct timer_list *t)
+{
+ struct hclgevf_dev *hdev = from_timer(hdev, t, reset_timer);
+
+ hclgevf_clear_event_cause(hdev, HCLGEVF_VECTOR0_EVENT_RST);
+ hclgevf_reset_task_schedule(hdev);
+}
+
+static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
+{
+#define HCLGEVF_RESET_DELAY 5
+
+ enum hclgevf_evt_cause event_cause;
+ struct hclgevf_dev *hdev = data;
+ u32 clearval;
+
+ hclgevf_enable_vector(&hdev->misc_vector, false);
+ event_cause = hclgevf_check_evt_cause(hdev, &clearval);
+ if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER)
+ hclgevf_clear_event_cause(hdev, clearval);
+
+ switch (event_cause) {
+ case HCLGEVF_VECTOR0_EVENT_RST:
+ mod_timer(&hdev->reset_timer,
+ jiffies + msecs_to_jiffies(HCLGEVF_RESET_DELAY));
+ break;
+ case HCLGEVF_VECTOR0_EVENT_MBX:
+ hclgevf_mbx_handler(hdev);
+ break;
+ default:
+ break;
+ }
+
+ hclgevf_enable_vector(&hdev->misc_vector, true);
+
+ return IRQ_HANDLED;
+}
+
+static int hclgevf_configure(struct hclgevf_dev *hdev)
+{
+ int ret;
+
+ hdev->gro_en = true;
+
+ ret = hclgevf_get_basic_info(hdev);
+ if (ret)
+ return ret;
+
+ /* get current port based vlan state from PF */
+ ret = hclgevf_get_port_base_vlan_filter_state(hdev);
+ if (ret)
+ return ret;
+
+ /* get queue configuration from PF */
+ ret = hclgevf_get_queue_info(hdev);
+ if (ret)
+ return ret;
+
+ /* get queue depth info from PF */
+ ret = hclgevf_get_queue_depth(hdev);
+ if (ret)
+ return ret;
+
+ return hclgevf_get_pf_media_type(hdev);
+}
+
+static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev)
+{
+ struct pci_dev *pdev = ae_dev->pdev;
+ struct hclgevf_dev *hdev;
+
+ hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
+ if (!hdev)
+ return -ENOMEM;
+
+ hdev->pdev = pdev;
+ hdev->ae_dev = ae_dev;
+ ae_dev->priv = hdev;
+
+ return 0;
+}
+
+static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
+{
+ struct hnae3_handle *roce = &hdev->roce;
+ struct hnae3_handle *nic = &hdev->nic;
+
+ roce->rinfo.num_vectors = hdev->num_roce_msix;
+
+ if (hdev->num_msi_left < roce->rinfo.num_vectors ||
+ hdev->num_msi_left == 0)
+ return -EINVAL;
+
+ roce->rinfo.base_vector = hdev->roce_base_msix_offset;
+
+ roce->rinfo.netdev = nic->kinfo.netdev;
+ roce->rinfo.roce_io_base = hdev->hw.hw.io_base;
+ roce->rinfo.roce_mem_base = hdev->hw.hw.mem_base;
+
+ roce->pdev = nic->pdev;
+ roce->ae_algo = nic->ae_algo;
+ roce->numa_node_mask = nic->numa_node_mask;
+
+ return 0;
+}
+
+static int hclgevf_config_gro(struct hclgevf_dev *hdev)
+{
+ struct hclgevf_cfg_gro_status_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ if (!hnae3_ae_dev_gro_supported(hdev->ae_dev))
+ return 0;
+
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG,
+ false);
+ req = (struct hclgevf_cfg_gro_status_cmd *)desc.data;
+
+ req->gro_en = hdev->gro_en ? 1 : 0;
+
+ ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "VF GRO hardware config cmd failed, ret = %d.\n", ret);
+
+ return ret;
+}
+
+static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
+{
+ struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
+ u16 tc_offset[HCLGE_COMM_MAX_TC_NUM];
+ u16 tc_valid[HCLGE_COMM_MAX_TC_NUM];
+ u16 tc_size[HCLGE_COMM_MAX_TC_NUM];
+ int ret;
+
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
+ ret = hclge_comm_set_rss_algo_key(&hdev->hw.hw,
+ rss_cfg->rss_algo,
+ rss_cfg->rss_hash_key);
+ if (ret)
+ return ret;
+
+ ret = hclge_comm_set_rss_input_tuple(&hdev->hw.hw, rss_cfg);
+ if (ret)
+ return ret;
+ }
+
+ ret = hclge_comm_set_rss_indir_table(hdev->ae_dev, &hdev->hw.hw,
+ rss_cfg->rss_indirection_tbl);
+ if (ret)
+ return ret;
+
+ hclge_comm_get_rss_tc_info(rss_cfg->rss_size, hdev->hw_tc_map,
+ tc_offset, tc_valid, tc_size);
+
+ return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset,
+ tc_valid, tc_size);
+}
+
+static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
+{
+ struct hnae3_handle *nic = &hdev->nic;
+ int ret;
+
+ ret = hclgevf_en_hw_strip_rxvtag(nic, true);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to enable rx vlan offload, ret = %d\n", ret);
+ return ret;
+ }
+
+ return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
+ false);
+}
+
+static void hclgevf_flush_link_update(struct hclgevf_dev *hdev)
+{
+#define HCLGEVF_FLUSH_LINK_TIMEOUT 100000
+
+ unsigned long last = hdev->serv_processed_cnt;
+ int i = 0;
+
+ while (test_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state) &&
+ i++ < HCLGEVF_FLUSH_LINK_TIMEOUT &&
+ last == hdev->serv_processed_cnt)
+ usleep_range(1, 1);
+}
+
+static void hclgevf_set_timer_task(struct hnae3_handle *handle, bool enable)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ if (enable) {
+ hclgevf_task_schedule(hdev, 0);
+ } else {
+ set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
+
+ /* flush memory to make sure DOWN is seen by service task */
+ smp_mb__before_atomic();
+ hclgevf_flush_link_update(hdev);
+ }
+}
+
+static int hclgevf_ae_start(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);
+ clear_bit(HCLGEVF_STATE_PF_PUSH_LINK_STATUS, &hdev->state);
+
+ hclge_comm_reset_tqp_stats(handle);
+
+ hclgevf_request_link_info(hdev);
+
+ hclgevf_update_link_mode(hdev);
+
+ return 0;
+}
+
+static void hclgevf_ae_stop(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
+
+ if (hdev->reset_type != HNAE3_VF_RESET)
+ hclgevf_reset_tqp(handle);
+
+ hclge_comm_reset_tqp_stats(handle);
+ hclgevf_update_link_status(hdev, 0);
+}
+
+static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive)
+{
+#define HCLGEVF_STATE_ALIVE 1
+#define HCLGEVF_STATE_NOT_ALIVE 0
+
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hclge_vf_to_pf_msg send_msg;
+
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_ALIVE, 0);
+ send_msg.data[0] = alive ? HCLGEVF_STATE_ALIVE :
+ HCLGEVF_STATE_NOT_ALIVE;
+ return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
+}
+
+static int hclgevf_client_start(struct hnae3_handle *handle)
+{
+ return hclgevf_set_alive(handle, true);
+}
+
+static void hclgevf_client_stop(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ int ret;
+
+ ret = hclgevf_set_alive(handle, false);
+ if (ret)
+ dev_warn(&hdev->pdev->dev,
+ "%s failed %d\n", __func__, ret);
+}
+
+static void hclgevf_state_init(struct hclgevf_dev *hdev)
+{
+ clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
+ clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
+ clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);
+
+ INIT_DELAYED_WORK(&hdev->service_task, hclgevf_service_task);
+
+ mutex_init(&hdev->mbx_resp.mbx_mutex);
+ sema_init(&hdev->reset_sem, 1);
+
+ spin_lock_init(&hdev->mac_table.mac_list_lock);
+ INIT_LIST_HEAD(&hdev->mac_table.uc_mac_list);
+ INIT_LIST_HEAD(&hdev->mac_table.mc_mac_list);
+
+ /* bring the device down */
+ set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
+}
+
+static void hclgevf_state_uninit(struct hclgevf_dev *hdev)
+{
+ set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
+ set_bit(HCLGEVF_STATE_REMOVING, &hdev->state);
+
+ if (hdev->service_task.work.func)
+ cancel_delayed_work_sync(&hdev->service_task);
+
+ mutex_destroy(&hdev->mbx_resp.mbx_mutex);
+}
+
+static int hclgevf_init_msi(struct hclgevf_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ int vectors;
+ int i;
+
+ if (hnae3_dev_roce_supported(hdev))
+ vectors = pci_alloc_irq_vectors(pdev,
+ hdev->roce_base_msix_offset + 1,
+ hdev->num_msi,
+ PCI_IRQ_MSIX);
+ else
+ vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
+ hdev->num_msi,
+ PCI_IRQ_MSI | PCI_IRQ_MSIX);
+
+ if (vectors < 0) {
+ dev_err(&pdev->dev,
+ "failed(%d) to allocate MSI/MSI-X vectors\n",
+ vectors);
+ return vectors;
+ }
+ if (vectors < hdev->num_msi)
+ dev_warn(&hdev->pdev->dev,
+ "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
+ hdev->num_msi, vectors);
+
+ hdev->num_msi = vectors;
+ hdev->num_msi_left = vectors;
+
+ hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
+ sizeof(u16), GFP_KERNEL);
+ if (!hdev->vector_status) {
+ pci_free_irq_vectors(pdev);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < hdev->num_msi; i++)
+ hdev->vector_status[i] = HCLGEVF_INVALID_VPORT;
+
+ hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
+ sizeof(int), GFP_KERNEL);
+ if (!hdev->vector_irq) {
+ devm_kfree(&pdev->dev, hdev->vector_status);
+ pci_free_irq_vectors(pdev);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void hclgevf_uninit_msi(struct hclgevf_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+
+ devm_kfree(&pdev->dev, hdev->vector_status);
+ devm_kfree(&pdev->dev, hdev->vector_irq);
+ pci_free_irq_vectors(pdev);
+}
+
+static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
+{
+ int ret;
+
+ hclgevf_get_misc_vector(hdev);
+
+ snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
+ HCLGEVF_NAME, pci_name(hdev->pdev));
+ ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle,
+ 0, hdev->misc_vector.name, hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n",
+ hdev->misc_vector.vector_irq);
+ return ret;
+ }
+
+ hclgevf_clear_event_cause(hdev, 0);
+
+ /* enable misc. vector(vector 0) */
+ hclgevf_enable_vector(&hdev->misc_vector, true);
+
+ return ret;
+}
+
+static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev)
+{
+ /* disable misc vector(vector 0) */
+ hclgevf_enable_vector(&hdev->misc_vector, false);
+ synchronize_irq(hdev->misc_vector.vector_irq);
+ free_irq(hdev->misc_vector.vector_irq, hdev);
+ hclgevf_free_vector(hdev, 0);
+}
+
+static void hclgevf_info_show(struct hclgevf_dev *hdev)
+{
+ struct device *dev = &hdev->pdev->dev;
+
+ dev_info(dev, "VF info begin:\n");
+
+ dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
+ dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
+ dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
+ dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
+ dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
+ dev_info(dev, "PF media type of this VF: %u\n",
+ hdev->hw.mac.media_type);
+
+ dev_info(dev, "VF info end.\n");
+}
+
+static int hclgevf_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
+ struct hnae3_client *client)
+{
+ struct hclgevf_dev *hdev = ae_dev->priv;
+ int rst_cnt = hdev->rst_stats.rst_cnt;
+ int ret;
+
+ ret = client->ops->init_instance(&hdev->nic);
+ if (ret)
+ return ret;
+
+ set_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);
+ if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
+ rst_cnt != hdev->rst_stats.rst_cnt) {
+ clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);
+
+ client->ops->uninit_instance(&hdev->nic, 0);
+ return -EBUSY;
+ }
+
+ hnae3_set_client_init_flag(client, ae_dev, 1);
+
+ if (netif_msg_drv(&hdev->nic))
+ hclgevf_info_show(hdev);
+
+ return 0;
+}
+
+static int hclgevf_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
+ struct hnae3_client *client)
+{
+ struct hclgevf_dev *hdev = ae_dev->priv;
+ int ret;
+
+ if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
+ !hdev->nic_client)
+ return 0;
+
+ ret = hclgevf_init_roce_base_info(hdev);
+ if (ret)
+ return ret;
+
+ ret = client->ops->init_instance(&hdev->roce);
+ if (ret)
+ return ret;
+
+ set_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state);
+ hnae3_set_client_init_flag(client, ae_dev, 1);
+
+ return 0;
+}
+
+static int hclgevf_init_client_instance(struct hnae3_client *client,
+ struct hnae3_ae_dev *ae_dev)
+{
+ struct hclgevf_dev *hdev = ae_dev->priv;
+ int ret;
+
+ switch (client->type) {
+ case HNAE3_CLIENT_KNIC:
+ hdev->nic_client = client;
+ hdev->nic.client = client;
+
+ ret = hclgevf_init_nic_client_instance(ae_dev, client);
+ if (ret)
+ goto clear_nic;
+
+ ret = hclgevf_init_roce_client_instance(ae_dev,
+ hdev->roce_client);
+ if (ret)
+ goto clear_roce;
+
+ break;
+ case HNAE3_CLIENT_ROCE:
+ if (hnae3_dev_roce_supported(hdev)) {
+ hdev->roce_client = client;
+ hdev->roce.client = client;
+ }
+
+ ret = hclgevf_init_roce_client_instance(ae_dev, client);
+ if (ret)
+ goto clear_roce;
+
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+
+clear_nic:
+ hdev->nic_client = NULL;
+ hdev->nic.client = NULL;
+ return ret;
+clear_roce:
+ hdev->roce_client = NULL;
+ hdev->roce.client = NULL;
+ return ret;
+}
+
+static void hclgevf_uninit_client_instance(struct hnae3_client *client,
+ struct hnae3_ae_dev *ae_dev)
+{
+ struct hclgevf_dev *hdev = ae_dev->priv;
+
+ /* un-init roce, if it exists */
+ if (hdev->roce_client) {
+ while (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
+ msleep(HCLGEVF_WAIT_RESET_DONE);
+ clear_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state);
+
+ hdev->roce_client->ops->uninit_instance(&hdev->roce, 0);
+ hdev->roce_client = NULL;
+ hdev->roce.client = NULL;
+ }
+
+ /* un-init nic/unic, if this was not called by roce client */
+ if (client->ops->uninit_instance && hdev->nic_client &&
+ client->type != HNAE3_CLIENT_ROCE) {
+ while (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
+ msleep(HCLGEVF_WAIT_RESET_DONE);
+ clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);
+
+ client->ops->uninit_instance(&hdev->nic, 0);
+ hdev->nic_client = NULL;
+ hdev->nic.client = NULL;
+ }
+}
+
+static int hclgevf_dev_mem_map(struct hclgevf_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ struct hclgevf_hw *hw = &hdev->hw;
+
+ /* if the device has no device memory, return directly */
+ if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGEVF_MEM_BAR)))
+ return 0;
+
+ hw->hw.mem_base =
+ devm_ioremap_wc(&pdev->dev,
+ pci_resource_start(pdev, HCLGEVF_MEM_BAR),
+ pci_resource_len(pdev, HCLGEVF_MEM_BAR));
+ if (!hw->hw.mem_base) {
+ dev_err(&pdev->dev, "failed to map device memory\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int hclgevf_pci_init(struct hclgevf_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ struct hclgevf_hw *hw;
+ int ret;
+
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable PCI device\n");
+ return ret;
+ }
+
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (ret) {
+ dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting");
+ goto err_disable_device;
+ }
+
+ ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME);
+ if (ret) {
+ dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
+ goto err_disable_device;
+ }
+
+ pci_set_master(pdev);
+ hw = &hdev->hw;
+ hw->hw.io_base = pci_iomap(pdev, 2, 0);
+ if (!hw->hw.io_base) {
+ dev_err(&pdev->dev, "can't map configuration register space\n");
+ ret = -ENOMEM;
+ goto err_release_regions;
+ }
+
+ ret = hclgevf_dev_mem_map(hdev);
+ if (ret)
+ goto err_unmap_io_base;
+
+ return 0;
+
+err_unmap_io_base:
+ pci_iounmap(pdev, hdev->hw.hw.io_base);
+err_release_regions:
+ pci_release_regions(pdev);
+err_disable_device:
+ pci_disable_device(pdev);
+
+ return ret;
+}
+
+static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+
+ if (hdev->hw.hw.mem_base)
+ devm_iounmap(&pdev->dev, hdev->hw.hw.mem_base);
+
+ pci_iounmap(pdev, hdev->hw.hw.io_base);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+}
+
+static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
+{
+ struct hclgevf_query_res_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RSRC, true);
+ ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "query vf resource failed, ret = %d.\n", ret);
+ return ret;
+ }
+
+ req = (struct hclgevf_query_res_cmd *)desc.data;
+
+ if (hnae3_dev_roce_supported(hdev)) {
+ hdev->roce_base_msix_offset =
+ hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee),
+ HCLGEVF_MSIX_OFT_ROCEE_M,
+ HCLGEVF_MSIX_OFT_ROCEE_S);
+ hdev->num_roce_msix =
+ hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number),
+ HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
+
+ /* the NIC's MSI-X vector count is always equal to the RoCE's */
+ hdev->num_nic_msix = hdev->num_roce_msix;
+
+ /* The VF has both NIC and RoCE vectors; the NIC vectors are laid
+ * out before the RoCE vectors, and the RoCE offset is fixed at 64.
+ */
+ hdev->num_msi = hdev->num_roce_msix +
+ hdev->roce_base_msix_offset;
+ } else {
+ hdev->num_msi =
+ hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number),
+ HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
+
+ hdev->num_nic_msix = hdev->num_msi;
+ }
+
+ if (hdev->num_nic_msix < HNAE3_MIN_VECTOR_NUM) {
+ dev_err(&hdev->pdev->dev,
+ "Just %u msi resources, not enough for vf(min:2).\n",
+ hdev->num_nic_msix);
+ return -EINVAL;
+ }
+
+ return 0;
+}
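+
+/* Worked example for hclgevf_query_vf_resource() above, assuming a
+ * RoCE-capable VF that reports roce_base_msix_offset = 64 and
+ * num_roce_msix = 32: the NIC vectors live in the first 64 slots, the RoCE
+ * vectors start at slot 64, and num_msi = 64 + 32 = 96 with
+ * num_nic_msix = num_roce_msix = 32.
+ */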
+
+static void hclgevf_set_default_dev_specs(struct hclgevf_dev *hdev)
+{
+#define HCLGEVF_MAX_NON_TSO_BD_NUM 8U
+
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+
+ ae_dev->dev_specs.max_non_tso_bd_num =
+ HCLGEVF_MAX_NON_TSO_BD_NUM;
+ ae_dev->dev_specs.rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE;
+ ae_dev->dev_specs.rss_key_size = HCLGE_COMM_RSS_KEY_SIZE;
+ ae_dev->dev_specs.max_int_gl = HCLGEVF_DEF_MAX_INT_GL;
+ ae_dev->dev_specs.max_frm_size = HCLGEVF_MAC_MAX_FRAME;
+}
+
+static void hclgevf_parse_dev_specs(struct hclgevf_dev *hdev,
+ struct hclge_desc *desc)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+ struct hclgevf_dev_specs_0_cmd *req0;
+ struct hclgevf_dev_specs_1_cmd *req1;
+
+ req0 = (struct hclgevf_dev_specs_0_cmd *)desc[0].data;
+ req1 = (struct hclgevf_dev_specs_1_cmd *)desc[1].data;
+
+ ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
+ ae_dev->dev_specs.rss_ind_tbl_size =
+ le16_to_cpu(req0->rss_ind_tbl_size);
+ ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
+ ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
+ ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
+ ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
+}
+
+static void hclgevf_check_dev_specs(struct hclgevf_dev *hdev)
+{
+ struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;
+
+ if (!dev_specs->max_non_tso_bd_num)
+ dev_specs->max_non_tso_bd_num = HCLGEVF_MAX_NON_TSO_BD_NUM;
+ if (!dev_specs->rss_ind_tbl_size)
+ dev_specs->rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE;
+ if (!dev_specs->rss_key_size)
+ dev_specs->rss_key_size = HCLGE_COMM_RSS_KEY_SIZE;
+ if (!dev_specs->max_int_gl)
+ dev_specs->max_int_gl = HCLGEVF_DEF_MAX_INT_GL;
+ if (!dev_specs->max_frm_size)
+ dev_specs->max_frm_size = HCLGEVF_MAC_MAX_FRAME;
+}
+
+static int hclgevf_query_dev_specs(struct hclgevf_dev *hdev)
+{
+ struct hclge_desc desc[HCLGEVF_QUERY_DEV_SPECS_BD_NUM];
+ int ret;
+ int i;
+
+ /* set default specifications as devices lower than version V3 do not
+ * support querying specifications from firmware.
+ */
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
+ hclgevf_set_default_dev_specs(hdev);
+ return 0;
+ }
+
+ for (i = 0; i < HCLGEVF_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
+ hclgevf_cmd_setup_basic_desc(&desc[i],
+ HCLGE_OPC_QUERY_DEV_SPECS, true);
+ desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ }
+ hclgevf_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);
+
+ ret = hclgevf_cmd_send(&hdev->hw, desc, HCLGEVF_QUERY_DEV_SPECS_BD_NUM);
+ if (ret)
+ return ret;
+
+ hclgevf_parse_dev_specs(hdev, desc);
+ hclgevf_check_dev_specs(hdev);
+
+ return 0;
+}
+
+static int hclgevf_pci_reset(struct hclgevf_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ int ret = 0;
+
+ if ((hdev->reset_type == HNAE3_VF_FULL_RESET ||
+ hdev->reset_type == HNAE3_FLR_RESET) &&
+ test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
+ hclgevf_misc_irq_uninit(hdev);
+ hclgevf_uninit_msi(hdev);
+ clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
+ }
+
+ if (!test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
+ pci_set_master(pdev);
+ ret = hclgevf_init_msi(hdev);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed(%d) to init MSI/MSI-X\n", ret);
+ return ret;
+ }
+
+ ret = hclgevf_misc_irq_init(hdev);
+ if (ret) {
+ hclgevf_uninit_msi(hdev);
+ dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
+ ret);
+ return ret;
+ }
+
+ set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
+ }
+
+ return ret;
+}
+
+static int hclgevf_clear_vport_list(struct hclgevf_dev *hdev)
+{
+ struct hclge_vf_to_pf_msg send_msg;
+
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_HANDLE_VF_TBL,
+ HCLGE_MBX_VPORT_LIST_CLEAR);
+ return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
+}
+
+static void hclgevf_init_rxd_adv_layout(struct hclgevf_dev *hdev)
+{
+ if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
+ hclgevf_write_dev(&hdev->hw, HCLGEVF_RXD_ADV_LAYOUT_EN_REG, 1);
+}
+
+static void hclgevf_uninit_rxd_adv_layout(struct hclgevf_dev *hdev)
+{
+ if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
+ hclgevf_write_dev(&hdev->hw, HCLGEVF_RXD_ADV_LAYOUT_EN_REG, 0);
+}
+
+static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ int ret;
+
+ ret = hclgevf_pci_reset(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "pci reset failed %d\n", ret);
+ return ret;
+ }
+
+ hclgevf_arq_init(hdev);
+ ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw,
+ &hdev->fw_version, false,
+ hdev->reset_pending);
+ if (ret) {
+ dev_err(&pdev->dev, "cmd failed %d\n", ret);
+ return ret;
+ }
+
+ ret = hclgevf_rss_init_hw(hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed(%d) to initialize RSS\n", ret);
+ return ret;
+ }
+
+ ret = hclgevf_config_gro(hdev);
+ if (ret)
+ return ret;
+
+ ret = hclgevf_init_vlan_config(hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed(%d) to initialize VLAN config\n", ret);
+ return ret;
+ }
+
+ /* get current port based vlan state from PF */
+ ret = hclgevf_get_port_base_vlan_filter_state(hdev);
+ if (ret)
+ return ret;
+
+ set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);
+
+ hclgevf_init_rxd_adv_layout(hdev);
+
+ dev_info(&hdev->pdev->dev, "Reset done\n");
+
+ return 0;
+}
+
+static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ int ret;
+
+ ret = hclgevf_pci_init(hdev);
+ if (ret)
+ return ret;
+
+ ret = hclgevf_devlink_init(hdev);
+ if (ret)
+ goto err_devlink_init;
+
+ ret = hclge_comm_cmd_queue_init(hdev->pdev, &hdev->hw.hw);
+ if (ret)
+ goto err_cmd_queue_init;
+
+ hclgevf_arq_init(hdev);
+ ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw,
+ &hdev->fw_version, false,
+ hdev->reset_pending);
+ if (ret)
+ goto err_cmd_init;
+
+ /* Get vf resource */
+ ret = hclgevf_query_vf_resource(hdev);
+ if (ret)
+ goto err_cmd_init;
+
+ ret = hclgevf_query_dev_specs(hdev);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to query dev specifications, ret = %d\n", ret);
+ goto err_cmd_init;
+ }
+
+ ret = hclgevf_init_msi(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
+ goto err_cmd_init;
+ }
+
+ hclgevf_state_init(hdev);
+ hdev->reset_level = HNAE3_VF_FUNC_RESET;
+ hdev->reset_type = HNAE3_NONE_RESET;
+
+ ret = hclgevf_misc_irq_init(hdev);
+ if (ret)
+ goto err_misc_irq_init;
+
+ set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
+
+ ret = hclgevf_configure(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret);
+ goto err_config;
+ }
+
+ ret = hclgevf_alloc_tqps(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret);
+ goto err_config;
+ }
+
+ ret = hclgevf_set_handle_info(hdev);
+ if (ret)
+ goto err_config;
+
+ ret = hclgevf_config_gro(hdev);
+ if (ret)
+ goto err_config;
+
+ /* Initialize RSS for this VF */
+ ret = hclge_comm_rss_init_cfg(&hdev->nic, hdev->ae_dev,
+ &hdev->rss_cfg);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
+ goto err_config;
+ }
+
+ ret = hclgevf_rss_init_hw(hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed(%d) to initialize RSS\n", ret);
+ goto err_config;
+ }
+
+ /* ensure the VF table list is empty before init */
+ ret = hclgevf_clear_vport_list(hdev);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to clear tbl list configuration, ret = %d.\n",
+ ret);
+ goto err_config;
+ }
+
+ ret = hclgevf_init_vlan_config(hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed(%d) to initialize VLAN config\n", ret);
+ goto err_config;
+ }
+
+ hclgevf_init_rxd_adv_layout(hdev);
+
+ set_bit(HCLGEVF_STATE_SERVICE_INITED, &hdev->state);
+
+ hdev->last_reset_time = jiffies;
+ dev_info(&hdev->pdev->dev, "finished initializing %s driver\n",
+ HCLGEVF_DRIVER_NAME);
+
+ hclgevf_task_schedule(hdev, round_jiffies_relative(HZ));
+ timer_setup(&hdev->reset_timer, hclgevf_reset_timer, 0);
+
+ return 0;
+
+err_config:
+ hclgevf_misc_irq_uninit(hdev);
+err_misc_irq_init:
+ hclgevf_state_uninit(hdev);
+ hclgevf_uninit_msi(hdev);
+err_cmd_init:
+ hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
+err_cmd_queue_init:
+ hclgevf_devlink_uninit(hdev);
+err_devlink_init:
+ hclgevf_pci_uninit(hdev);
+ clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
+ return ret;
+}
+
+static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
+{
+ struct hclge_vf_to_pf_msg send_msg;
+
+ hclgevf_state_uninit(hdev);
+ hclgevf_uninit_rxd_adv_layout(hdev);
+
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_VF_UNINIT, 0);
+ hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
+
+ if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
+ hclgevf_misc_irq_uninit(hdev);
+ hclgevf_uninit_msi(hdev);
+ }
+
+ hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
+ hclgevf_devlink_uninit(hdev);
+ hclgevf_pci_uninit(hdev);
+ hclgevf_uninit_mac_list(hdev);
+}
+
+static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
+{
+ struct pci_dev *pdev = ae_dev->pdev;
+ int ret;
+
+ ret = hclgevf_alloc_hdev(ae_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "hclge device allocation failed\n");
+ return ret;
+ }
+
+ ret = hclgevf_init_hdev(ae_dev->priv);
+ if (ret) {
+ dev_err(&pdev->dev, "hclge device initialization failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
+{
+ struct hclgevf_dev *hdev = ae_dev->priv;
+
+ hclgevf_uninit_hdev(hdev);
+ ae_dev->priv = NULL;
+}
+
+static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev)
+{
+ struct hnae3_handle *nic = &hdev->nic;
+ struct hnae3_knic_private_info *kinfo = &nic->kinfo;
+
+ return min_t(u32, hdev->rss_size_max,
+ hdev->num_tqps / kinfo->tc_info.num_tc);
+}
+
+/**
+ * hclgevf_get_channels - Get the current channels enabled and max supported.
+ * @handle: hardware information for network interface
+ * @ch: ethtool channels structure
+ *
+ * We don't support separate tx and rx queues as channels. The other count
+ * represents how many queues are being used for control. max_combined counts
+ * how many queue pairs we can support. They may not be mapped 1 to 1 with
+ * q_vectors since we support a lot more queue pairs than q_vectors.
+ **/
+static void hclgevf_get_channels(struct hnae3_handle *handle,
+ struct ethtool_channels *ch)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ ch->max_combined = hclgevf_get_max_channels(hdev);
+ ch->other_count = 0;
+ ch->max_other = 0;
+ ch->combined_count = handle->kinfo.rss_size;
+}
+
+static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle,
+ u16 *alloc_tqps, u16 *max_rss_size)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ *alloc_tqps = hdev->num_tqps;
+ *max_rss_size = hdev->rss_size_max;
+}
+
+static void hclgevf_update_rss_size(struct hnae3_handle *handle,
+ u32 new_tqps_num)
+{
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ u16 max_rss_size;
+
+ kinfo->req_rss_size = new_tqps_num;
+
+ max_rss_size = min_t(u16, hdev->rss_size_max,
+ hdev->num_tqps / kinfo->tc_info.num_tc);
+
+ /* Use the user's configuration when it is not larger than
+ * max_rss_size, otherwise, use the maximum specification value.
+ */
+ if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size &&
+ kinfo->req_rss_size <= max_rss_size)
+ kinfo->rss_size = kinfo->req_rss_size;
+ else if (kinfo->rss_size > max_rss_size ||
+ (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size))
+ kinfo->rss_size = max_rss_size;
+
+ kinfo->num_tqps = kinfo->tc_info.num_tc * kinfo->rss_size;
+}
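+
+/* Worked examples for hclgevf_update_rss_size() above, with max_rss_size =
+ * 16: a user request of 8 queues is honoured (rss_size becomes 8); a request
+ * of 32 exceeds the maximum and leaves rss_size unchanged; with no request
+ * and rss_size below 16, rss_size is raised to 16; and an rss_size above 16
+ * is clamped back down to 16.
+ */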
+
+static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
+ bool rxfh_configured)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ u16 tc_offset[HCLGE_COMM_MAX_TC_NUM];
+ u16 tc_valid[HCLGE_COMM_MAX_TC_NUM];
+ u16 tc_size[HCLGE_COMM_MAX_TC_NUM];
+ u16 cur_rss_size = kinfo->rss_size;
+ u16 cur_tqps = kinfo->num_tqps;
+ u32 *rss_indir;
+ unsigned int i;
+ int ret;
+
+ hclgevf_update_rss_size(handle, new_tqps_num);
+
+ hclge_comm_get_rss_tc_info(kinfo->rss_size, hdev->hw_tc_map,
+ tc_offset, tc_valid, tc_size);
+ ret = hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset,
+ tc_valid, tc_size);
+ if (ret)
+ return ret;
+
+ /* RSS indirection table has been configured by user */
+ if (rxfh_configured)
+ goto out;
+
+ /* Reinitialize the RSS indirection table according to the new RSS size */
+ rss_indir = kcalloc(hdev->ae_dev->dev_specs.rss_ind_tbl_size,
+ sizeof(u32), GFP_KERNEL);
+ if (!rss_indir)
+ return -ENOMEM;
+
+ for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
+ rss_indir[i] = i % kinfo->rss_size;
+
+ hdev->rss_cfg.rss_size = kinfo->rss_size;
+
+ ret = hclgevf_set_rss(handle, rss_indir, NULL, 0);
+ if (ret)
+ dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
+ ret);
+
+ kfree(rss_indir);
+
+out:
+ if (!ret)
+ dev_info(&hdev->pdev->dev,
+ "Channels changed, rss_size from %u to %u, tqps from %u to %u",
+ cur_rss_size, kinfo->rss_size,
+ cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc);
+
+ return ret;
+}
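+
+/* Worked example for the indirection table refill in hclgevf_set_channels()
+ * above: with a new rss_size of 4 and the default 512-entry table
+ * (HCLGEVF_RSS_IND_TBL_SIZE), the entries become the repeating pattern
+ * 0, 1, 2, 3, 0, 1, 2, 3, ... so hashed flows are spread evenly across the
+ * four queues.
+ */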
+
+static int hclgevf_get_status(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ return hdev->hw.mac.link;
+}
+
+static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle,
+ u8 *auto_neg, u32 *speed,
+ u8 *duplex, u32 *lane_num)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ if (speed)
+ *speed = hdev->hw.mac.speed;
+ if (duplex)
+ *duplex = hdev->hw.mac.duplex;
+ if (auto_neg)
+ *auto_neg = AUTONEG_DISABLE;
+}
+
+void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
+ u8 duplex)
+{
+ hdev->hw.mac.speed = speed;
+ hdev->hw.mac.duplex = duplex;
+}
+
+static int hclgevf_gro_en(struct hnae3_handle *handle, bool enable)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ bool gro_en_old = hdev->gro_en;
+ int ret;
+
+ hdev->gro_en = enable;
+ ret = hclgevf_config_gro(hdev);
+ if (ret)
+ hdev->gro_en = gro_en_old;
+
+ return ret;
+}
+
+static void hclgevf_get_media_type(struct hnae3_handle *handle, u8 *media_type,
+ u8 *module_type)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ if (media_type)
+ *media_type = hdev->hw.mac.media_type;
+
+ if (module_type)
+ *module_type = hdev->hw.mac.module_type;
+}
+
+static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
+}
+
+static bool hclgevf_get_cmdq_stat(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ return test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
+}
+
+static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ return test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
+}
+
+static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ return hdev->rst_stats.hw_rst_done_cnt;
+}
+
+static void hclgevf_get_link_mode(struct hnae3_handle *handle,
+ unsigned long *supported,
+ unsigned long *advertising)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ *supported = hdev->hw.mac.supported;
+ *advertising = hdev->hw.mac.advertising;
+}
+
+void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
+ struct hclge_mbx_port_base_vlan *port_base_vlan)
+{
+ struct hnae3_handle *nic = &hdev->nic;
+ struct hclge_vf_to_pf_msg send_msg;
+ int ret;
+
+ rtnl_lock();
+
+ if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
+ test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) {
+ dev_warn(&hdev->pdev->dev,
+ "is resetting when updating port based vlan info\n");
+ rtnl_unlock();
+ return;
+ }
+
+ ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
+ if (ret) {
+ rtnl_unlock();
+ return;
+ }
+
+ /* send the msg to the PF and wait for it to update the port based vlan info */
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
+ HCLGE_MBX_PORT_BASE_VLAN_CFG);
+ memcpy(send_msg.data, port_base_vlan, sizeof(*port_base_vlan));
+ ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
+ if (!ret) {
+ if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
+ nic->port_base_vlan_state = state;
+ else
+ nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
+ }
+
+ hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
+ rtnl_unlock();
+}
+
+static const struct hnae3_ae_ops hclgevf_ops = {
+ .init_ae_dev = hclgevf_init_ae_dev,
+ .uninit_ae_dev = hclgevf_uninit_ae_dev,
+ .reset_prepare = hclgevf_reset_prepare_general,
+ .reset_done = hclgevf_reset_done,
+ .init_client_instance = hclgevf_init_client_instance,
+ .uninit_client_instance = hclgevf_uninit_client_instance,
+ .start = hclgevf_ae_start,
+ .stop = hclgevf_ae_stop,
+ .client_start = hclgevf_client_start,
+ .client_stop = hclgevf_client_stop,
+ .map_ring_to_vector = hclgevf_map_ring_to_vector,
+ .unmap_ring_from_vector = hclgevf_unmap_ring_from_vector,
+ .get_vector = hclgevf_get_vector,
+ .put_vector = hclgevf_put_vector,
+ .reset_queue = hclgevf_reset_tqp,
+ .get_mac_addr = hclgevf_get_mac_addr,
+ .set_mac_addr = hclgevf_set_mac_addr,
+ .add_uc_addr = hclgevf_add_uc_addr,
+ .rm_uc_addr = hclgevf_rm_uc_addr,
+ .add_mc_addr = hclgevf_add_mc_addr,
+ .rm_mc_addr = hclgevf_rm_mc_addr,
+ .get_stats = hclgevf_get_stats,
+ .update_stats = hclgevf_update_stats,
+ .get_strings = hclgevf_get_strings,
+ .get_sset_count = hclgevf_get_sset_count,
+ .get_rss_key_size = hclge_comm_get_rss_key_size,
+ .get_rss = hclgevf_get_rss,
+ .set_rss = hclgevf_set_rss,
+ .get_rss_tuple = hclgevf_get_rss_tuple,
+ .set_rss_tuple = hclgevf_set_rss_tuple,
+ .get_tc_size = hclgevf_get_tc_size,
+ .get_fw_version = hclgevf_get_fw_version,
+ .set_vlan_filter = hclgevf_set_vlan_filter,
+ .enable_vlan_filter = hclgevf_enable_vlan_filter,
+ .enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag,
+ .reset_event = hclgevf_reset_event,
+ .set_default_reset_request = hclgevf_set_def_reset_request,
+ .set_channels = hclgevf_set_channels,
+ .get_channels = hclgevf_get_channels,
+ .get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info,
+ .get_regs_len = hclgevf_get_regs_len,
+ .get_regs = hclgevf_get_regs,
+ .get_status = hclgevf_get_status,
+ .get_ksettings_an_result = hclgevf_get_ksettings_an_result,
+ .get_media_type = hclgevf_get_media_type,
+ .get_hw_reset_stat = hclgevf_get_hw_reset_stat,
+ .ae_dev_resetting = hclgevf_ae_dev_resetting,
+ .ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt,
+ .set_gro_en = hclgevf_gro_en,
+ .set_mtu = hclgevf_set_mtu,
+ .get_global_queue_id = hclgevf_get_qid_global,
+ .set_timer_task = hclgevf_set_timer_task,
+ .get_link_mode = hclgevf_get_link_mode,
+ .set_promisc_mode = hclgevf_set_promisc_mode,
+ .request_update_promisc_mode = hclgevf_request_update_promisc_mode,
+ .get_cmdq_stat = hclgevf_get_cmdq_stat,
+};
+
+static struct hnae3_ae_algo ae_algovf = {
+ .ops = &hclgevf_ops,
+ .pdev_id_table = ae_algovf_pci_tbl,
+};
+
+static int __init hclgevf_init(void)
+{
+ pr_info("%s is initializing\n", HCLGEVF_NAME);
+
+ hclgevf_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGEVF_NAME);
+ if (!hclgevf_wq) {
+ pr_err("%s: failed to create workqueue\n", HCLGEVF_NAME);
+ return -ENOMEM;
+ }
+
+ hnae3_register_ae_algo(&ae_algovf);
+
+ return 0;
+}
+
+static void __exit hclgevf_exit(void)
+{
+ hnae3_unregister_ae_algo(&ae_algovf);
+ destroy_workqueue(hclgevf_wq);
+}
+module_init(hclgevf_init);
+module_exit(hclgevf_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
+MODULE_DESCRIPTION("HCLGEVF Driver");
+MODULE_VERSION(HCLGEVF_MOD_VERSION);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
new file mode 100644
index 0000000000..a73f2bf3a5
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -0,0 +1,299 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2016-2017 Hisilicon Limited. */
+
+#ifndef __HCLGEVF_MAIN_H
+#define __HCLGEVF_MAIN_H
+#include <linux/fs.h>
+#include <linux/if_vlan.h>
+#include <linux/types.h>
+#include <net/devlink.h>
+#include "hclge_mbx.h"
+#include "hclgevf_cmd.h"
+#include "hnae3.h"
+#include "hclge_comm_rss.h"
+#include "hclge_comm_tqp_stats.h"
+
+#define HCLGEVF_MOD_VERSION "1.0"
+#define HCLGEVF_DRIVER_NAME "hclgevf"
+
+#define HCLGEVF_MAX_VLAN_ID 4095
+#define HCLGEVF_MISC_VECTOR_NUM 0
+
+#define HCLGEVF_INVALID_VPORT 0xffff
+#define HCLGEVF_GENERAL_TASK_INTERVAL 5
+#define HCLGEVF_KEEP_ALIVE_TASK_INTERVAL 2
+
+/* This number actually depends on the total number of VFs created by
+ * the physical function, but the maximum number of possible vectors
+ * per VF is {VFn(1-32), VECTn(32 + 1)}.
+ */
+#define HCLGEVF_MAX_VF_VECTOR_NUM (32 + 1)
+
+#define HCLGEVF_VECTOR_REG_BASE 0x20000
+#define HCLGEVF_MISC_VECTOR_REG_BASE 0x20400
+#define HCLGEVF_VECTOR_REG_OFFSET 0x4
+#define HCLGEVF_VECTOR_VF_OFFSET 0x100000
+
+/* bar registers for common func */
+#define HCLGEVF_GRO_EN_REG 0x28000
+#define HCLGEVF_RXD_ADV_LAYOUT_EN_REG 0x28008
+
+/* bar registers for rcb */
+#define HCLGEVF_RING_RX_ADDR_L_REG 0x80000
+#define HCLGEVF_RING_RX_ADDR_H_REG 0x80004
+#define HCLGEVF_RING_RX_BD_NUM_REG 0x80008
+#define HCLGEVF_RING_RX_BD_LENGTH_REG 0x8000C
+#define HCLGEVF_RING_RX_MERGE_EN_REG 0x80014
+#define HCLGEVF_RING_RX_TAIL_REG 0x80018
+#define HCLGEVF_RING_RX_HEAD_REG 0x8001C
+#define HCLGEVF_RING_RX_FBD_NUM_REG 0x80020
+#define HCLGEVF_RING_RX_OFFSET_REG 0x80024
+#define HCLGEVF_RING_RX_FBD_OFFSET_REG 0x80028
+#define HCLGEVF_RING_RX_STASH_REG 0x80030
+#define HCLGEVF_RING_RX_BD_ERR_REG 0x80034
+#define HCLGEVF_RING_TX_ADDR_L_REG 0x80040
+#define HCLGEVF_RING_TX_ADDR_H_REG 0x80044
+#define HCLGEVF_RING_TX_BD_NUM_REG 0x80048
+#define HCLGEVF_RING_TX_PRIORITY_REG 0x8004C
+#define HCLGEVF_RING_TX_TC_REG 0x80050
+#define HCLGEVF_RING_TX_MERGE_EN_REG 0x80054
+#define HCLGEVF_RING_TX_TAIL_REG 0x80058
+#define HCLGEVF_RING_TX_HEAD_REG 0x8005C
+#define HCLGEVF_RING_TX_FBD_NUM_REG 0x80060
+#define HCLGEVF_RING_TX_OFFSET_REG 0x80064
+#define HCLGEVF_RING_TX_EBD_NUM_REG 0x80068
+#define HCLGEVF_RING_TX_EBD_OFFSET_REG 0x80070
+#define HCLGEVF_RING_TX_BD_ERR_REG 0x80074
+#define HCLGEVF_RING_EN_REG 0x80090
+
+/* bar registers for tqp interrupt */
+#define HCLGEVF_TQP_INTR_CTRL_REG 0x20000
+#define HCLGEVF_TQP_INTR_GL0_REG 0x20100
+#define HCLGEVF_TQP_INTR_GL1_REG 0x20200
+#define HCLGEVF_TQP_INTR_GL2_REG 0x20300
+#define HCLGEVF_TQP_INTR_RL_REG 0x20900
+
+/* CMDQ register bits for RX event(=MBX event) */
+#define HCLGEVF_VECTOR0_RX_CMDQ_INT_B 1
+/* RST register bits for RESET event */
+#define HCLGEVF_VECTOR0_RST_INT_B 2
+
+#define HCLGEVF_TQP_RESET_TRY_TIMES 10
+/* Reset related Registers */
+#define HCLGEVF_RST_ING 0x20C00
+#define HCLGEVF_FUN_RST_ING_BIT BIT(0)
+#define HCLGEVF_GLOBAL_RST_ING_BIT BIT(5)
+#define HCLGEVF_CORE_RST_ING_BIT BIT(6)
+#define HCLGEVF_IMP_RST_ING_BIT BIT(7)
+#define HCLGEVF_RST_ING_BITS \
+ (HCLGEVF_FUN_RST_ING_BIT | HCLGEVF_GLOBAL_RST_ING_BIT | \
+ HCLGEVF_CORE_RST_ING_BIT | HCLGEVF_IMP_RST_ING_BIT)
+
+#define HCLGEVF_VF_RST_ING 0x07008
+#define HCLGEVF_VF_RST_ING_BIT BIT(16)
+
+#define HCLGEVF_WAIT_RESET_DONE 100
+
+#define HCLGEVF_RSS_IND_TBL_SIZE 512
+
+#define HCLGEVF_TQP_MEM_SIZE 0x10000
+#define HCLGEVF_MEM_BAR 4
+/* in BAR4, the first half is for RoCE and the second half is for the NIC */
+#define HCLGEVF_NIC_MEM_OFFSET(hdev) \
+ (pci_resource_len((hdev)->pdev, HCLGEVF_MEM_BAR) >> 1)
+#define HCLGEVF_TQP_MEM_OFFSET(hdev, i) \
+ (HCLGEVF_NIC_MEM_OFFSET(hdev) + HCLGEVF_TQP_MEM_SIZE * (i))
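+
+/* Worked example, assuming an 8 MiB BAR4 purely for illustration:
+ * HCLGEVF_NIC_MEM_OFFSET() is then 4 MiB (the RoCE half comes first), and
+ * TQP i's window starts at 4 MiB + i * HCLGEVF_TQP_MEM_SIZE, i.e. each TQP
+ * owns a 64 KiB slice of the NIC half.
+ */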
+
+#define HCLGEVF_MAC_MAX_FRAME 9728
+
+#define HCLGEVF_STATS_TIMER_INTERVAL 36U
+
+#define hclgevf_read_dev(a, reg) \
+ hclge_comm_read_reg((a)->hw.io_base, reg)
+#define hclgevf_write_dev(a, reg, value) \
+ hclge_comm_write_reg((a)->hw.io_base, reg, value)
+
+enum hclgevf_evt_cause {
+ HCLGEVF_VECTOR0_EVENT_RST,
+ HCLGEVF_VECTOR0_EVENT_MBX,
+ HCLGEVF_VECTOR0_EVENT_OTHER,
+};
+
+/* states of hclgevf device & tasks */
+enum hclgevf_states {
+ /* device states */
+ HCLGEVF_STATE_DOWN,
+ HCLGEVF_STATE_DISABLED,
+ HCLGEVF_STATE_IRQ_INITED,
+ HCLGEVF_STATE_REMOVING,
+ HCLGEVF_STATE_NIC_REGISTERED,
+ HCLGEVF_STATE_ROCE_REGISTERED,
+ HCLGEVF_STATE_SERVICE_INITED,
+ /* task states */
+ HCLGEVF_STATE_RST_SERVICE_SCHED,
+ HCLGEVF_STATE_RST_HANDLING,
+ HCLGEVF_STATE_MBX_SERVICE_SCHED,
+ HCLGEVF_STATE_MBX_HANDLING,
+ HCLGEVF_STATE_LINK_UPDATING,
+ HCLGEVF_STATE_PROMISC_CHANGED,
+ HCLGEVF_STATE_RST_FAIL,
+ HCLGEVF_STATE_PF_PUSH_LINK_STATUS,
+};
+
+struct hclgevf_mac {
+ u8 media_type;
+ u8 module_type;
+ u8 mac_addr[ETH_ALEN];
+ int link;
+ u8 duplex;
+ u32 speed;
+ u64 supported;
+ u64 advertising;
+};
+
+struct hclgevf_hw {
+ struct hclge_comm_hw hw;
+ int num_vec;
+ struct hclgevf_mac mac;
+};
+
+struct hclgevf_cfg {
+ u8 tc_num;
+ u16 tqp_desc_num;
+ u16 rx_buf_len;
+ u8 phy_addr;
+ u8 media_type;
+ u8 mac_addr[ETH_ALEN];
+ u32 numa_node_map;
+};
+
+struct hclgevf_misc_vector {
+ u8 __iomem *addr;
+ int vector_irq;
+ char name[HNAE3_INT_NAME_LEN];
+};
+
+struct hclgevf_rst_stats {
+ u32 rst_cnt; /* the number of resets */
+ u32 vf_func_rst_cnt; /* the number of VF function resets */
+ u32 flr_rst_cnt; /* the number of FLRs */
+ u32 vf_rst_cnt; /* the number of VF resets */
+ u32 rst_done_cnt; /* the number of resets completed */
+ u32 hw_rst_done_cnt; /* the number of HW resets completed */
+ u32 rst_fail_cnt; /* the number of failed VF resets */
+};
+
+enum HCLGEVF_MAC_ADDR_TYPE {
+ HCLGEVF_MAC_ADDR_UC,
+ HCLGEVF_MAC_ADDR_MC
+};
+
+enum HCLGEVF_MAC_NODE_STATE {
+ HCLGEVF_MAC_TO_ADD,
+ HCLGEVF_MAC_TO_DEL,
+ HCLGEVF_MAC_ACTIVE
+};
+
+struct hclgevf_mac_addr_node {
+ struct list_head node;
+ enum HCLGEVF_MAC_NODE_STATE state;
+ u8 mac_addr[ETH_ALEN];
+};
+
+struct hclgevf_mac_table_cfg {
+ spinlock_t mac_list_lock; /* protects the MAC address lists for add/delete */
+ struct list_head uc_mac_list;
+ struct list_head mc_mac_list;
+};
+
+struct hclgevf_dev {
+ struct pci_dev *pdev;
+ struct hnae3_ae_dev *ae_dev;
+ struct hclgevf_hw hw;
+ struct hclgevf_misc_vector misc_vector;
+ struct hclge_comm_rss_cfg rss_cfg;
+ unsigned long state;
+ unsigned long flr_state;
+ unsigned long default_reset_request;
+ unsigned long last_reset_time;
+ enum hnae3_reset_type reset_level;
+ unsigned long reset_pending;
+ enum hnae3_reset_type reset_type;
+ struct timer_list reset_timer;
+
+#define HCLGEVF_RESET_REQUESTED 0
+#define HCLGEVF_RESET_PENDING 1
+ unsigned long reset_state; /* requested, pending */
+ struct hclgevf_rst_stats rst_stats;
+ u32 reset_attempts;
+ struct semaphore reset_sem; /* protect reset process */
+
+ u32 fw_version;
+ u16 mbx_api_version;
+ u16 num_tqps; /* num task queue pairs of this VF */
+
+ u16 alloc_rss_size; /* allocated RSS task queue */
+ u16 rss_size_max; /* HW defined max RSS task queue */
+
+ u16 num_alloc_vport; /* num vports this driver supports */
+ u32 numa_node_mask;
+ u16 rx_buf_len;
+ u16 num_tx_desc; /* desc num per tx queue */
+ u16 num_rx_desc; /* desc num per rx queue */
+ u8 hw_tc_map;
+ u8 has_pf_mac;
+
+ u16 num_msi;
+ u16 num_msi_left;
+ u16 num_msi_used;
+ u16 num_nic_msix; /* Num of nic vectors for this VF */
+ u16 num_roce_msix; /* Num of roce vectors for this VF */
+ u16 roce_base_msix_offset;
+ u16 *vector_status;
+ int *vector_irq;
+
+ bool gro_en;
+
+ unsigned long vlan_del_fail_bmap[BITS_TO_LONGS(VLAN_N_VID)];
+
+ struct hclgevf_mac_table_cfg mac_table;
+
+ struct hclgevf_mbx_resp_status mbx_resp; /* mailbox response */
+ struct hclgevf_mbx_arq_ring arq; /* mailbox async rx queue */
+
+ struct delayed_work service_task;
+
+ struct hclge_comm_tqp *htqp;
+
+ struct hnae3_handle nic;
+ struct hnae3_handle roce;
+
+ struct hnae3_client *nic_client;
+ struct hnae3_client *roce_client;
+ u32 flag;
+ unsigned long serv_processed_cnt;
+ unsigned long last_serv_processed;
+
+ struct devlink *devlink;
+};
+
+static inline bool hclgevf_is_reset_pending(struct hclgevf_dev *hdev)
+{
+ return !!hdev->reset_pending;
+}
+
+int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev,
+ struct hclge_vf_to_pf_msg *send_msg, bool need_resp,
+ u8 *resp_data, u16 resp_len);
+void hclgevf_mbx_handler(struct hclgevf_dev *hdev);
+void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev);
+
+void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state);
+void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
+ u8 duplex);
+void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev);
+void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev);
+void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
+ struct hclge_mbx_port_base_vlan *port_base_vlan);
+struct hclgevf_dev *hclgevf_ae_get_hdev(struct hnae3_handle *handle);
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
new file mode 100644
index 0000000000..85c2a634c8
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
@@ -0,0 +1,388 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
+
+#include "hclge_mbx.h"
+#include "hclgevf_main.h"
+#include "hnae3.h"
+
+#define CREATE_TRACE_POINTS
+#include "hclgevf_trace.h"
+
+static int hclgevf_resp_to_errno(u16 resp_code)
+{
+ return resp_code ? -resp_code : 0;
+}
+
+#define HCLGEVF_MBX_MATCH_ID_START 1
+static void hclgevf_reset_mbx_resp_status(struct hclgevf_dev *hdev)
+{
+ /* this function should be called with mbx_resp.mbx_mutex held
+ * to protect received_resp from race conditions
+ */
+ hdev->mbx_resp.received_resp = false;
+ hdev->mbx_resp.origin_mbx_msg = 0;
+ hdev->mbx_resp.resp_status = 0;
+ hdev->mbx_resp.match_id++;
+ /* Update match_id and ensure the value of match_id is not zero */
+ if (hdev->mbx_resp.match_id == 0)
+ hdev->mbx_resp.match_id = HCLGEVF_MBX_MATCH_ID_START;
+ memset(hdev->mbx_resp.additional_info, 0, HCLGE_MBX_MAX_RESP_DATA_SIZE);
+}
+
+/* hclgevf_get_mbx_resp: used to get a response from the PF after the VF sends
+ * a mailbox message to the PF.
+ * @hdev: pointer to struct hclgevf_dev
+ * @code0: the message opcode the VF sends to the PF.
+ * @code1: the message sub-opcode the VF sends to the PF.
+ * @resp_data: pointer to store response data from PF to VF.
+ * @resp_len: the length of resp_data from PF to VF.
+ */
+static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1,
+ u8 *resp_data, u16 resp_len)
+{
+#define HCLGEVF_MAX_TRY_TIMES 500
+#define HCLGEVF_SLEEP_USECOND 1000
+ struct hclgevf_mbx_resp_status *mbx_resp;
+ u16 r_code0, r_code1;
+ int i = 0;
+
+ if (resp_len > HCLGE_MBX_MAX_RESP_DATA_SIZE) {
+ dev_err(&hdev->pdev->dev,
+ "VF mbx response len(=%u) exceeds maximum(=%u)\n",
+ resp_len,
+ HCLGE_MBX_MAX_RESP_DATA_SIZE);
+ return -EINVAL;
+ }
+
+ while ((!hdev->mbx_resp.received_resp) && (i < HCLGEVF_MAX_TRY_TIMES)) {
+ if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE,
+ &hdev->hw.hw.comm_state))
+ return -EIO;
+
+ usleep_range(HCLGEVF_SLEEP_USECOND, HCLGEVF_SLEEP_USECOND * 2);
+ i++;
+ }
+
+ /* ensure additional_info will be seen after received_resp */
+ smp_rmb();
+
+ if (i >= HCLGEVF_MAX_TRY_TIMES) {
+ dev_err(&hdev->pdev->dev,
+ "VF could not get mbx(%u,%u) resp(=%d) from PF in %d tries\n",
+ code0, code1, hdev->mbx_resp.received_resp, i);
+ return -EIO;
+ }
+
+ mbx_resp = &hdev->mbx_resp;
+ r_code0 = (u16)(mbx_resp->origin_mbx_msg >> 16);
+ r_code1 = (u16)(mbx_resp->origin_mbx_msg & 0xff);
+
+ if (mbx_resp->resp_status)
+ return mbx_resp->resp_status;
+
+ if (resp_data)
+ memcpy(resp_data, &mbx_resp->additional_info[0], resp_len);
+
+ hclgevf_reset_mbx_resp_status(hdev);
+
+ if (!(r_code0 == code0 && r_code1 == code1 && !mbx_resp->resp_status)) {
+ dev_err(&hdev->pdev->dev,
+ "VF could not match resp code(code0=%u,code1=%u), %d\n",
+ code0, code1, mbx_resp->resp_status);
+ dev_err(&hdev->pdev->dev,
+ "VF could not match resp r_code(r_code0=%u,r_code1=%u)\n",
+ r_code0, r_code1);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev,
+ struct hclge_vf_to_pf_msg *send_msg, bool need_resp,
+ u8 *resp_data, u16 resp_len)
+{
+ struct hclge_mbx_vf_to_pf_cmd *req;
+ struct hclge_desc desc;
+ int status;
+
+ req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;
+
+ if (!send_msg) {
+ dev_err(&hdev->pdev->dev,
+ "failed to send mbx, msg is NULL\n");
+ return -EINVAL;
+ }
+
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
+ if (need_resp)
+ hnae3_set_bit(req->mbx_need_resp, HCLGE_MBX_NEED_RESP_B, 1);
+
+ memcpy(&req->msg, send_msg, sizeof(struct hclge_vf_to_pf_msg));
+
+ if (test_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state))
+ trace_hclge_vf_mbx_send(hdev, req);
+
+ /* synchronous send */
+ if (need_resp) {
+ mutex_lock(&hdev->mbx_resp.mbx_mutex);
+ hclgevf_reset_mbx_resp_status(hdev);
+ req->match_id = cpu_to_le16(hdev->mbx_resp.match_id);
+ status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (status) {
+ dev_err(&hdev->pdev->dev,
+ "VF failed(=%d) to send mbx message to PF\n",
+ status);
+ mutex_unlock(&hdev->mbx_resp.mbx_mutex);
+ return status;
+ }
+
+ status = hclgevf_get_mbx_resp(hdev, send_msg->code,
+ send_msg->subcode, resp_data,
+ resp_len);
+ mutex_unlock(&hdev->mbx_resp.mbx_mutex);
+ } else {
+ /* asynchronous send */
+ status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (status) {
+ dev_err(&hdev->pdev->dev,
+ "VF failed(=%d) to send mbx message to PF\n",
+ status);
+ return status;
+ }
+ }
+
+ return status;
+}
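+
+/* A minimal caller sketch for the synchronous path (the opcode and response
+ * buffer size here are illustrative only, not taken from a real caller):
+ *
+ *   struct hclge_vf_to_pf_msg send_msg;
+ *   u8 resp[HCLGE_MBX_MAX_RESP_DATA_SIZE];
+ *   int ret;
+ *
+ *   hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_BASIC_INFO, 0);
+ *   ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp, sizeof(resp));
+ *
+ * With need_resp set, the call serializes on mbx_resp.mbx_mutex and polls in
+ * hclgevf_get_mbx_resp() until the matching reply arrives or the retry limit
+ * is hit; with need_resp cleared, the message is sent without waiting.
+ */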
+
+static bool hclgevf_cmd_crq_empty(struct hclgevf_hw *hw)
+{
+ u32 tail = hclgevf_read_dev(hw, HCLGE_COMM_NIC_CRQ_TAIL_REG);
+
+ return tail == hw->hw.cmq.crq.next_to_use;
+}
+
+static void hclgevf_handle_mbx_response(struct hclgevf_dev *hdev,
+ struct hclge_mbx_pf_to_vf_cmd *req)
+{
+ u16 vf_mbx_msg_subcode = le16_to_cpu(req->msg.vf_mbx_msg_subcode);
+ u16 vf_mbx_msg_code = le16_to_cpu(req->msg.vf_mbx_msg_code);
+ struct hclgevf_mbx_resp_status *resp = &hdev->mbx_resp;
+ u16 resp_status = le16_to_cpu(req->msg.resp_status);
+ u16 match_id = le16_to_cpu(req->match_id);
+
+ if (resp->received_resp)
+ dev_warn(&hdev->pdev->dev,
+ "VF mbx resp flag not clear(%u)\n",
+ vf_mbx_msg_code);
+
+ resp->origin_mbx_msg = (vf_mbx_msg_code << 16);
+ resp->origin_mbx_msg |= vf_mbx_msg_subcode;
+ resp->resp_status = hclgevf_resp_to_errno(resp_status);
+ memcpy(resp->additional_info, req->msg.resp_data,
+ HCLGE_MBX_MAX_RESP_DATA_SIZE * sizeof(u8));
+
+ /* ensure additional_info will be seen before setting received_resp */
+ smp_wmb();
+
+ if (match_id) {
+ /* If match_id is not zero, the PF supports match_id. If the
+ * match_id matches, the VF has received the right response;
+ * otherwise the response is ignored. The driver clears
+ * hdev->mbx_resp when it sends the next message that needs a
+ * response.
+ */
+ if (match_id == resp->match_id)
+ resp->received_resp = true;
+ } else {
+ resp->received_resp = true;
+ }
+}
+
+static void hclgevf_handle_mbx_msg(struct hclgevf_dev *hdev,
+ struct hclge_mbx_pf_to_vf_cmd *req)
+{
+ /* drop the async msg if the ARQ is full and continue with the
+ * next message
+ */
+ if (atomic_read(&hdev->arq.count) >=
+ HCLGE_MBX_MAX_ARQ_MSG_NUM) {
+ dev_warn(&hdev->pdev->dev,
+ "Async Q full, dropping msg(%u)\n",
+ le16_to_cpu(req->msg.code));
+ return;
+ }
+
+ /* queue the async message at the tail of the arq */
+ memcpy(hdev->arq.msg_q[hdev->arq.tail], &req->msg,
+ HCLGE_MBX_MAX_ARQ_MSG_SIZE * sizeof(u16));
+ hclge_mbx_tail_ptr_move_arq(hdev->arq);
+ atomic_inc(&hdev->arq.count);
+
+ hclgevf_mbx_task_schedule(hdev);
+}
+
+void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
+{
+ struct hclge_mbx_pf_to_vf_cmd *req;
+ struct hclge_comm_cmq_ring *crq;
+ struct hclge_desc *desc;
+ u16 flag;
+ u16 code;
+
+ crq = &hdev->hw.hw.cmq.crq;
+
+ while (!hclgevf_cmd_crq_empty(&hdev->hw)) {
+ if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE,
+ &hdev->hw.hw.comm_state)) {
+ dev_info(&hdev->pdev->dev, "vf crq need init\n");
+ return;
+ }
+
+ desc = &crq->desc[crq->next_to_use];
+ req = (struct hclge_mbx_pf_to_vf_cmd *)desc->data;
+
+ flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
+ code = le16_to_cpu(req->msg.code);
+ if (unlikely(!hnae3_get_bit(flag, HCLGEVF_CMDQ_RX_OUTVLD_B))) {
+ dev_warn(&hdev->pdev->dev,
+ "dropped invalid mailbox message, code = %u\n",
+ code);
+
+ /* dropping/not processing this invalid message */
+ crq->desc[crq->next_to_use].flag = 0;
+ hclge_mbx_ring_ptr_move_crq(crq);
+ continue;
+ }
+
+ trace_hclge_vf_mbx_get(hdev, req);
+
+ /* Synchronous messages are time critical and need preferential
+ * treatment, so acknowledge all the sync responses as quickly as
+ * possible so that waiting tasks do not time out, and queue the
+ * async messages for later processing in the context of the
+ * mailbox task, i.e. the slow path.
+ */
+ switch (code) {
+ case HCLGE_MBX_PF_VF_RESP:
+ hclgevf_handle_mbx_response(hdev, req);
+ break;
+ case HCLGE_MBX_LINK_STAT_CHANGE:
+ case HCLGE_MBX_ASSERTING_RESET:
+ case HCLGE_MBX_LINK_STAT_MODE:
+ case HCLGE_MBX_PUSH_VLAN_INFO:
+ case HCLGE_MBX_PUSH_PROMISC_INFO:
+ hclgevf_handle_mbx_msg(hdev, req);
+ break;
+ default:
+ dev_err(&hdev->pdev->dev,
+ "VF received unsupported(%u) mbx msg from PF\n",
+ code);
+ break;
+ }
+ crq->desc[crq->next_to_use].flag = 0;
+ hclge_mbx_ring_ptr_move_crq(crq);
+ }
+
+ /* Write back the CMDQ_RQ header pointer; the M7 needs this pointer */
+ hclgevf_write_dev(&hdev->hw, HCLGE_COMM_NIC_CRQ_HEAD_REG,
+ crq->next_to_use);
+}
+
+static void hclgevf_parse_promisc_info(struct hclgevf_dev *hdev,
+ u16 promisc_info)
+{
+ if (!promisc_info)
+ dev_info(&hdev->pdev->dev,
+ "Promisc mode is closed by host for being untrusted.\n");
+}
+
+void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
+{
+ struct hclge_mbx_port_base_vlan *vlan_info;
+ struct hclge_mbx_link_status *link_info;
+ struct hclge_mbx_link_mode *link_mode;
+ enum hnae3_reset_type reset_type;
+ u16 link_status, state;
+ __le16 *msg_q;
+ u16 opcode;
+ u8 duplex;
+ u32 speed;
+ u32 tail;
+ u8 flag;
+ u16 idx;
+
+ tail = hdev->arq.tail;
+
+ /* process all the async queue messages */
+ while (tail != hdev->arq.head) {
+ if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE,
+ &hdev->hw.hw.comm_state)) {
+ dev_info(&hdev->pdev->dev,
+ "vf crq need init in async\n");
+ return;
+ }
+
+ msg_q = hdev->arq.msg_q[hdev->arq.head];
+ opcode = le16_to_cpu(msg_q[0]);
+ switch (opcode) {
+ case HCLGE_MBX_LINK_STAT_CHANGE:
+ link_info = (struct hclge_mbx_link_status *)(msg_q + 1);
+ link_status = le16_to_cpu(link_info->link_status);
+ speed = le32_to_cpu(link_info->speed);
+ duplex = (u8)le16_to_cpu(link_info->duplex);
+ flag = link_info->flag;
+
+ /* update the upper layer with the new link status */
+ hclgevf_update_speed_duplex(hdev, speed, duplex);
+ hclgevf_update_link_status(hdev, link_status);
+
+ if (flag & HCLGE_MBX_PUSH_LINK_STATUS_EN)
+ set_bit(HCLGEVF_STATE_PF_PUSH_LINK_STATUS,
+ &hdev->state);
+
+ break;
+ case HCLGE_MBX_LINK_STAT_MODE:
+ link_mode = (struct hclge_mbx_link_mode *)(msg_q + 1);
+ idx = le16_to_cpu(link_mode->idx);
+ if (idx)
+ hdev->hw.mac.supported =
+ le64_to_cpu(link_mode->link_mode);
+ else
+ hdev->hw.mac.advertising =
+ le64_to_cpu(link_mode->link_mode);
+ break;
+ case HCLGE_MBX_ASSERTING_RESET:
+ /* The PF has asserted a reset, so the VF should enter the
+ * pending state and poll the hardware reset status until the
+ * reset completes. After this the stack should eventually be
+ * re-initialized.
+ */
+ reset_type =
+ (enum hnae3_reset_type)le16_to_cpu(msg_q[1]);
+ set_bit(reset_type, &hdev->reset_pending);
+ set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
+ hclgevf_reset_task_schedule(hdev);
+
+ break;
+ case HCLGE_MBX_PUSH_VLAN_INFO:
+ vlan_info =
+ (struct hclge_mbx_port_base_vlan *)(msg_q + 1);
+ state = le16_to_cpu(vlan_info->state);
+ hclgevf_update_port_base_vlan_info(hdev, state,
+ vlan_info);
+ break;
+ case HCLGE_MBX_PUSH_PROMISC_INFO:
+ hclgevf_parse_promisc_info(hdev, le16_to_cpu(msg_q[1]));
+ break;
+ default:
+ dev_err(&hdev->pdev->dev,
+ "fetched unsupported(%u) message from arq\n",
+ opcode);
+ break;
+ }
+
+ hclge_mbx_head_ptr_move_arq(hdev->arq);
+ atomic_dec(&hdev->arq.count);
+ msg_q = hdev->arq.msg_q[hdev->arq.head];
+ }
+}
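+
+/* The async receive queue (arq) drained above is a ring of fixed-size message
+ * slots: word 0 of a slot holds the opcode and the payload starts at word 1,
+ * which is why every case above casts (msg_q + 1). A minimal sketch of the
+ * head-pointer move (the real macro lives in hclge_mbx.h; the ring-size name
+ * below is an assumption for illustration):
+ *
+ *	hdev->arq.head = (hdev->arq.head + 1) % HCLGE_MBX_MAX_ARQ_MSG_NUM;
+ */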
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c
new file mode 100644
index 0000000000..65b9dcd381
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c
@@ -0,0 +1,164 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2023 Hisilicon Limited.
+
+#include "hclgevf_main.h"
+#include "hclgevf_regs.h"
+#include "hnae3.h"
+
+static const u32 cmdq_reg_addr_list[] = {HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG,
+ HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG,
+ HCLGE_COMM_NIC_CSQ_DEPTH_REG,
+ HCLGE_COMM_NIC_CSQ_TAIL_REG,
+ HCLGE_COMM_NIC_CSQ_HEAD_REG,
+ HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG,
+ HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG,
+ HCLGE_COMM_NIC_CRQ_DEPTH_REG,
+ HCLGE_COMM_NIC_CRQ_TAIL_REG,
+ HCLGE_COMM_NIC_CRQ_HEAD_REG,
+ HCLGE_COMM_VECTOR0_CMDQ_SRC_REG,
+ HCLGE_COMM_VECTOR0_CMDQ_STATE_REG,
+ HCLGE_COMM_CMDQ_INTR_EN_REG,
+ HCLGE_COMM_CMDQ_INTR_GEN_REG};
+
+static const u32 common_reg_addr_list[] = {HCLGEVF_MISC_VECTOR_REG_BASE,
+ HCLGEVF_RST_ING,
+ HCLGEVF_GRO_EN_REG};
+
+static const u32 ring_reg_addr_list[] = {HCLGEVF_RING_RX_ADDR_L_REG,
+ HCLGEVF_RING_RX_ADDR_H_REG,
+ HCLGEVF_RING_RX_BD_NUM_REG,
+ HCLGEVF_RING_RX_BD_LENGTH_REG,
+ HCLGEVF_RING_RX_MERGE_EN_REG,
+ HCLGEVF_RING_RX_TAIL_REG,
+ HCLGEVF_RING_RX_HEAD_REG,
+ HCLGEVF_RING_RX_FBD_NUM_REG,
+ HCLGEVF_RING_RX_OFFSET_REG,
+ HCLGEVF_RING_RX_FBD_OFFSET_REG,
+ HCLGEVF_RING_RX_STASH_REG,
+ HCLGEVF_RING_RX_BD_ERR_REG,
+ HCLGEVF_RING_TX_ADDR_L_REG,
+ HCLGEVF_RING_TX_ADDR_H_REG,
+ HCLGEVF_RING_TX_BD_NUM_REG,
+ HCLGEVF_RING_TX_PRIORITY_REG,
+ HCLGEVF_RING_TX_TC_REG,
+ HCLGEVF_RING_TX_MERGE_EN_REG,
+ HCLGEVF_RING_TX_TAIL_REG,
+ HCLGEVF_RING_TX_HEAD_REG,
+ HCLGEVF_RING_TX_FBD_NUM_REG,
+ HCLGEVF_RING_TX_OFFSET_REG,
+ HCLGEVF_RING_TX_EBD_NUM_REG,
+ HCLGEVF_RING_TX_EBD_OFFSET_REG,
+ HCLGEVF_RING_TX_BD_ERR_REG,
+ HCLGEVF_RING_EN_REG};
+
+static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG,
+ HCLGEVF_TQP_INTR_GL0_REG,
+ HCLGEVF_TQP_INTR_GL1_REG,
+ HCLGEVF_TQP_INTR_GL2_REG,
+ HCLGEVF_TQP_INTR_RL_REG};
+
+enum hclgevf_reg_tag {
+ HCLGEVF_REG_TAG_CMDQ = 0,
+ HCLGEVF_REG_TAG_COMMON,
+ HCLGEVF_REG_TAG_RING,
+ HCLGEVF_REG_TAG_TQP_INTR,
+};
+
+#pragma pack(4)
+struct hclgevf_reg_tlv {
+ u16 tag;
+ u16 len;
+};
+
+struct hclgevf_reg_header {
+ u64 magic_number;
+ u8 is_vf;
+ u8 rsv[7];
+};
+
+#pragma pack()
+
+#define HCLGEVF_REG_TLV_SIZE sizeof(struct hclgevf_reg_tlv)
+#define HCLGEVF_REG_HEADER_SIZE sizeof(struct hclgevf_reg_header)
+#define HCLGEVF_REG_TLV_SPACE (sizeof(struct hclgevf_reg_tlv) / sizeof(u32))
+#define HCLGEVF_REG_HEADER_SPACE (sizeof(struct hclgevf_reg_header) / sizeof(u32))
+#define HCLGEVF_REG_MAGIC_NUMBER 0x686e733372656773 /* "hns3regs" in ASCII */
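+
+/* Layout of the register dump produced below: a header, then one TLV per
+ * register group immediately followed by that group's u32 values. The ring
+ * and TQP-interrupt groups repeat per queue / per interrupt vector:
+ *
+ *	| header | TLV(CMDQ) | values | TLV(COMMON) | values |
+ *	| TLV(RING) | values | ... repeated num_tqps times
+ *	| TLV(TQP_INTR) | values | ... repeated (num_msi_used - 1) times
+ *
+ * hclgevf_get_regs_len() returns this total in bytes and hclgevf_get_regs()
+ * fills the buffer in the same order.
+ */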
+
+static u32 hclgevf_reg_get_header(void *data)
+{
+ struct hclgevf_reg_header *header = data;
+
+ header->magic_number = HCLGEVF_REG_MAGIC_NUMBER;
+ header->is_vf = 0x1;
+
+ return HCLGEVF_REG_HEADER_SPACE;
+}
+
+static u32 hclgevf_reg_get_tlv(u32 tag, u32 regs_num, void *data)
+{
+ struct hclgevf_reg_tlv *tlv = data;
+
+ tlv->tag = tag;
+ tlv->len = regs_num * sizeof(u32) + HCLGEVF_REG_TLV_SIZE;
+
+ return HCLGEVF_REG_TLV_SPACE;
+}
+
+int hclgevf_get_regs_len(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ int cmdq_len, common_len, ring_len, tqp_intr_len;
+
+ cmdq_len = HCLGEVF_REG_TLV_SIZE + sizeof(cmdq_reg_addr_list);
+ common_len = HCLGEVF_REG_TLV_SIZE + sizeof(common_reg_addr_list);
+ ring_len = HCLGEVF_REG_TLV_SIZE + sizeof(ring_reg_addr_list);
+ tqp_intr_len = HCLGEVF_REG_TLV_SIZE + sizeof(tqp_intr_reg_addr_list);
+
+ /* return the total length of all register values */
+ return HCLGEVF_REG_HEADER_SIZE + cmdq_len + common_len +
+ tqp_intr_len * (hdev->num_msi_used - 1) +
+ ring_len * hdev->num_tqps;
+}
+
+void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
+ void *data)
+{
+#define HCLGEVF_RING_REG_OFFSET 0x200
+#define HCLGEVF_RING_INT_REG_OFFSET 0x4
+
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ int i, j, reg_um;
+ u32 *reg = data;
+
+ *version = hdev->fw_version;
+ reg += hclgevf_reg_get_header(reg);
+
+ /* fetch per-VF register values from the VF PCIe register space */
+ reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
+ reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_CMDQ, reg_um, reg);
+ for (i = 0; i < reg_um; i++)
+ *reg++ = hclgevf_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
+
+ reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
+ reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_COMMON, reg_um, reg);
+ for (i = 0; i < reg_um; i++)
+ *reg++ = hclgevf_read_dev(&hdev->hw, common_reg_addr_list[i]);
+
+ reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
+ for (j = 0; j < hdev->num_tqps; j++) {
+ reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_RING, reg_um, reg);
+ for (i = 0; i < reg_um; i++)
+ *reg++ = hclgevf_read_dev(&hdev->hw,
+ ring_reg_addr_list[i] +
+ HCLGEVF_RING_REG_OFFSET * j);
+ }
+
+ reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
+ for (j = 0; j < hdev->num_msi_used - 1; j++) {
+ reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_TQP_INTR, reg_um, reg);
+ for (i = 0; i < reg_um; i++)
+ *reg++ = hclgevf_read_dev(&hdev->hw,
+ tqp_intr_reg_addr_list[i] +
+ HCLGEVF_RING_INT_REG_OFFSET * j);
+ }
+}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.h
new file mode 100644
index 0000000000..77bdcf60a1
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2023 Hisilicon Limited. */
+
+#ifndef __HCLGEVF_REGS_H
+#define __HCLGEVF_REGS_H
+#include <linux/types.h>
+
+struct hnae3_handle;
+
+int hclgevf_get_regs_len(struct hnae3_handle *handle);
+void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
+ void *data);
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h
new file mode 100644
index 0000000000..5d4895bb57
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2018-2019 Hisilicon Limited. */
+
+/* This must be outside ifdef _HCLGEVF_TRACE_H */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hns3
+
+#if !defined(_HCLGEVF_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _HCLGEVF_TRACE_H_
+
+#include <linux/tracepoint.h>
+
+#define VF_GET_MBX_LEN (sizeof(struct hclge_mbx_pf_to_vf_cmd) / sizeof(u32))
+#define VF_SEND_MBX_LEN (sizeof(struct hclge_mbx_vf_to_pf_cmd) / sizeof(u32))
+
+TRACE_EVENT(hclge_vf_mbx_get,
+ TP_PROTO(
+ struct hclgevf_dev *hdev,
+ struct hclge_mbx_pf_to_vf_cmd *req),
+ TP_ARGS(hdev, req),
+
+ TP_STRUCT__entry(
+ __field(u8, vfid)
+ __field(u16, code)
+ __string(pciname, pci_name(hdev->pdev))
+ __string(devname, &hdev->nic.kinfo.netdev->name)
+ __array(u32, mbx_data, VF_GET_MBX_LEN)
+ ),
+
+ TP_fast_assign(
+ __entry->vfid = req->dest_vfid;
+ __entry->code = le16_to_cpu(req->msg.code);
+ __assign_str(pciname, pci_name(hdev->pdev));
+ __assign_str(devname, &hdev->nic.kinfo.netdev->name);
+ memcpy(__entry->mbx_data, req,
+ sizeof(struct hclge_mbx_pf_to_vf_cmd));
+ ),
+
+ TP_printk(
+ "%s %s vfid:%u code:%u data:%s",
+ __get_str(pciname), __get_str(devname), __entry->vfid,
+ __entry->code,
+ __print_array(__entry->mbx_data, VF_GET_MBX_LEN, sizeof(u32))
+ )
+);
+
+TRACE_EVENT(hclge_vf_mbx_send,
+ TP_PROTO(
+ struct hclgevf_dev *hdev,
+ struct hclge_mbx_vf_to_pf_cmd *req),
+ TP_ARGS(hdev, req),
+
+ TP_STRUCT__entry(
+ __field(u8, vfid)
+ __field(u8, code)
+ __field(u8, subcode)
+ __string(pciname, pci_name(hdev->pdev))
+ __string(devname, &hdev->nic.kinfo.netdev->name)
+ __array(u32, mbx_data, VF_SEND_MBX_LEN)
+ ),
+
+ TP_fast_assign(
+ __entry->vfid = req->mbx_src_vfid;
+ __entry->code = req->msg.code;
+ __entry->subcode = req->msg.subcode;
+ __assign_str(pciname, pci_name(hdev->pdev));
+ __assign_str(devname, &hdev->nic.kinfo.netdev->name);
+ memcpy(__entry->mbx_data, req,
+ sizeof(struct hclge_mbx_vf_to_pf_cmd));
+ ),
+
+ TP_printk(
+ "%s %s vfid:%u code:%u subcode:%u data:%s",
+ __get_str(pciname), __get_str(devname), __entry->vfid,
+ __entry->code, __entry->subcode,
+ __print_array(__entry->mbx_data, VF_SEND_MBX_LEN, sizeof(u32))
+ )
+);
+
+#endif /* _HCLGEVF_TRACE_H_ */
+
+/* This must be outside ifdef _HCLGEVF_TRACE_H */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE hclgevf_trace
+#include <trace/define_trace.h>
diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
new file mode 100644
index 0000000000..409a89d802
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
@@ -0,0 +1,652 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ */
+
+#include <linux/acpi.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/netdevice.h>
+#include <linux/of_address.h>
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <linux/of_platform.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#define MDIO_DRV_NAME "Hi-HNS_MDIO"
+#define MDIO_BUS_NAME "Hisilicon MII Bus"
+
+#define MDIO_TIMEOUT 1000000
+
+struct hns_mdio_sc_reg {
+ u16 mdio_clk_en;
+ u16 mdio_clk_dis;
+ u16 mdio_reset_req;
+ u16 mdio_reset_dreq;
+ u16 mdio_clk_st;
+ u16 mdio_reset_st;
+};
+
+struct hns_mdio_device {
+ u8 __iomem *vbase; /* mdio reg base address */
+ struct regmap *subctrl_vbase;
+ struct hns_mdio_sc_reg sc_reg;
+};
+
+/* mdio reg */
+#define MDIO_COMMAND_REG 0x0
+#define MDIO_ADDR_REG 0x4
+#define MDIO_WDATA_REG 0x8
+#define MDIO_RDATA_REG 0xc
+#define MDIO_STA_REG 0x10
+
+/* cfg phy bit map */
+#define MDIO_CMD_DEVAD_M 0x1f
+#define MDIO_CMD_DEVAD_S 0
+#define MDIO_CMD_PRTAD_M 0x1f
+#define MDIO_CMD_PRTAD_S 5
+#define MDIO_CMD_OP_S 10
+#define MDIO_CMD_ST_S 12
+#define MDIO_CMD_START_B 14
+
+#define MDIO_ADDR_DATA_M 0xffff
+#define MDIO_ADDR_DATA_S 0
+
+#define MDIO_WDATA_DATA_M 0xffff
+#define MDIO_WDATA_DATA_S 0
+
+#define MDIO_RDATA_DATA_M 0xffff
+#define MDIO_RDATA_DATA_S 0
+
+#define MDIO_STATE_STA_B 0
+
+enum mdio_st_clause {
+ MDIO_ST_CLAUSE_45 = 0,
+ MDIO_ST_CLAUSE_22
+};
+
+enum mdio_c22_op_seq {
+ MDIO_C22_WRITE = 1,
+ MDIO_C22_READ = 2
+};
+
+enum mdio_c45_op_seq {
+ MDIO_C45_WRITE_ADDR = 0,
+ MDIO_C45_WRITE_DATA,
+ MDIO_C45_READ_INCREMENT,
+ MDIO_C45_READ
+};
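+
+/* The command register is composed from the fields above; for example, a
+ * clause-22 read of register 0x1 on PHY address 5 (the values here are only
+ * an illustration) is started by writing:
+ *
+ *	cmd = (MDIO_ST_CLAUSE_22 << MDIO_CMD_ST_S) |
+ *	      (MDIO_C22_READ << MDIO_CMD_OP_S) |
+ *	      ((5 & MDIO_CMD_PRTAD_M) << MDIO_CMD_PRTAD_S) |
+ *	      ((0x1 & MDIO_CMD_DEVAD_M) << MDIO_CMD_DEVAD_S) |
+ *	      (1 << MDIO_CMD_START_B);
+ *
+ * which is exactly what hns_mdio_cmd_write() below does.
+ */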
+
+/* peri subctrl reg */
+#define MDIO_SC_CLK_EN 0x338
+#define MDIO_SC_CLK_DIS 0x33C
+#define MDIO_SC_RESET_REQ 0xA38
+#define MDIO_SC_RESET_DREQ 0xA3C
+#define MDIO_SC_CLK_ST 0x531C
+#define MDIO_SC_RESET_ST 0x5A1C
+
+static void mdio_write_reg(u8 __iomem *base, u32 reg, u32 value)
+{
+ writel_relaxed(value, base + reg);
+}
+
+#define MDIO_WRITE_REG(a, reg, value) \
+ mdio_write_reg((a)->vbase, (reg), (value))
+
+static u32 mdio_read_reg(u8 __iomem *base, u32 reg)
+{
+ return readl_relaxed(base + reg);
+}
+
+#define mdio_set_field(origin, mask, shift, val) \
+ do { \
+ (origin) &= (~((mask) << (shift))); \
+ (origin) |= (((val) & (mask)) << (shift)); \
+ } while (0)
+
+#define mdio_get_field(origin, mask, shift) (((origin) >> (shift)) & (mask))
+
+static void mdio_set_reg_field(u8 __iomem *base, u32 reg, u32 mask, u32 shift,
+ u32 val)
+{
+ u32 origin = mdio_read_reg(base, reg);
+
+ mdio_set_field(origin, mask, shift, val);
+ mdio_write_reg(base, reg, origin);
+}
+
+#define MDIO_SET_REG_FIELD(dev, reg, mask, shift, val) \
+ mdio_set_reg_field((dev)->vbase, (reg), (mask), (shift), (val))
+
+static u32 mdio_get_reg_field(u8 __iomem *base, u32 reg, u32 mask, u32 shift)
+{
+ u32 origin;
+
+ origin = mdio_read_reg(base, reg);
+ return mdio_get_field(origin, mask, shift);
+}
+
+#define MDIO_GET_REG_FIELD(dev, reg, mask, shift) \
+ mdio_get_reg_field((dev)->vbase, (reg), (mask), (shift))
+
+#define MDIO_GET_REG_BIT(dev, reg, bit) \
+ mdio_get_reg_field((dev)->vbase, (reg), 0x1ull, (bit))
+
+#define MDIO_CHECK_SET_ST 1
+#define MDIO_CHECK_CLR_ST 0
+
+static int mdio_sc_cfg_reg_write(struct hns_mdio_device *mdio_dev,
+ u32 cfg_reg, u32 set_val,
+ u32 st_reg, u32 st_msk, u8 check_st)
+{
+ u32 time_cnt;
+ u32 reg_value;
+ int ret;
+
+ regmap_write(mdio_dev->subctrl_vbase, cfg_reg, set_val);
+
+ for (time_cnt = MDIO_TIMEOUT; time_cnt; time_cnt--) {
+ ret = regmap_read(mdio_dev->subctrl_vbase, st_reg, &reg_value);
+ if (ret)
+ return ret;
+
+ reg_value &= st_msk;
+ if ((!!check_st) == (!!reg_value))
+ break;
+ }
+
+ if ((!!check_st) != (!!reg_value))
+ return -EBUSY;
+
+ return 0;
+}
+
+static int hns_mdio_wait_ready(struct mii_bus *bus)
+{
+ struct hns_mdio_device *mdio_dev = bus->priv;
+ u32 cmd_reg_value;
+ int i;
+
+ /* wait until MDIO_COMMAND_REG's mdio_start bit is 0;
+ * only then can a read or write be issued
+ */
+ for (i = 0; i < MDIO_TIMEOUT; i++) {
+ cmd_reg_value = MDIO_GET_REG_BIT(mdio_dev,
+ MDIO_COMMAND_REG,
+ MDIO_CMD_START_B);
+ if (!cmd_reg_value)
+ break;
+ }
+ if ((i == MDIO_TIMEOUT) && cmd_reg_value)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static void hns_mdio_cmd_write(struct hns_mdio_device *mdio_dev,
+ u8 is_c45, u8 op, u8 phy_id, u16 cmd)
+{
+ u32 cmd_reg_value;
+ u8 st = is_c45 ? MDIO_ST_CLAUSE_45 : MDIO_ST_CLAUSE_22;
+
+ cmd_reg_value = st << MDIO_CMD_ST_S;
+ cmd_reg_value |= op << MDIO_CMD_OP_S;
+ cmd_reg_value |=
+ (phy_id & MDIO_CMD_PRTAD_M) << MDIO_CMD_PRTAD_S;
+ cmd_reg_value |= (cmd & MDIO_CMD_DEVAD_M) << MDIO_CMD_DEVAD_S;
+ cmd_reg_value |= 1 << MDIO_CMD_START_B;
+
+ MDIO_WRITE_REG(mdio_dev, MDIO_COMMAND_REG, cmd_reg_value);
+}
+
+/**
+ * hns_mdio_write_c22 - access phy register
+ * @bus: mdio bus
+ * @phy_id: phy id
+ * @regnum: register num
+ * @data: register value
+ *
+ * Return 0 on success, negative on failure
+ */
+static int hns_mdio_write_c22(struct mii_bus *bus,
+ int phy_id, int regnum, u16 data)
+{
+ struct hns_mdio_device *mdio_dev = bus->priv;
+ u16 reg = (u16)(regnum & 0xffff);
+ u16 cmd_reg_cfg;
+ int ret;
+ u8 op;
+
+ dev_dbg(&bus->dev, "mdio write %s,base is %p\n",
+ bus->id, mdio_dev->vbase);
+ dev_dbg(&bus->dev, "phy id=%d, reg=%#x, write data=%d\n",
+ phy_id, reg, data);
+
+ /* wait for ready */
+ ret = hns_mdio_wait_ready(bus);
+ if (ret) {
+ dev_err(&bus->dev, "MDIO bus is busy\n");
+ return ret;
+ }
+
+ cmd_reg_cfg = reg;
+ op = MDIO_C22_WRITE;
+
+ MDIO_SET_REG_FIELD(mdio_dev, MDIO_WDATA_REG, MDIO_WDATA_DATA_M,
+ MDIO_WDATA_DATA_S, data);
+
+ hns_mdio_cmd_write(mdio_dev, false, op, phy_id, cmd_reg_cfg);
+
+ return 0;
+}
+
+/**
+ * hns_mdio_write_c45 - access phy register
+ * @bus: mdio bus
+ * @phy_id: phy id
+ * @devad: device address to read
+ * @regnum: register num
+ * @data: register value
+ *
+ * Return 0 on success, negative on failure
+ */
+static int hns_mdio_write_c45(struct mii_bus *bus, int phy_id, int devad,
+ int regnum, u16 data)
+{
+ struct hns_mdio_device *mdio_dev = bus->priv;
+ u16 reg = (u16)(regnum & 0xffff);
+ u16 cmd_reg_cfg;
+ int ret;
+ u8 op;
+
+ dev_dbg(&bus->dev, "mdio write %s,base is %p\n",
+ bus->id, mdio_dev->vbase);
+ dev_dbg(&bus->dev, "phy id=%d, devad=%d, reg=%#x, write data=%d\n",
+ phy_id, devad, reg, data);
+
+ /* wait for ready */
+ ret = hns_mdio_wait_ready(bus);
+ if (ret) {
+ dev_err(&bus->dev, "MDIO bus is busy\n");
+ return ret;
+ }
+
+ /* configure the cmd-reg to write the address */
+ MDIO_SET_REG_FIELD(mdio_dev, MDIO_ADDR_REG, MDIO_ADDR_DATA_M,
+ MDIO_ADDR_DATA_S, reg);
+
+ hns_mdio_cmd_write(mdio_dev, true, MDIO_C45_WRITE_ADDR, phy_id, devad);
+
+ /* wait until the address write has finished */
+ ret = hns_mdio_wait_ready(bus);
+ if (ret) {
+ dev_err(&bus->dev, "MDIO bus is busy\n");
+ return ret;
+ }
+
+ /* configure the data to be written */
+ cmd_reg_cfg = devad;
+ op = MDIO_C45_WRITE_DATA;
+
+ MDIO_SET_REG_FIELD(mdio_dev, MDIO_WDATA_REG, MDIO_WDATA_DATA_M,
+ MDIO_WDATA_DATA_S, data);
+
+ hns_mdio_cmd_write(mdio_dev, true, op, phy_id, cmd_reg_cfg);
+
+ return 0;
+}
+
+/**
+ * hns_mdio_read_c22 - access phy register
+ * @bus: mdio bus
+ * @phy_id: phy id
+ * @regnum: register num
+ *
+ * Return phy register value
+ */
+static int hns_mdio_read_c22(struct mii_bus *bus, int phy_id, int regnum)
+{
+ struct hns_mdio_device *mdio_dev = bus->priv;
+ u16 reg = (u16)(regnum & 0xffff);
+ u16 reg_val;
+ int ret;
+
+ dev_dbg(&bus->dev, "mdio read %s,base is %p\n",
+ bus->id, mdio_dev->vbase);
+ dev_dbg(&bus->dev, "phy id=%d, reg=%#x!\n", phy_id, reg);
+
+ /* Step 1: wait for ready */
+ ret = hns_mdio_wait_ready(bus);
+ if (ret) {
+ dev_err(&bus->dev, "MDIO bus is busy\n");
+ return ret;
+ }
+
+ hns_mdio_cmd_write(mdio_dev, false, MDIO_C22_READ, phy_id, reg);
+
+ /* Step 2: wait until MDIO_COMMAND_REG's mdio_start bit is 0,
+ * i.e. the read operation has finished
+ */
+ ret = hns_mdio_wait_ready(bus);
+ if (ret) {
+ dev_err(&bus->dev, "MDIO bus is busy\n");
+ return ret;
+ }
+
+ reg_val = MDIO_GET_REG_BIT(mdio_dev, MDIO_STA_REG, MDIO_STATE_STA_B);
+ if (reg_val) {
+ dev_err(&bus->dev, " ERROR! MDIO Read failed!\n");
+ return -EBUSY;
+ }
+
+ /* Step 3: get the data out */
+ reg_val = (u16)MDIO_GET_REG_FIELD(mdio_dev, MDIO_RDATA_REG,
+ MDIO_RDATA_DATA_M, MDIO_RDATA_DATA_S);
+
+ return reg_val;
+}
+
+/**
+ * hns_mdio_read_c45 - access phy register
+ * @bus: mdio bus
+ * @phy_id: phy id
+ * @devad: device address to read
+ * @regnum: register num
+ *
+ * Return phy register value
+ */
+static int hns_mdio_read_c45(struct mii_bus *bus, int phy_id, int devad,
+ int regnum)
+{
+ struct hns_mdio_device *mdio_dev = bus->priv;
+ u16 reg = (u16)(regnum & 0xffff);
+ u16 reg_val;
+ int ret;
+
+ dev_dbg(&bus->dev, "mdio read %s,base is %p\n",
+ bus->id, mdio_dev->vbase);
+ dev_dbg(&bus->dev, "phy id=%d, devad=%d, reg=%#x!\n",
+ phy_id, devad, reg);
+
+ /* Step 1: wait for ready */
+ ret = hns_mdio_wait_ready(bus);
+ if (ret) {
+ dev_err(&bus->dev, "MDIO bus is busy\n");
+ return ret;
+ }
+
+ MDIO_SET_REG_FIELD(mdio_dev, MDIO_ADDR_REG, MDIO_ADDR_DATA_M,
+ MDIO_ADDR_DATA_S, reg);
+
+ /* Step 2: configure the cmd-reg to write the address */
+ hns_mdio_cmd_write(mdio_dev, true, MDIO_C45_WRITE_ADDR, phy_id, devad);
+
+ /* Step 3: wait until the address write has finished */
+ ret = hns_mdio_wait_ready(bus);
+ if (ret) {
+ dev_err(&bus->dev, "MDIO bus is busy\n");
+ return ret;
+ }
+
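+ /* Step 4: issue the clause-45 read for the selected device address */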
+ hns_mdio_cmd_write(mdio_dev, true, MDIO_C45_READ, phy_id, devad);
+
+ /* Step 5: wait until MDIO_COMMAND_REG's mdio_start bit is 0,
+ * i.e. the read operation has finished
+ */
+ ret = hns_mdio_wait_ready(bus);
+ if (ret) {
+ dev_err(&bus->dev, "MDIO bus is busy\n");
+ return ret;
+ }
+
+ reg_val = MDIO_GET_REG_BIT(mdio_dev, MDIO_STA_REG, MDIO_STATE_STA_B);
+ if (reg_val) {
+ dev_err(&bus->dev, " ERROR! MDIO Read failed!\n");
+ return -EBUSY;
+ }
+
+ /* Step 6: get the data out */
+ reg_val = (u16)MDIO_GET_REG_FIELD(mdio_dev, MDIO_RDATA_REG,
+ MDIO_RDATA_DATA_M, MDIO_RDATA_DATA_S);
+
+ return reg_val;
+}
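+
+/* A clause-45 access is always two bus transactions: an address phase
+ * (MDIO_C45_WRITE_ADDR, with the register number latched via MDIO_ADDR_REG)
+ * followed by a data phase (MDIO_C45_READ or MDIO_C45_WRITE_DATA), and the
+ * device address is carried in the command word of both phases. Both
+ * hns_mdio_read_c45() and hns_mdio_write_c45() above follow this sequence.
+ */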
+
+/**
+ * hns_mdio_reset - reset mdio bus
+ * @bus: mdio bus
+ *
+ * Return 0 on success, negative on failure
+ */
+static int hns_mdio_reset(struct mii_bus *bus)
+{
+ struct hns_mdio_device *mdio_dev = bus->priv;
+ const struct hns_mdio_sc_reg *sc_reg;
+ int ret;
+
+ if (dev_of_node(bus->parent)) {
+ if (!mdio_dev->subctrl_vbase) {
+ dev_err(&bus->dev, "mdio sys ctl reg has not mapped\n");
+ return -ENODEV;
+ }
+
+ sc_reg = &mdio_dev->sc_reg;
+ /* 1. assert the reset request and check that the reset status is set */
+ ret = mdio_sc_cfg_reg_write(mdio_dev, sc_reg->mdio_reset_req,
+ 0x1, sc_reg->mdio_reset_st, 0x1,
+ MDIO_CHECK_SET_ST);
+ if (ret) {
+ dev_err(&bus->dev, "MDIO reset fail\n");
+ return ret;
+ }
+
+ /* 2. disable the clock and check that the clock status is cleared */
+ ret = mdio_sc_cfg_reg_write(mdio_dev, sc_reg->mdio_clk_dis,
+ 0x1, sc_reg->mdio_clk_st, 0x1,
+ MDIO_CHECK_CLR_ST);
+ if (ret) {
+ dev_err(&bus->dev, "MDIO dis clk fail\n");
+ return ret;
+ }
+
+ /* 3. deassert the reset request and check that the reset status is cleared */
+ ret = mdio_sc_cfg_reg_write(mdio_dev, sc_reg->mdio_reset_dreq,
+ 0x1, sc_reg->mdio_reset_st, 0x1,
+ MDIO_CHECK_CLR_ST);
+ if (ret) {
+ dev_err(&bus->dev, "MDIO dis clk fail\n");
+ return ret;
+ }
+
+ /* 4. enable the clock and check that the clock status is set */
+ ret = mdio_sc_cfg_reg_write(mdio_dev, sc_reg->mdio_clk_en,
+ 0x1, sc_reg->mdio_clk_st, 0x1,
+ MDIO_CHECK_SET_ST);
+ if (ret)
+ dev_err(&bus->dev, "MDIO en clk fail\n");
+ } else if (is_acpi_node(bus->parent->fwnode)) {
+ acpi_status s;
+
+ s = acpi_evaluate_object(ACPI_HANDLE(bus->parent),
+ "_RST", NULL, NULL);
+ if (ACPI_FAILURE(s)) {
+ dev_err(&bus->dev, "Reset failed, return:%#x\n", s);
+ ret = -EBUSY;
+ } else {
+ ret = 0;
+ }
+ } else {
+ dev_err(&bus->dev, "Can not get cfg data from DT or ACPI\n");
+ ret = -ENXIO;
+ }
+ return ret;
+}
+
+/**
+ * hns_mdio_probe - probe mdio device
+ * @pdev: mdio platform device
+ *
+ * Return 0 on success, negative on failure
+ */
+static int hns_mdio_probe(struct platform_device *pdev)
+{
+ struct hns_mdio_device *mdio_dev;
+ struct mii_bus *new_bus;
+ int ret;
+
+ if (!pdev) {
+ dev_err(NULL, "pdev is NULL!\n");
+ return -ENODEV;
+ }
+
+ mdio_dev = devm_kzalloc(&pdev->dev, sizeof(*mdio_dev), GFP_KERNEL);
+ if (!mdio_dev)
+ return -ENOMEM;
+
+ new_bus = devm_mdiobus_alloc(&pdev->dev);
+ if (!new_bus) {
+ dev_err(&pdev->dev, "mdiobus_alloc fail!\n");
+ return -ENOMEM;
+ }
+
+ new_bus->name = MDIO_BUS_NAME;
+ new_bus->read = hns_mdio_read_c22;
+ new_bus->write = hns_mdio_write_c22;
+ new_bus->read_c45 = hns_mdio_read_c45;
+ new_bus->write_c45 = hns_mdio_write_c45;
+ new_bus->reset = hns_mdio_reset;
+ new_bus->priv = mdio_dev;
+ new_bus->parent = &pdev->dev;
+
+ mdio_dev->vbase = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mdio_dev->vbase)) {
+ ret = PTR_ERR(mdio_dev->vbase);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, new_bus);
+ snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s-%s", "Mii",
+ dev_name(&pdev->dev));
+ if (dev_of_node(&pdev->dev)) {
+ struct of_phandle_args reg_args;
+
+ ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
+ "subctrl-vbase",
+ 4,
+ 0,
+ &reg_args);
+ if (!ret) {
+ mdio_dev->subctrl_vbase =
+ syscon_node_to_regmap(reg_args.np);
+ if (IS_ERR(mdio_dev->subctrl_vbase)) {
+ dev_warn(&pdev->dev, "syscon_node_to_regmap error\n");
+ mdio_dev->subctrl_vbase = NULL;
+ } else {
+ if (reg_args.args_count == 4) {
+ mdio_dev->sc_reg.mdio_clk_en =
+ (u16)reg_args.args[0];
+ mdio_dev->sc_reg.mdio_clk_dis =
+ (u16)reg_args.args[0] + 4;
+ mdio_dev->sc_reg.mdio_reset_req =
+ (u16)reg_args.args[1];
+ mdio_dev->sc_reg.mdio_reset_dreq =
+ (u16)reg_args.args[1] + 4;
+ mdio_dev->sc_reg.mdio_clk_st =
+ (u16)reg_args.args[2];
+ mdio_dev->sc_reg.mdio_reset_st =
+ (u16)reg_args.args[3];
+ } else {
+ /* for compatible */
+ mdio_dev->sc_reg.mdio_clk_en =
+ MDIO_SC_CLK_EN;
+ mdio_dev->sc_reg.mdio_clk_dis =
+ MDIO_SC_CLK_DIS;
+ mdio_dev->sc_reg.mdio_reset_req =
+ MDIO_SC_RESET_REQ;
+ mdio_dev->sc_reg.mdio_reset_dreq =
+ MDIO_SC_RESET_DREQ;
+ mdio_dev->sc_reg.mdio_clk_st =
+ MDIO_SC_CLK_ST;
+ mdio_dev->sc_reg.mdio_reset_st =
+ MDIO_SC_RESET_ST;
+ }
+ }
+ } else {
+ dev_warn(&pdev->dev, "find syscon ret = %#x\n", ret);
+ mdio_dev->subctrl_vbase = NULL;
+ }
+
+ ret = of_mdiobus_register(new_bus, pdev->dev.of_node);
+ } else if (is_acpi_node(pdev->dev.fwnode)) {
+ /* Clear all the IRQ properties */
+ memset(new_bus->irq, PHY_POLL, 4 * PHY_MAX_ADDR);
+
+ /* Mask out all PHYs from auto probing. */
+ new_bus->phy_mask = ~0;
+
+ /* Register the MDIO bus */
+ ret = mdiobus_register(new_bus);
+ } else {
+ dev_err(&pdev->dev, "Can not get cfg data from DT or ACPI\n");
+ ret = -ENXIO;
+ }
+
+ if (ret) {
+ dev_err(&pdev->dev, "Cannot register as MDIO bus!\n");
+ platform_set_drvdata(pdev, NULL);
+ return ret;
+ }
+
+ return 0;
+}
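+
+/* An illustrative devicetree fragment matching the "subctrl-vbase" parsing
+ * above, using the default peri-subctrl offsets defined in this file; the
+ * node name, unit address and syscon label are placeholders, not taken from
+ * an actual board file:
+ *
+ *	mdio@803c0000 {
+ *		compatible = "hisilicon,hns-mdio";
+ *		reg = <0x0 0x803c0000 0x0 0x10000>;
+ *		subctrl-vbase = <&peri_subctrl 0x338 0xa38 0x531c 0x5a1c>;
+ *		#address-cells = <1>;
+ *		#size-cells = <0>;
+ *	};
+ *
+ * args[0]..args[3] map to clk_en, reset_req, clk_st and reset_st; the
+ * matching clk_dis and reset_dreq registers are taken to sit 4 bytes above
+ * clk_en and reset_req, as the probe code above shows.
+ */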
+
+/**
+ * hns_mdio_remove - remove mdio device
+ * @pdev: mdio platform device
+ *
+ * Return 0 on success, negative on failure
+ */
+static int hns_mdio_remove(struct platform_device *pdev)
+{
+ struct mii_bus *bus;
+
+ bus = platform_get_drvdata(pdev);
+
+ mdiobus_unregister(bus);
+ platform_set_drvdata(pdev, NULL);
+ return 0;
+}
+
+static const struct of_device_id hns_mdio_match[] = {
+ {.compatible = "hisilicon,mdio"},
+ {.compatible = "hisilicon,hns-mdio"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, hns_mdio_match);
+
+static const struct acpi_device_id hns_mdio_acpi_match[] = {
+ { "HISI0141", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, hns_mdio_acpi_match);
+
+static struct platform_driver hns_mdio_driver = {
+ .probe = hns_mdio_probe,
+ .remove = hns_mdio_remove,
+ .driver = {
+ .name = MDIO_DRV_NAME,
+ .of_match_table = hns_mdio_match,
+ .acpi_match_table = ACPI_PTR(hns_mdio_acpi_match),
+ },
+};
+
+module_platform_driver(hns_mdio_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
+MODULE_DESCRIPTION("Hisilicon HNS MDIO driver");
+MODULE_ALIAS("platform:" MDIO_DRV_NAME);